text
stringlengths
4
1.02M
meta
dict
""" This example demonstrates the use of the Zookeeper job store. On each run, it adds a new alarm that fires after ten seconds. You can exit the program, restart it and observe that any previous alarms that have not fired yet are still active. Running the example with the --clear switch will remove any existing alarms. """ from datetime import datetime, timedelta import sys import os from apscheduler.schedulers.blocking import BlockingScheduler def alarm(time): print('Alarm! This alarm was scheduled at %s.' % time) if __name__ == '__main__': scheduler = BlockingScheduler() scheduler.add_jobstore('zookeeper', path='/example_jobs') if len(sys.argv) > 1 and sys.argv[1] == '--clear': scheduler.remove_all_jobs() alarm_time = datetime.now() + timedelta(seconds=10) scheduler.add_job(alarm, 'date', run_date=alarm_time, args=[datetime.now()]) print('To clear the alarms, run this example with the --clear argument.') print('Press Ctrl+{0} to exit'.format('Break' if os.name == 'nt' else 'C')) try: scheduler.start() except (KeyboardInterrupt, SystemExit): pass
{ "content_hash": "2a884d93babda09f9c81ae47b01e43b6", "timestamp": "", "source": "github", "line_count": 33, "max_line_length": 97, "avg_line_length": 34.45454545454545, "alnum_prop": 0.6965699208443272, "repo_name": "srault95/apscheduler", "id": "12b3e42c12ccfdc2424832f241f48f4bc79315b4", "size": "1137", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "examples/jobstores/zookeeper.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "268777" } ], "symlink_target": "" }
"""Visualize an RGB-D input next to its projected heightmap.

Searches the PinchDataset for sample 000002, renders the raw color/depth
images and the matching heightmap color/depth images, and writes the four
visualizations under logs/heightmap/.
"""
import os
import os.path as osp

import numpy as np

import grasp_fusion_lib
from grasp_fusion_lib.contrib import grasp_fusion


def main():
    root = grasp_fusion.datasets.PinchDataset.root_dir
    print('Dataset dir: {}'.format(root))

    color_dir = osp.join(root, 'color-input')
    for fname in sorted(os.listdir(color_dir)):
        print(fname)
        heightmap_name = fname.split('-')[0] + '.png'
        print(heightmap_name)
        if heightmap_name != '000002.png':
            continue

        # Raw sensor color image; keep only the first three channels.
        rgb = grasp_fusion_lib.io.imread(
            osp.join(root, 'color-input', fname))[:, :, :3]

        # Raw sensor depth, divided by 10000 to rescale the stored integer
        # units (presumably to meters -- confirm against dataset docs);
        # zero values mark missing measurements.
        depth = grasp_fusion_lib.io.imread(
            osp.join(root, 'depth-input', fname)).astype(np.float32) / 10000.
        depth[depth == 0] = np.nan
        depth_viz = grasp_fusion_lib.image.colorize_depth(depth)

        # Pre-rendered heightmap images for the same sample.
        heightmap_rgb = grasp_fusion_lib.io.imread(
            osp.join(root, 'heightmap-color', heightmap_name))
        heightmap_depth = grasp_fusion_lib.io.imread(
            osp.join(root, 'heightmap-depth', heightmap_name)) / 10000.
        heightmap_depth_viz = grasp_fusion_lib.image.colorize_depth(
            heightmap_depth)

        grasp_fusion_lib.io.imsave('logs/heightmap/raw_rgb.jpg', rgb)
        grasp_fusion_lib.io.imsave('logs/heightmap/raw_depth.jpg', depth_viz)
        grasp_fusion_lib.io.imsave(
            'logs/heightmap/heightmap_rgb.jpg', heightmap_rgb)
        grasp_fusion_lib.io.imsave(
            'logs/heightmap/heightmap_depth.jpg', heightmap_depth_viz
        )
        break


if __name__ == '__main__':
    main()
{ "content_hash": "ab40b35cc9dd886248b1c581e198a0ba", "timestamp": "", "source": "github", "line_count": 61, "max_line_length": 77, "avg_line_length": 32.40983606557377, "alnum_prop": 0.6125442589782498, "repo_name": "pazeshun/jsk_apc", "id": "82ed453a66f9823491447e0e558f9a9d59fbd89b", "size": "2000", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "demos/grasp_fusion/examples/grasp_fusion/paper/visualize_heightmap.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "C++", "bytes": "101871" }, { "name": "CMake", "bytes": "42995" }, { "name": "Common Lisp", "bytes": "695864" }, { "name": "Dockerfile", "bytes": "1503" }, { "name": "HTML", "bytes": "6364" }, { "name": "Python", "bytes": "406153" }, { "name": "Shell", "bytes": "4475" } ], "symlink_target": "" }
""" Main Logic of the svnmailer =========================== This module is the central core of the svnmailer. It dispatches all work to be done. It contains just one class (L{Main}), which reads the config file while it is initialized. When the C{Main.run()} method is called, it selects the groups to be notified, the notifiers to be run and runs all notifiers for each group. The Main class may raise several exceptions (which all inherit from L{Error}): - L{ConfigError} occurs, if the configuration contains errors (like type or value errors, unicode errors etc). The L{ConfigError} exception is initialized with a string describing what kind of error occured. - L{NotifierError} occurs, if one or more of the notifiers throw an exception. The L{Main} class catches these exceptions (except C{KeyboardInterrupt} and C{SystemExit}) and will initialize the L{NotifierError} with the list of traceback strings, one for each exception occured. (See the format_exception docs at U{http://docs.python.org/lib/module-traceback.html}). - L{svnmailer.subversion.RepositoryError} occurs, if something failed while accessing the subversion repository. 
It contains some attributes for identifying the error: C{svn_err_code}, C{svn_err_name} and C{svn_err_str} """ __author__ = "André Malo" __docformat__ = "epytext en" __all__ = ['Main', 'Error', 'ConfigError', 'NotifierError'] # Exceptions class Error(Exception): """ Base exception for this module """ pass class ConfigError(Error): """ Configuration error occurred """ pass class NotifierError(Error): """ An Notifier error occured """ pass class Main(object): """ main svnmailer logic @ivar _settings: The settings to use @type _settings: C{svnmailer.settings.Settings} """ def __init__(self, options): """ Initialization @param options: Command line options @type options: C{optparse.OptionParser} @exception ConfigError: Configuration error """ self._settings = self._getSettings(options) def run(self): """ Dispatches the work to be done @exception svnmailer.subversion.RepositoryError: Error while accessing the subversion repository @exception NotifierError: One or more notifiers went crazy """ from svnmailer import subversion try: try: self._openRepository() notifier_errors = [] throwables = (KeyboardInterrupt, SystemExit, subversion.Error) selector = self._getNotifierSelector() for groupset in self._getGroupSets(): notifiers = selector.selectNotifiers(groupset) for notifier in notifiers: try: notifier.run() except throwables: raise except: import sys, traceback info = sys.exc_info() backtrace = traceback.format_exception( info[0], info[1], info[2] ) del info backtrace[0] = "Notifier: %s.%s\nRevision: %s\n" \ "Groups: %r\n%s" % ( notifier.__module__, notifier.__class__.__name__, self._settings.runtime.revision, [group._name for group in groupset.groups], backtrace[0], ) notifier_errors.append(''.join(backtrace)) if notifier_errors: raise NotifierError(*notifier_errors) except subversion.Error, exc: import sys raise subversion.RepositoryError, exc, sys.exc_info()[2] finally: # IMPORTANT! 
otherwise the locks are kept and # we run into bdb "out of memory" errors some time self._closeRepository() def _getNotifierSelector(self): """ Returns the notifier selector @return: The selector @rtype: C{svnmailer.notifier.selector.Selector} """ from svnmailer.notifier import selector return selector.Selector(self._settings) def _getChanges(self): """ Returns the list of changes for the requested revision @return: The list of changes (C{[Descriptor, ...]}) @rtype: C{list} @exception svnmailer.subversion.Error: Error while accessing the subversion repository """ from svnmailer import settings, subversion modes = settings.modes runtime = self._settings.runtime if runtime.mode in (modes.commit, modes.propchange): changes = runtime._repos.getChangesList(runtime.revision) elif runtime.mode in (modes.lock, modes.unlock): is_locked = bool(runtime.mode == modes.lock) changes = [ subversion.LockedPathDescriptor(runtime._repos, path, is_locked) for path in runtime.stdin.splitlines() if path ] changes.sort() else: raise AssertionError("Unknown runtime.mode %r" % (runtime.mode,)) return changes def _getGroupSets(self): """ Returns the list of groupsets (grouped groups...) to notify @return: The list (maybe empty). (C{[GroupSet, ...]}) @rtype: C{list} """ # collect changes and group by group [ ;-) ] group_changes = {} group_cache = {} changes = self._getChanges() for change in changes: for group in self._getGroupsByChange(change): groupid = id(group) try: group_changes[groupid].append(change) except KeyError: group_cache[groupid] = group group_changes[groupid] = [change] # Build the groupset # TODO: make group compression configurable? group_sets = [] for groupid, changelist in group_changes.items(): group = group_cache[groupid] for stored in group_sets: # We don't need to compare the group with *all* # groups of this set. If the group is considered # equal to the first stored group, all other stored # groups are considered equal as well. 
(Otherwise # they wouldn't been there ...) if stored.changes == changelist and \ group._compare(stored.groups[0]): stored.groups.append(group) group = None break if group is not None: group_sets.append(GroupSet([group], changelist, changes)) return group_sets def _getGroupsByChange(self, change): """ Returns the matching groups for a particular change @param change: The change to select @type change: C{svnmailer.subversion.VersionedPathDescriptor} @return: The group list @rtype: C{list} """ selected_groups = [] ignored_groups = [] # the repos path is always *without* slash (see # subversion.Respository.__init__) repos_path = change.repos.path.decode("utf-8", "strict") # we guarantee, that directories end with a slash path = "%s%s" % (change.path, ["", "/"][change.isDirectory()]) path = path.decode("utf-8", "strict") for group in self._settings.groups: subst = self._getDefaultSubst(group, repos_path, path) # if for_repos is set and does not match -> ignore if group.for_repos: match = group.for_repos.match(repos_path) if match: subst.update(match.groupdict()) else: continue # if exclude_paths is set and does match -> ignore if group.exclude_paths and group.exclude_paths.match(path): continue # if for_paths is set and does not match -> ignore if group.for_paths: match = group.for_paths.match(path) if match: subst.update(match.groupdict()) else: continue # store the substdict for later use for name, value in subst.items(): group._sub_(name, value) (selected_groups, ignored_groups)[ bool(group.ignore_if_other_matches) ].append(group) # BRAINER: theoretically there could be more than one group # in the ignore list, which would have to be ignored at all then. 
# (ignore_if_OTHER_MATCHES, think about it) # Instead we select them ALL, so the output isn't lost return selected_groups and selected_groups or ignored_groups def _getDefaultSubst(self, group, repos_path, path): """ Returns the default substitution dict @param group: The group to consider @type group: C{svnmailer.settings.GroupSettingsContainer} @param repos_path: The repository path @type repos_path: C{unicode} @param path: The change path @type path: C{unicode} @return: The initialized dictionary @rtype: C{dict} @exception svnmailer.subversion.Error: An error occured while accessing the subversion repository """ from svnmailer.settings import modes runtime = self._settings.runtime author = runtime.author if not author and runtime.mode in (modes.commit, modes.propchange): author = runtime._repos.getRevisionAuthor(runtime.revision) subst = { 'author' : author or 'no_author', 'group' : group._name, 'property': runtime.propname, 'revision': runtime.revision and u"%d" % runtime.revision, } if group.extract_x509_author: from svnmailer import util x509 = util.extractX509User(author) if x509: from email import Header realname, mail = x509 subst.update({ 'x509_address': realname and "%s <%s>" % ( Header.Header(realname).encode(), mail) or mail, 'x509_CN': realname, 'x509_emailAddress': mail, }) if group._def_for_repos: match = group._def_for_repos.match(repos_path) if match: subst.update(match.groupdict()) if group._def_for_paths: match = group._def_for_paths.match(path) if match: subst.update(match.groupdict()) return subst def _getSettings(self, options): """ Returns the settings object @param options: Command line options @type options: C{svnmailer.cli.SvnmailerOptionParser} @return: The settings object @rtype: C{svnmailer.config.ConfigFileSettings} @exception ConfigError: configuration error """ from svnmailer import config try: return config.ConfigFileSettings(options) except config.Error, exc: import sys raise ConfigError, str(exc), sys.exc_info()[2] def 
_openRepository(self): """ Opens the repository @exception svnmailer.subversion.Error: Error while accessing the subversion repository """ from svnmailer import subversion, util config = self._settings.runtime repos_path = util.filename.fromLocale( config.repository, config.path_encoding ) if isinstance(repos_path, str): # !!! HACK ALERT !!! # # --path-encoding=none # subversion needs unicode as path and translates it # back to the locale, we try our best by translating # literally to unicode... repos_path = repos_path.decode("iso-8859-1", "strict") config._repos = subversion.Repository(repos_path) def _closeRepository(self): """ Closes the repository """ try: self._settings.runtime._repos.close() except AttributeError: # That's ok pass class GroupSet(object): """ Container object for a single groupset @ivar groups: The groups to process @type groups: C{list} @ivar changes: The changes that belong to the group @type changes: C{list} @ivar xchanges: The changes that don't belong to the group (only filled if show_nonmatching_paths = yes) @type xchanges: C{list} """ def __init__(self, groups, changes, allchanges): """ Initialization @param groups: The groups to process @type groups: C{list} @param changes: The changes that belong to the group @type changes: C{list} @param allchanges: All changes @type allchanges: C{list} """ from svnmailer.settings import xpath self.groups = groups self.changes = changes nongroups = groups[0].show_nonmatching_paths if nongroups == xpath.ignore: self.xchanges = None elif nongroups == xpath.yes: self.xchanges = [ change for change in allchanges if change not in changes ] else: # no is default self.xchanges = []
{ "content_hash": "f499dc7f9f530c469669921a7ca039c2", "timestamp": "", "source": "github", "line_count": 410, "max_line_length": 80, "avg_line_length": 34.421951219512195, "alnum_prop": 0.5543824842343938, "repo_name": "danielshahaf/svnmailer-debian", "id": "66d84c2efa5ee41a0b4fdace55b7431c8473dcfb", "size": "14782", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/lib/svnmailer/main.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "299480" } ], "symlink_target": "" }
from __future__ import absolute_import from __future__ import print_function import struct from collections import OrderedDict from collections.abc import Mapping, Sequence from enum import Enum TLV_TYPE_SIGNED_INTEGER = 0x00 TLV_TYPE_UNSIGNED_INTEGER = 0x04 TLV_TYPE_BOOLEAN = 0x08 TLV_TYPE_FLOATING_POINT_NUMBER = 0x0A TLV_TYPE_UTF8_STRING = 0x0C TLV_TYPE_BYTE_STRING = 0x10 TLV_TYPE_NULL = 0x14 TLV_TYPE_STRUCTURE = 0x15 TLV_TYPE_ARRAY = 0x16 TLV_TYPE_PATH = 0x17 TLV_TAG_CONTROL_ANONYMOUS = 0x00 TLV_TAG_CONTROL_CONTEXT_SPECIFIC = 0x20 TLV_TAG_CONTROL_COMMON_PROFILE_2Bytes = 0x40 TLV_TAG_CONTROL_COMMON_PROFILE_4Bytes = 0x60 TLV_TAG_CONTROL_IMPLICIT_PROFILE_2Bytes = 0x80 TLV_TAG_CONTROL_IMPLICIT_PROFILE_4Bytes = 0xA0 TLV_TAG_CONTROL_FULLY_QUALIFIED_6Bytes = 0xC0 TLV_TAG_CONTROL_FULLY_QUALIFIED_8Bytes = 0xE0 TLVBoolean_False = TLV_TYPE_BOOLEAN TLVBoolean_True = TLV_TYPE_BOOLEAN + 1 TLVEndOfContainer = 0x18 INT8_MIN = -128 INT16_MIN = -32768 INT32_MIN = -2147483648 INT64_MIN = -9223372036854775808 INT8_MAX = 127 INT16_MAX = 32767 INT32_MAX = 2147483647 INT64_MAX = 9223372036854775807 UINT8_MAX = 255 UINT16_MAX = 65535 UINT32_MAX = 4294967295 UINT64_MAX = 18446744073709551615 ElementTypes = { 0x00: "Signed Integer 1-byte value", 0x01: "Signed Integer 2-byte value", 0x02: "Signed Integer 4-byte value", 0x03: "Signed Integer 8-byte value", 0x04: "Unsigned Integer 1-byte value", 0x05: "Unsigned Integer 2-byte value", 0x06: "Unsigned Integer 4-byte value", 0x07: "Unsigned Integer 8-byte value", 0x08: "Boolean False", 0x09: "Boolean True", 0x0A: "Floating Point 4-byte value", 0x0B: "Floating Point 8-byte value", 0x0C: "UTF-8 String 1-byte length", 0x0D: "UTF-8 String 2-byte length", 0x0E: "UTF-8 String 4-byte length", 0x0F: "UTF-8 String 8-byte length", 0x10: "Byte String 1-byte length", 0x11: "Byte String 2-byte length", 0x12: "Byte String 4-byte length", 0x13: "Byte String 8-byte length", 0x14: "Null", 0x15: "Structure", 0x16: "Array", 0x17: "Path", 0x18: "End of 
Collection", } TagControls = { 0x00: "Anonymous", 0x20: "Context 1-byte", 0x40: "Common Profile 2-byte", 0x60: "Common Profile 4-byte", 0x80: "Implicit Profile 2-byte", 0xA0: "Implicit Profile 4-byte", 0xC0: "Fully Qualified 6-byte", 0xE0: "Fully Qualified 8-byte", } class uint(int): ''' NewType will not return a class until Python 3.10, as Python 3.10 is not widely used, we still need to construct a class so it can work as a type. ''' def __init__(self, val: int): if (val < 0): raise TypeError( 'expecting positive value, got negative value of %d instead' % val) class float32(float): ''' A type for single precision floats distinct from the double precision 'float' type offered by default in Python. This type distinction is present in the Matter data model types so we need it here as well. It is backed by an ordinary float, which means there will be precision loss at the time the value is converted to TLV. ''' pass class TLVWriter(object): def __init__(self, encoding=None, implicitProfile=None): self._encoding = encoding if encoding is not None else bytearray() self._implicitProfile = implicitProfile self._containerStack = [] @property def encoding(self): """The object into which encoded TLV data is written. By default this is a bytearray object. """ return self._encoding @encoding.setter def encoding(self, val): self._encoding = val @property def implicitProfile(self): """The Chip profile id used when encoding implicit profile tags. Setting this value will result in an implicit profile tag being encoded whenever the profile of the tag to be encoded matches the specified implicit profile id. Setting this value to None (the default) disabled encoding of implicit profile tags. """ return self._implicitProfile @implicitProfile.setter def implicitProfile(self, val): self._implicitProfile = val def put(self, tag, val): """Write a value in TLV format with the specified TLV tag. 
val can be a Python object which will be encoded as follows: - Python bools, floats and strings are encoded as their respective TLV types. - Python ints are encoded as unsigned TLV integers if zero or positive; signed TLV integers if negative. - None is encoded as a TLV Null. - bytes and bytearray objects are encoded as TVL byte strings. - Mapping-like objects (e.g. dict) are encoded as TLV structures. The keys of the map object are expected to be tag values, as described below for the tag argument. Map values are encoded recursively, using the same rules as defined for the val argument. The encoding order of elements depends on the type of the map object. Elements within a dict are automatically encoded tag numerical order. Elements within other forms of mapping object (e.g. OrderedDict) are encoded in the object's natural iteration order. - Sequence-like objects (e.g. arrays) are written as TLV arrays. Elements within the array are encoded recursively, using the same rules as defined for the val argument. tag can be a small int (0-255), a tuple of two integers, or None. If tag is an integer, it is encoded as a TLV context-specific tag. If tag is a two-integer tuple, it is encoded as a TLV profile-specific tag, with the first integer encoded as the profile id and the second as the tag number. If tag is None, it is encoded as a TLV anonymous tag. 
""" if val is None: self.putNull(tag) elif isinstance(val, Enum): self.putUnsignedInt(tag, val) elif isinstance(val, bool): self.putBool(tag, val) elif isinstance(val, uint): self.putUnsignedInt(tag, val) elif isinstance(val, int): self.putSignedInt(tag, val) elif isinstance(val, float32): self.putFloat(tag, val) elif isinstance(val, float): self.putDouble(tag, val) elif isinstance(val, str): self.putString(tag, val) elif isinstance(val, bytes) or isinstance(val, bytearray): self.putBytes(tag, val) elif isinstance(val, Mapping): self.startStructure(tag) if type(val) == dict: val = OrderedDict( sorted(val.items(), key=lambda item: tlvTagToSortKey(item[0])) ) for containedTag, containedVal in val.items(): self.put(containedTag, containedVal) self.endContainer() elif isinstance(val, Sequence): self.startArray(tag) for containedVal in val: self.put(None, containedVal) self.endContainer() else: raise ValueError("Attempt to TLV encode unsupported value") def putSignedInt(self, tag, val): """Write a value as a TLV signed integer with the specified TLV tag.""" if val >= INT8_MIN and val <= INT8_MAX: format = "<b" elif val >= INT16_MIN and val <= INT16_MAX: format = "<h" elif val >= INT32_MIN and val <= INT32_MAX: format = "<l" elif val >= INT64_MIN and val <= INT64_MAX: format = "<q" else: raise ValueError("Integer value out of range") val = struct.pack(format, val) controlAndTag = self._encodeControlAndTag( TLV_TYPE_SIGNED_INTEGER, tag, lenOfLenOrVal=len(val) ) self._encoding.extend(controlAndTag) self._encoding.extend(val) def putUnsignedInt(self, tag, val): """Write a value as a TLV unsigned integer with the specified TLV tag.""" val = self._encodeUnsignedInt(val) controlAndTag = self._encodeControlAndTag( TLV_TYPE_UNSIGNED_INTEGER, tag, lenOfLenOrVal=len(val) ) self._encoding.extend(controlAndTag) self._encoding.extend(val) def putFloat(self, tag, val): """Write a value as a TLV float with the specified TLV tag.""" val = struct.pack("f", val) controlAndTag = 
self._encodeControlAndTag( TLV_TYPE_FLOATING_POINT_NUMBER, tag, lenOfLenOrVal=len(val) ) self._encoding.extend(controlAndTag) self._encoding.extend(val) def putDouble(self, tag, val): """Write a value as a TLV double with the specified TLV tag.""" val = struct.pack("d", val) controlAndTag = self._encodeControlAndTag( TLV_TYPE_FLOATING_POINT_NUMBER, tag, lenOfLenOrVal=len(val) ) self._encoding.extend(controlAndTag) self._encoding.extend(val) def putString(self, tag, val): """Write a value as a TLV string with the specified TLV tag.""" val = val.encode("utf-8") valLen = self._encodeUnsignedInt(len(val)) controlAndTag = self._encodeControlAndTag( TLV_TYPE_UTF8_STRING, tag, lenOfLenOrVal=len(valLen) ) self._encoding.extend(controlAndTag) self._encoding.extend(valLen) self._encoding.extend(val) def putBytes(self, tag, val): """Write a value as a TLV byte string with the specified TLV tag.""" valLen = self._encodeUnsignedInt(len(val)) controlAndTag = self._encodeControlAndTag( TLV_TYPE_BYTE_STRING, tag, lenOfLenOrVal=len(valLen) ) self._encoding.extend(controlAndTag) self._encoding.extend(valLen) self._encoding.extend(val) def putBool(self, tag, val): """Write a value as a TLV boolean with the specified TLV tag.""" if val: type = TLVBoolean_True else: type = TLVBoolean_False controlAndTag = self._encodeControlAndTag(type, tag) self._encoding.extend(controlAndTag) def putNull(self, tag): """Write a TLV null with the specified TLV tag.""" controlAndTag = self._encodeControlAndTag(TLV_TYPE_NULL, tag) self._encoding.extend(controlAndTag) def startContainer(self, tag, containerType): """Start writing a TLV container with the specified TLV tag. containerType can be one of TLV_TYPE_STRUCTURE, TLV_TYPE_ARRAY or TLV_TYPE_PATH. 
""" self._verifyValidContainerType(containerType) controlAndTag = self._encodeControlAndTag(containerType, tag) self._encoding.extend(controlAndTag) self._containerStack.insert(0, containerType) def startStructure(self, tag): """Start writing a TLV structure with the specified TLV tag.""" self.startContainer(tag, containerType=TLV_TYPE_STRUCTURE) def startArray(self, tag): """Start writing a TLV array with the specified TLV tag.""" self.startContainer(tag, containerType=TLV_TYPE_ARRAY) def startPath(self, tag): """Start writing a TLV path with the specified TLV tag.""" self.startContainer(tag, containerType=TLV_TYPE_PATH) def endContainer(self): """End writing the current TLV container.""" self._containerStack.pop(0) controlAndTag = self._encodeControlAndTag(TLVEndOfContainer, None) self._encoding.extend(controlAndTag) def _encodeControlAndTag(self, type, tag, lenOfLenOrVal=0): controlByte = type if lenOfLenOrVal == 2: controlByte |= 1 elif lenOfLenOrVal == 4: controlByte |= 2 elif lenOfLenOrVal == 8: controlByte |= 3 if tag is None: if ( type != TLVEndOfContainer and len(self._containerStack) != 0 and self._containerStack[0] == TLV_TYPE_STRUCTURE ): raise ValueError( "Attempt to encode anonymous tag within TLV structure") controlByte |= TLV_TAG_CONTROL_ANONYMOUS return struct.pack("<B", controlByte) if isinstance(tag, int): if tag < 0 or tag > UINT8_MAX: raise ValueError( "Context-specific TLV tag number out of range") if len(self._containerStack) == 0: raise ValueError( "Attempt to encode context-specific TLV tag at top level" ) if self._containerStack[0] == TLV_TYPE_ARRAY: raise ValueError( "Attempt to encode context-specific tag within TLV array" ) controlByte |= TLV_TAG_CONTROL_CONTEXT_SPECIFIC return struct.pack("<BB", controlByte, tag) if isinstance(tag, tuple): (profile, tagNum) = tag if not isinstance(tagNum, int): raise ValueError("Invalid object given for TLV tag") if tagNum < 0 or tagNum > UINT32_MAX: raise ValueError("TLV tag number out of range") if 
profile != None: if not isinstance(profile, int): raise ValueError("Invalid object given for TLV profile id") if profile < 0 or profile > UINT32_MAX: raise ValueError("TLV profile id value out of range") if ( len(self._containerStack) != 0 and self._containerStack[0] == TLV_TYPE_ARRAY ): raise ValueError( "Attempt to encode profile-specific tag within TLV array" ) if profile is None or profile == self._implicitProfile: if tagNum <= UINT16_MAX: controlByte |= TLV_TAG_CONTROL_IMPLICIT_PROFILE_2Bytes return struct.pack("<BH", controlByte, tagNum) else: controlByte |= TLV_TAG_CONTROL_IMPLICIT_PROFILE_4Bytes return struct.pack("<BL", controlByte, tagNum) elif profile == 0: if tagNum <= UINT16_MAX: controlByte |= TLV_TAG_CONTROL_COMMON_PROFILE_2Bytes return struct.pack("<BH", controlByte, tagNum) else: controlByte |= TLV_TAG_CONTROL_COMMON_PROFILE_4Bytes return struct.pack("<BL", controlByte, tagNum) else: vendorId = (profile >> 16) & 0xFFFF profileNum = (profile >> 0) & 0xFFFF if tagNum <= UINT16_MAX: controlByte |= TLV_TAG_CONTROL_FULLY_QUALIFIED_6Bytes return struct.pack("<BHHH", controlByte, vendorId, profileNum, tagNum) else: controlByte |= TLV_TAG_CONTROL_FULLY_QUALIFIED_8Bytes return struct.pack("<BHHL", controlByte, vendorId, profileNum, profile, tagNum) raise ValueError("Invalid object given for TLV tag") @staticmethod def _encodeUnsignedInt(val): if val < 0: raise ValueError("Integer value out of range") if val <= UINT8_MAX: format = "<B" elif val <= UINT16_MAX: format = "<H" elif val <= UINT32_MAX: format = "<L" elif val <= UINT64_MAX: format = "<Q" else: raise ValueError("Integer value out of range") return struct.pack(format, val) @staticmethod def _verifyValidContainerType(containerType): if ( containerType != TLV_TYPE_STRUCTURE and containerType != TLV_TYPE_ARRAY and containerType != TLV_TYPE_PATH ): raise ValueError("Invalid TLV container type") class TLVReader(object): def __init__(self, tlv): self._tlv = tlv self._bytesRead = 0 self._decodings = [] 
    @property
    def decoding(self):
        # Raw list of per-element decoding dicts accumulated by get().
        return self._decodings

    def get(self):
        """Get the dictionary representation of tlv data"""
        out = {}
        self._get(self._tlv, self._decodings, out)
        return out

    def _decodeControlByte(self, tlv, decoding):
        # Upper 3 bits of the control byte select the tag control, lower
        # 5 bits select the element type (names come from the TagControls
        # and ElementTypes lookup tables).
        (controlByte,) = struct.unpack(
            "<B", tlv[self._bytesRead: self._bytesRead + 1])
        controlTypeIndex = controlByte & 0xE0
        decoding["tagControl"] = TagControls[controlTypeIndex]
        elementtypeIndex = controlByte & 0x1F
        decoding["type"] = ElementTypes[elementtypeIndex]
        self._bytesRead += 1

    def _decodeControlAndTag(self, tlv, decoding):
        """The control byte specifies the type of a TLV element and how its
        tag, length and value fields are encoded. The control byte consists
        of two subfields: an element type field which occupies the lower 5
        bits, and a tag control field which occupies the upper 3 bits. The
        element type field encodes the element's type as well as how the
        corresponding length and value fields are encoded. In the case of
        Booleans and the null value, the element type field also encodes
        the value itself."""
        self._decodeControlByte(tlv, decoding)

        if decoding["tagControl"] == "Anonymous":
            decoding["tag"] = None
            decoding["tagLen"] = 0
        elif decoding["tagControl"] == "Context 1-byte":
            (decoding["tag"],) = struct.unpack(
                "<B", tlv[self._bytesRead: self._bytesRead + 1]
            )
            decoding["tagLen"] = 1
            self._bytesRead += 1
        elif decoding["tagControl"] == "Common Profile 2-byte":
            profile = 0
            (tag,) = struct.unpack(
                "<H", tlv[self._bytesRead: self._bytesRead + 2])
            decoding["profileTag"] = (profile, tag)
            decoding["tagLen"] = 2
            self._bytesRead += 2
        elif decoding["tagControl"] == "Common Profile 4-byte":
            profile = 0
            (tag,) = struct.unpack(
                "<L", tlv[self._bytesRead: self._bytesRead + 4])
            decoding["profileTag"] = (profile, tag)
            decoding["tagLen"] = 4
            self._bytesRead += 4
        elif decoding["tagControl"] == "Implicit Profile 2-byte":
            # profile None means "the reader's implicit profile".
            profile = None
            (tag,) = struct.unpack(
                "<H", tlv[self._bytesRead: self._bytesRead + 2])
            decoding["profileTag"] = (profile, tag)
            decoding["tagLen"] = 2
            self._bytesRead += 2
        elif decoding["tagControl"] == "Implicit Profile 4-byte":
            profile = None
            (tag,) = struct.unpack(
                "<L", tlv[self._bytesRead: self._bytesRead + 4])
            decoding["profileTag"] = (profile, tag)
            decoding["tagLen"] = 4
            self._bytesRead += 4
        elif decoding["tagControl"] == "Fully Qualified 6-byte":
            (vendorId, profileNum) = struct.unpack(
                "<HH", tlv[self._bytesRead: self._bytesRead + 4])
            profile = (vendorId << 16) | profileNum
            (tag,) = struct.unpack(
                "<H", tlv[self._bytesRead + 4: self._bytesRead + 6])
            decoding["profileTag"] = (profile, tag)
            # NOTE: tagLen counts only the tag-number bytes; the full tag
            # field here is 6 bytes (bytesRead advances by 6).
            decoding["tagLen"] = 2
            self._bytesRead += 6
        elif decoding["tagControl"] == "Fully Qualified 8-byte":
            (vendorId, profileNum) = struct.unpack(
                "<HH", tlv[self._bytesRead: self._bytesRead + 4])
            profile = (vendorId << 16) | profileNum
            (tag,) = struct.unpack(
                "<L", tlv[self._bytesRead + 4: self._bytesRead + 8])
            decoding["profileTag"] = (profile, tag)
            decoding["tagLen"] = 4
            self._bytesRead += 8

    def _decodeStrLength(self, tlv, decoding):
        """UTF-8 or Byte StringLength fields are encoded in 0, 1, 2 or 4
        byte widths, as specified by the element type field.
        If the element type needs a length field grab the next bytes as
        length"""
        if "length" in decoding["type"]:
            if "1-byte" in decoding["type"]:
                (decoding["strDataLen"],) = struct.unpack(
                    "<B", tlv[self._bytesRead: self._bytesRead + 1]
                )
                decoding["strDataLenLen"] = 1
                self._bytesRead += 1
            elif "2-byte" in decoding["type"]:
                (decoding["strDataLen"],) = struct.unpack(
                    "<H", tlv[self._bytesRead: self._bytesRead + 2]
                )
                decoding["strDataLenLen"] = 2
                self._bytesRead += 2
            elif "4-byte" in decoding["type"]:
                (decoding["strDataLen"],) = struct.unpack(
                    "<L", tlv[self._bytesRead: self._bytesRead + 4]
                )
                decoding["strDataLenLen"] = 4
                self._bytesRead += 4
            elif "8-byte" in decoding["type"]:
                (decoding["strDataLen"],) = struct.unpack(
                    "<Q", tlv[self._bytesRead: self._bytesRead + 8]
                )
                decoding["strDataLenLen"] = 8
                self._bytesRead += 8
        else:
            # Non-string element: no length field precedes the value.
            decoding["strDataLen"] = 0
            decoding["strDataLenLen"] = 0

    def _decodeVal(self, tlv, decoding):
        """decode primitive tlv value to the corresponding python value,
        tlv array and path are decoded as python list, tlv structure is
        decoded as python dictionary"""
        if decoding["type"] == "Structure":
            decoding["value"] = {}
            decoding["Structure"] = []
            self._get(tlv, decoding["Structure"], decoding["value"])
        elif decoding["type"] == "Array":
            decoding["value"] = []
            decoding["Array"] = []
            self._get(tlv, decoding["Array"], decoding["value"])
        elif decoding["type"] == "Path":
            decoding["value"] = []
            decoding["Path"] = []
            self._get(tlv, decoding["Path"], decoding["value"])
        elif decoding["type"] == "Null":
            decoding["value"] = None
        elif decoding["type"] == "End of Collection":
            decoding["value"] = None
        elif decoding["type"] == "Boolean True":
            # Booleans carry their value in the element type itself.
            decoding["value"] = True
        elif decoding["type"] == "Boolean False":
            decoding["value"] = False
        elif decoding["type"] == "Unsigned Integer 1-byte value":
            (decoding["value"],) = struct.unpack(
                "<B", tlv[self._bytesRead: self._bytesRead + 1]
            )
            # wrap in uint so the unsigned-ness survives round-tripping
            decoding["value"] = uint(decoding["value"])
            self._bytesRead += 1
        elif decoding["type"] == "Signed Integer 1-byte value":
            (decoding["value"],) = struct.unpack(
                "<b", tlv[self._bytesRead: self._bytesRead + 1]
            )
            self._bytesRead += 1
        elif decoding["type"] == "Unsigned Integer 2-byte value":
            (decoding["value"],) = struct.unpack(
                "<H", tlv[self._bytesRead: self._bytesRead + 2]
            )
            decoding["value"] = uint(decoding["value"])
            self._bytesRead += 2
        elif decoding["type"] == "Signed Integer 2-byte value":
            (decoding["value"],) = struct.unpack(
                "<h", tlv[self._bytesRead: self._bytesRead + 2]
            )
            self._bytesRead += 2
        elif decoding["type"] == "Unsigned Integer 4-byte value":
            (decoding["value"],) = struct.unpack(
                "<L", tlv[self._bytesRead: self._bytesRead + 4]
            )
            decoding["value"] = uint(decoding["value"])
            self._bytesRead += 4
        elif decoding["type"] == "Signed Integer 4-byte value":
            (decoding["value"],) = struct.unpack(
                "<l", tlv[self._bytesRead: self._bytesRead + 4]
            )
            self._bytesRead += 4
        elif decoding["type"] == "Unsigned Integer 8-byte value":
            (decoding["value"],) = struct.unpack(
                "<Q", tlv[self._bytesRead: self._bytesRead + 8]
            )
            decoding["value"] = uint(decoding["value"])
            self._bytesRead += 8
        elif decoding["type"] == "Signed Integer 8-byte value":
            (decoding["value"],) = struct.unpack(
                "<q", tlv[self._bytesRead: self._bytesRead + 8]
            )
            self._bytesRead += 8
        elif decoding["type"] == "Floating Point 4-byte value":
            (decoding["value"],) = struct.unpack(
                "<f", tlv[self._bytesRead: self._bytesRead + 4]
            )
            # wrap so a 4-byte float re-encodes as 4 bytes, not 8
            decoding["value"] = float32(decoding["value"])
            self._bytesRead += 4
        elif decoding["type"] == "Floating Point 8-byte value":
            (decoding["value"],) = struct.unpack(
                "<d", tlv[self._bytesRead: self._bytesRead + 8]
            )
            self._bytesRead += 8
        elif "UTF-8 String" in decoding["type"]:
            (val,) = struct.unpack(
                "<%ds" % decoding["strDataLen"],
                tlv[self._bytesRead: self._bytesRead + decoding["strDataLen"]],
            )
            try:
                decoding["value"] = str(val, "utf-8")
            except Exception as ex:
                # fall back to the raw bytes when the payload is not
                # valid UTF-8
                decoding["value"] = val
            self._bytesRead += decoding["strDataLen"]
        elif "Byte String" in decoding["type"]:
            (val,) = struct.unpack(
                "<%ds" % decoding["strDataLen"],
                tlv[self._bytesRead: self._bytesRead + decoding["strDataLen"]],
            )
            decoding["value"] = val
            self._bytesRead += decoding["strDataLen"]
        else:
            raise ValueError("Attempt to decode unsupported TLV type")

    def _get(self, tlv, decodings, out):
        """Decode elements until the buffer or the current container ends.

        Appends a raw decoding dict per element to *decodings* and stores
        each value into *out* (a dict keyed by tag, or a list for
        arrays/paths).  Anonymous tags in a dict are stored under "Any".
        """
        endOfEncoding = False

        while len(tlv[self._bytesRead:]) > 0 and endOfEncoding == False:
            decoding = {}
            self._decodeControlAndTag(tlv, decoding)
            self._decodeStrLength(tlv, decoding)
            self._decodeVal(tlv, decoding)
            decodings.append(decoding)

            if decoding["type"] == "End of Collection":
                endOfEncoding = True
            else:
                if "profileTag" in list(decoding.keys()):
                    out[decoding["profileTag"]] = decoding["value"]
                elif "tag" in list(decoding.keys()):
                    if isinstance(out, Mapping):
                        tag = decoding["tag"] if decoding["tag"] is not None else "Any"
                        out[tag] = decoding["value"]
                    else:
                        out.append(decoding["value"])
                else:
                    raise ValueError("Attempt to decode unsupported TLV tag")


def tlvTagToSortKey(tag):
    """Map a TLV tag (None, int or (profile, tagNum)) to a sortable int.

    Ordering: anonymous < context-specific < implicit profile < explicit
    profiles (by profile id), then by tag number within each group.
    """
    if tag is None:
        return -1
    if isinstance(tag, int):
        majorOrder = 0
    elif isinstance(tag, tuple):
        (profileId, tag) = tag
        if profileId is None:
            majorOrder = 1
        else:
            majorOrder = profileId + 2
    else:
        raise ValueError("Invalid TLV tag")
    return (majorOrder << 32) + tag


if __name__ == "__main__":
    # Self-test: encode a structure exercising every major value type,
    # decode it back and compare.
    val = dict(
        [
            (1, 0),
            (2, 65536),
            (3, True),
            (4, None),
            (5, "Hello!"),
            (6, bytearray([0xDE, 0xAD, 0xBE, 0xEF])),
            (7, ["Goodbye!", 71024724507, False]),
            ((0x235A0000, 42), "FOO"),
            ((None, 42), "BAR"),
        ]
    )

    writer = TLVWriter()
    encodedVal = writer.put(None, val)

    reader = TLVReader(writer.encoding)
    out = reader.get()

    print("TLVReader input: " + str(val))
    print("TLVReader output: " + str(out["Any"]))

    if val == out["Any"]:
        print("Test Success")
    else:
        print("Test Failure")
{ "content_hash": "344edb384f669180c6137333e96a71ed", "timestamp": "", "source": "github", "line_count": 708, "max_line_length": 150, "avg_line_length": 39.54661016949152, "alnum_prop": 0.569270331083253, "repo_name": "project-chip/connectedhomeip", "id": "5665c5e2a4f46bead00fac3e7447328611d4e58a", "size": "28824", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/controller/python/chip/tlv/__init__.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "1759301" }, { "name": "C++", "bytes": "19104548" }, { "name": "CMake", "bytes": "140510" }, { "name": "Dockerfile", "bytes": "50353" }, { "name": "Emacs Lisp", "bytes": "1042" }, { "name": "Java", "bytes": "167719" }, { "name": "JavaScript", "bytes": "2106" }, { "name": "Jinja", "bytes": "22322" }, { "name": "Objective-C", "bytes": "930838" }, { "name": "Objective-C++", "bytes": "435348" }, { "name": "Python", "bytes": "1931007" }, { "name": "Shell", "bytes": "195843" }, { "name": "Tcl", "bytes": "311" }, { "name": "ZAP", "bytes": "584219" } ], "symlink_target": "" }
""" Copyright (c) 2011, 2012, Regents of the University of California All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: - Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. - Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. """ """ @author Sagar Karandikar <skarandikar@berkeley.edu> """ import time import urllib2 from smap.driver import SmapDriver from smap.util import periodicSequentialCall urllib2.install_opener(urllib2.build_opener()) class ScraperDriver(SmapDriver): """Periodically republish scraped data as an sMAP feed. The driver that extends this needs to define a scrape method that will be used by update and a special setup method with a special attr as defined below. Examples of setup methods that work automatically can be found in many of the iso scrapers, like pjm.py. """ def scrape(self): """Implemented by a subclass. 
This should scrape data and return a dict that is used by setup to create the timeseries and by update to add data to the timeseries""" """ Dict format is as follows: {"data_type": {"location1": {"value_type1": timeseries, "value_type2": timeseries, ...} "location2": {"value_type1": timeseries, ...} }, "data_type2": {"location1": {"value_type1": timeseries, "value_type2": timeseries, ...} "location2": {"value_type1": timeseries, ...} } } timeseries of format [[1, 1], [2, 2], [3, 3], etc.] This will create feeds with paths like: /PrefixFromIni/data_type/location/value_type For example: /PJM/LMP/112 WILT/FiveMin """ return {} def update(self): """This automatically updates/adds timeseries data, assuming that the dict returned by the scrape method is formatted as above.""" # Note that the scrape method is in a try/except clause here in order to # allow the driver to recover if there is an error on pageload. However, # DO NOT place the call to self.scrape() in setup() inside a try/except # clause. setup() should only complete successfully if all of the # data is able to be loaded (since timeseries creation depends on an # initial fetch of data). If errors are handled elsewhere (ie in the # scrape() method), nasty things like partial setup of timeseries could # occur. try: scraped = self.scrape() except urllib2.URLError: pass except urllib2.HTTPError: pass except IOError: pass else: for data_type in scraped.keys(): for location in scraped[data_type].keys(): for valtype in scraped[data_type][location].keys(): timeseries = scraped[data_type][location][valtype] path = "/" + data_type + "/" + location + "/" + valtype for pair in timeseries: if pair[0] <= self.lastLatests[path]: continue self.add(path, pair[0], pair[1]) self.lastLatests[path] = pair[0] def setup(self, opts): """This can be done almost completely automatically using code similar to that used in the ISO scrapers. 
See the setup method in pjm.py for an example.""" # WARNING: DO NOT put the call to self.scrape() in a try/except clause # here. Doing so can cause nasty things like partial timeseries setup # since complete data load is essential to setup. More information is in # the note in update() # Effectively, you should allow all errors to propagate in this method. # User needs to define: # lastLatests, a dict used to prevent resubmission of duplicates # self.lastLatest = {}, each item is (path: None) by default # update_frequency of the feeds, in seconds # self.update_frequency = 3600 # standard timeseries add or automatic version as noted above. pass def start(self): periodicSequentialCall(self.update).start(self.update_frequency)
{ "content_hash": "aa7a9ff246841f47bb8d00274b62bd9d", "timestamp": "", "source": "github", "line_count": 123, "max_line_length": 81, "avg_line_length": 45.146341463414636, "alnum_prop": 0.6335314244552495, "repo_name": "tectronics/smap-data", "id": "628b0d2839e64ac2e25ffbf55b4c2ee58670e14c", "size": "5553", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "python/smap/drivers/scraper.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "C", "bytes": "301328" }, { "name": "HTML", "bytes": "7902" }, { "name": "Makefile", "bytes": "5268" }, { "name": "Python", "bytes": "1394465" }, { "name": "R", "bytes": "23461" }, { "name": "Shell", "bytes": "1273" }, { "name": "TeX", "bytes": "40212" }, { "name": "XSLT", "bytes": "5081" } ], "symlink_target": "" }
from flask import render_template, request, jsonify
from . import main


def _wants_json():
    """Return True when the client accepts JSON but not HTML (API client)."""
    return (request.accept_mimetypes.accept_json
            and not request.accept_mimetypes.accept_html)


def _error_response(status_code, message, template_name):
    """Render an error either as JSON (for API clients) or as an HTML page.

    Shared implementation for all the error handlers below; the JSON body
    shape ({'error': message}) and templates are unchanged from the
    original per-handler code.
    """
    if _wants_json():
        response = jsonify({'error': message})
        response.status_code = status_code
        return response
    return render_template(template_name), status_code


@main.app_errorhandler(403)
def forbidden(e):
    """Handle 403 Forbidden."""
    return _error_response(403, 'forbidden', '403.html')


@main.app_errorhandler(404)
def page_not_found(e):
    """Handle 404 Not Found."""
    return _error_response(404, 'not found', '404.html')


@main.app_errorhandler(500)
def internal_server_error(e):
    """Handle 500 Internal Server Error."""
    return _error_response(500, 'internal server error', '500.html')
{ "content_hash": "a9488678020d5595858457f1ec304f82", "timestamp": "", "source": "github", "line_count": 32, "max_line_length": 62, "avg_line_length": 31.8125, "alnum_prop": 0.6699410609037328, "repo_name": "NilsGuo/nilsblog", "id": "c599320fdc5a1cf5371188238f6c047e8ddb3869", "size": "1041", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "app/main/errors.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "25744" }, { "name": "HTML", "bytes": "525161" }, { "name": "JavaScript", "bytes": "45199" }, { "name": "Mako", "bytes": "412" }, { "name": "Python", "bytes": "86976" } ], "symlink_target": "" }
""" mfdrn module. Contains the ModflowDrn class. Note that the user can access the ModflowDrn class as `flopy.modflow.ModflowDrn`. Additional information for this MODFLOW package can be found at the `Online MODFLOW Guide <http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/index.html?drn.htm>`_. """ import numpy as np from ..pakbase import Package from ..utils.recarray_utils import create_empty_recarray from ..utils.util_list import MfList class ModflowDrn(Package): """ MODFLOW Drain Package Class. Parameters ---------- model : model object The model object (of type :class:`flopy.modflow.mf.Modflow`) to which this package will be added. ipakcb : int A flag that is used to determine if cell-by-cell budget data should be saved. If ipakcb is non-zero cell-by-cell budget data will be saved. (default is None). stress_period_data : list of boundaries, recarrays, or dictionary of boundaries. Each drain cell is defined through definition of layer(int), row(int), column(int), elevation(float), conductance(float). The simplest form is a dictionary with a lists of boundaries for each stress period, where each list of boundaries itself is a list of boundaries. Indices of the dictionary are the numbers of the stress period. This gives the form of:: stress_period_data = {0: [ [lay, row, col, stage, cond], [lay, row, col, stage, cond], [lay, row, col, stage, cond], ], 1: [ [lay, row, col, stage, cond], [lay, row, col, stage, cond], [lay, row, col, stage, cond], ], ... kper: [ [lay, row, col, stage, cond], [lay, row, col, stage, cond], [lay, row, col, stage, cond], ] } Note that if no values are specified for a certain stress period, then the list of boundaries for the previous stress period for which values were defined is used. Full details of all options to specify stress_period_data can be found in the flopy3boundaries Notebook in the basic subdirectory of the examples directory. 
dtype : dtype definition if data type is different from default options : list of strings Package options. (default is None). extension : string Filename extension (default is 'drn') unitnumber : int File unit number (default is None). filenames : str or list of str Filenames to use for the package and the output files. If filenames=None the package name will be created using the model name and package extension and the cbc output name will be created using the model name and .cbc extension (for example, modflowtest.cbc), if ipakcbc is a number greater than zero. If a single string is passed the package will be set to the string and cbc output names will be created using the model name and .cbc extension, if ipakcbc is a number greater than zero. To define the names for all package files (input and output) the length of the list of strings should be 2. Default is None. Attributes ---------- Methods ------- See Also -------- Notes ----- Parameters are not supported in FloPy. If "RETURNFLOW" in passed in options, the drain return package (DRT) activated, which expects a different (longer) dtype for stress_period_data Examples -------- >>> import flopy >>> ml = flopy.modflow.Modflow() >>> lrcec = {0:[2, 3, 4, 10., 100.]} #this drain will be applied to all >>> #stress periods >>> drn = flopy.modflow.ModflowDrn(ml, stress_period_data=lrcec) """ def __init__( self, model, ipakcb=None, stress_period_data=None, dtype=None, extension="drn", unitnumber=None, options=None, filenames=None, **kwargs, ): # set default unit number of one is not specified if unitnumber is None: unitnumber = ModflowDrn._defaultunit() # set filenames filenames = self._prepare_filenames(filenames, 2) # update external file information with cbc output, if necessary if ipakcb is not None: model.add_output_file( ipakcb, fname=filenames[1], package=self._ftype() ) else: ipakcb = 0 if options is None: options = [] self.is_drt = False for opt in options: if opt.upper() == "RETURNFLOW": self.is_drt = True 
break if self.is_drt: name = ["DRT"] else: name = [self._ftype()] # call base package constructor super().__init__( model, extension=extension, name=name, unit_number=unitnumber, filenames=filenames[0], ) self._generate_heading() self.url = "drn.htm" self.ipakcb = ipakcb self.np = 0 self.options = options if dtype is not None: self.dtype = dtype else: self.dtype = self.get_default_dtype( structured=self.parent.structured, is_drt=self.is_drt ) self.stress_period_data = MfList(self, stress_period_data) self.parent.add_package(self) @staticmethod def get_default_dtype(structured=True, is_drt=False): if structured: if not is_drt: dtype = np.dtype( [ ("k", int), ("i", int), ("j", int), ("elev", np.float32), ("cond", np.float32), ] ) else: dtype = np.dtype( [ ("k", int), ("i", int), ("j", int), ("elev", np.float32), ("cond", np.float32), ("layr", int), ("rowr", int), ("colr", int), ("rfprop", np.float32), ] ) else: dtype = np.dtype( [("node", int), ("elev", np.float32), ("cond", np.float32)] ) return dtype def _ncells(self): """Maximum number of cells that have drains (developed for MT3DMS SSM package). Returns ------- ncells: int maximum number of drain cells """ return self.stress_period_data.mxact def write_file(self, check=True): """ Write the package file. Parameters ---------- check : boolean Check package data for common errors. 
(default True) Returns ------- None """ if ( check ): # allows turning off package checks when writing files at model level self.check( f=f"{self.name[0]}.chk", verbose=self.parent.verbose, level=1, ) f_drn = open(self.fn_path, "w") f_drn.write(f"{self.heading}\n") line = f"{self.stress_period_data.mxact:10d}{self.ipakcb:10d}" if self.is_drt: line += "{0:10d}{0:10d}".format(0) for opt in self.options: line += " " + str(opt) line += "\n" f_drn.write(line) self.stress_period_data.write_transient(f_drn) f_drn.close() def add_record(self, kper, index, values): try: self.stress_period_data.add_record(kper, index, values) except Exception as e: raise Exception(f"mfdrn error adding record to list: {e!s}") @staticmethod def get_empty(ncells=0, aux_names=None, structured=True, is_drt=False): # get an empty recarray that corresponds to dtype dtype = ModflowDrn.get_default_dtype( structured=structured, is_drt=is_drt ) if aux_names is not None: dtype = Package.add_to_dtype(dtype, aux_names, np.float32) return create_empty_recarray(ncells, dtype, default_value=-1.0e10) @staticmethod def _get_sfac_columns(): return ["cond"] @classmethod def load(cls, f, model, nper=None, ext_unit_dict=None, check=True): """ Load an existing package. Parameters ---------- f : filename or file handle File to load. model : model object The model object (of type :class:`flopy.modflow.mf.Modflow`) to which this package will be added. ext_unit_dict : dictionary, optional If the arrays in the file are specified using EXTERNAL, or older style array control records, then `f` should be a file handle. In this case ext_unit_dict is required, which can be constructed using the function :class:`flopy.utils.mfreadnam.parsenamefile`. check : boolean Check package data for common errors. (default True) Returns ------- drn : ModflowDrn object ModflowDrn object. 
Examples -------- >>> import flopy >>> m = flopy.modflow.Modflow() >>> drn = flopy.modflow.ModflowDrn.load('test.drn', m) """ if model.verbose: print("loading drn package file...") return Package.load( f, model, cls, nper=nper, check=check, ext_unit_dict=ext_unit_dict, ) @staticmethod def _ftype(): return "DRN" @staticmethod def _defaultunit(): return 21
{ "content_hash": "bb674cebfbab5761b7b64dbc1e8806d0", "timestamp": "", "source": "github", "line_count": 329, "max_line_length": 97, "avg_line_length": 30.97872340425532, "alnum_prop": 0.5315934065934066, "repo_name": "jentjr/flopy", "id": "32f6e7992bee58e2632d220e5a60d0b7c52489e9", "size": "10192", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "flopy/modflow/mfdrn.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "832" }, { "name": "CSS", "bytes": "321" }, { "name": "Makefile", "bytes": "634" }, { "name": "Python", "bytes": "6353118" }, { "name": "Shell", "bytes": "292" } ], "symlink_target": "" }
class HaltStateException(Exception):
    """Raised when trying to continue execution from the halt state."""


class UnsetTapeException(Exception):
    """Raised when the tape has not been set."""


class InvalidSymbolException(Exception):
    """Raised when a symbol is not valid."""


class UnknownTransitionException(Exception):
    """Raised when no transition is specified for a given
    (state, symbol) tuple."""
{ "content_hash": "35af376408630f8961cd5115292bc131", "timestamp": "", "source": "github", "line_count": 30, "max_line_length": 72, "avg_line_length": 17.966666666666665, "alnum_prop": 0.6623376623376623, "repo_name": "jainpranav/Turing_Machine_Simulator", "id": "fa3b3d64a1813a3fe2362939554203db235f9889", "size": "568", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Simulator/tmexceptions.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "51102" } ], "symlink_target": "" }
# Example: draw a world map using the van der Grinten projection.
from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
# lon_0 is central longitude of projection.
# resolution = 'c' means use crude resolution coastlines.
m = Basemap(projection='vandg',lon_0=0,resolution='c')
m.drawcoastlines()
m.fillcontinents(color='coral',lake_color='aqua')
# draw parallels and meridians.
m.drawparallels(np.arange(-80.,81.,20.))
m.drawmeridians(np.arange(0.,360.,60.))
# fill the map background (oceans) to match the lake color
m.drawmapboundary(fill_color='aqua')
plt.title("van der Grinten Projection")
plt.show()
{ "content_hash": "20e91e609cb063d8b9e1115ced534670", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 57, "avg_line_length": 37.142857142857146, "alnum_prop": 0.7596153846153846, "repo_name": "guziy/basemap", "id": "03db2a3947a5d2c26fc0a15e4271f09c1e12bab3", "size": "520", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "packages/basemap/doc/users/figures/vandg.py", "mode": "33188", "license": "mit", "language": [ { "name": "Cython", "bytes": "14661" }, { "name": "Python", "bytes": "440405" }, { "name": "Shell", "bytes": "1161" } ], "symlink_target": "" }
"""A setup file for PIP pacakge. Usage: python setup.py bdist_wheel """ import setuptools with open('README.md', 'r') as f: LONG_DESCRIPTION = f.read() setuptools.setup( name='prog_edu_assistant_tools', version='0.3.1', author='Salikh Zakirov', author_email='salikh@gmail.com', description= 'Tools for authoring programming assignments in Jupyter notebooks', long_description=LONG_DESCRIPTION, long_description_content_type='text/markdown', url='https://github.com/google/prog-edu-assistant/tree/master/python/prog_edu_assistant_tools', packages=setuptools.find_packages(), classifiers=[ 'Programming Language :: Python :: 3', 'License :: OSI Approved :: Apache Software License', 'Operating System :: OS Independent', ], install_requires=[ 'IPython', 'Jinja2', ], )
{ "content_hash": "b75f332be180b560d8281fba2ce15c34", "timestamp": "", "source": "github", "line_count": 32, "max_line_length": 99, "avg_line_length": 27.34375, "alnum_prop": 0.6571428571428571, "repo_name": "google/prog-edu-assistant", "id": "70038ed3f9b9c83add25accae8b73e623e8abec0", "size": "875", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "python/prog_edu_assistant_tools/setup.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "3205" }, { "name": "Dockerfile", "bytes": "2036" }, { "name": "Go", "bytes": "145593" }, { "name": "JavaScript", "bytes": "5155" }, { "name": "Jupyter Notebook", "bytes": "4357607" }, { "name": "Python", "bytes": "68016" }, { "name": "Shell", "bytes": "7374" }, { "name": "Starlark", "bytes": "35545" } ], "symlink_target": "" }
from django.contrib.auth.models import User

from django.db import models


class TestManager(models.Manager):

    def test(self, test):
        # Return a plain-dict summary of *test* (currently just its name).
        return {
            "test_name": test.name
        }


class Test(models.Model):
    '''
    A named test record owned by a user.
    (Previous docstring was copied from an Account model and did not
    describe this model.)
    '''
    objects = TestManager()

    # owning user; deleting the user deletes their tests
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    name = models.CharField(max_length=63, blank=False)
{ "content_hash": "ddf131322fd33a501c1cc9e24327a757", "timestamp": "", "source": "github", "line_count": 20, "max_line_length": 63, "avg_line_length": 22.65, "alnum_prop": 0.6578366445916115, "repo_name": "dborstelmann/hackathon-kit", "id": "44e9da83ed541ad32abc7581a36acfb7567819b1", "size": "453", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "api/models/test.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "761" }, { "name": "HTML", "bytes": "1949" }, { "name": "JavaScript", "bytes": "53640" }, { "name": "Python", "bytes": "8534" } ], "symlink_target": "" }
from app import db

# All table schemas are reflected from the existing database rather than
# declared in code.
db.Model.metadata.reflect(db.engine)


class Vets(db.Model):
    __table__ = db.Model.metadata.tables['vets']

    def __repr__(self):
        # NOTE(review): .encode('utf-8') returns bytes; under Python 3 this
        # renders as b'...' inside the repr — looks like this module targets
        # Python 2; confirm before migrating.
        return '<Vet {}>'.format(self.last_name.encode('utf-8'))


class Users(db.Model):
    __table__ = db.Model.metadata.tables['users']

    def __repr__(self):
        user = self.user_name.encode('utf-8')
        role = self.role.encode('utf-8')
        return '<User {}({})>'.format(user, role)


class Breeds(db.Model):
    __table__ = db.Model.metadata.tables['breeds']

    def __repr__(self):
        breed = self.name.encode('utf-8')
        breed_id = self.id
        species_id = self.species_id
        return '<Breed({}) {}({})>'.format(species_id, breed, breed_id)


class Species(db.Model):
    __table__ = db.Model.metadata.tables['species']

    def __repr__(self):
        species = self.name.encode('utf-8')
        species_id = self.id
        return '<Species({}) {}>'.format(species_id, species)


class Animals(db.Model):
    __table__ = db.Model.metadata.tables['animals']

    @classmethod
    def get_animal_by_id(self, animal):
        # One animal joined with its breed, species and owner; 404s when
        # the id is unknown.
        return Animals.query.filter_by(id=animal)\
            .join(Breeds, Species, Owners)\
            .add_entity(Breeds)\
            .add_entity(Species)\
            .add_entity(Owners)\
            .order_by(Animals.id)\
            .first_or_404()

    def __repr__(self):
        animal_id = self.id
        breed_id = self.breed_id
        return '<Animal({}) {}({})>'.format(breed_id,
                                            self.name.encode('utf-8'),
                                            animal_id)


class Owners(db.Model):
    __table__ = db.Model.metadata.tables['owners']

    @classmethod
    def get_owner_by_id(self, owner):
        # 404s when the id is unknown.
        return Owners.query.filter_by(id=owner)\
            .order_by(Owners.id)\
            .first_or_404()

    def __repr__(self):
        owner_id = self.id
        first_name = self.first_name
        last_name = self.last_name
        return '<Owner({}) {} {}>'.format(owner_id,
                                          first_name.encode('utf-8'),
                                          last_name.encode('utf-8'))


class Visits(db.Model):
    __table__ = db.Model.metadata.tables['visits']

    def __repr__(self):
        return '<Visit({}) {} ({})>'.format(
            self.id,
            self.date,
            self.animal_id
        )

    @classmethod
    def get_visits_by_animal(self, animal):
        # All visits for one animal, joined with the attending vet,
        # ordered by visit date.
        return Visits.query.filter_by(animal_id=animal)\
            .join(Vets)\
            .add_entity(Vets)\
            .order_by(Visits.date)\
            .all()


def get_all_animals():
    # Every animal with its breed, species and owner, ordered by name.
    return Animals.query.join(Breeds, Species, Owners)\
        .add_entity(Breeds)\
        .add_entity(Species)\
        .add_entity(Owners)\
        .order_by(Animals.name)


def get_all_breeds():
    return Breeds.query.join(Species)\
        .add_entity(Species)\
        .order_by(Species.name, Breeds.name)


def get_all_species():
    return Species.query.order_by(Species.name)


def get_all_owners():
    return Owners.query.order_by(Owners.last_name, Owners.first_name)


def get_all_vets():
    return Vets.query.order_by(Vets.last_name, Vets.first_name)


def get_all_users():
    return Users.query.order_by(Users.user_name)
{ "content_hash": "53e78785f7e156fbfee045ff8d20ee1f", "timestamp": "", "source": "github", "line_count": 111, "max_line_length": 106, "avg_line_length": 31.0990990990991, "alnum_prop": 0.5240440324449595, "repo_name": "blahu/v", "id": "783880ebd112d7a89b201c29c8561f7e5cc84a02", "size": "3452", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "app/models.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "7383" }, { "name": "Python", "bytes": "8866" } ], "symlink_target": "" }
import os
import tempfile
import unittest
import logging

from pyidf import ValidationLevel
import pyidf
from pyidf.idf import IDF
from pyidf.electric_load_center import GeneratorMicroChp

log = logging.getLogger(__name__)


class TestGeneratorMicroChp(unittest.TestCase):
    """Round-trip test: populate a GeneratorMicroChp object, save it to an
    IDF file, reload the file, and verify every field survived unchanged."""

    def setUp(self):
        # Scratch IDF file for the round trip; removed in tearDown.
        self.fd, self.path = tempfile.mkstemp()

    def tearDown(self):
        os.remove(self.path)

    def test_create_generatormicrochp(self):
        pyidf.validation_level = ValidationLevel.error

        # (attribute, value) pairs — one entry per IDF field, in field order.
        field_values = [
            ("name", "Name"),
            ("performance_parameters_name",
             "object-list|Performance Parameters Name"),
            ("zone_name", "object-list|Zone Name"),
            ("cooling_water_inlet_node_name",
             "node|Cooling Water Inlet Node Name"),
            ("cooling_water_outlet_node_name",
             "node|Cooling Water Outlet Node Name"),
            ("air_inlet_node_name", "node|Air Inlet Node Name"),
            ("air_outlet_node_name", "node|Air Outlet Node Name"),
            ("generator_fuel_supply_name",
             "object-list|Generator Fuel Supply Name"),
            ("availability_schedule_name",
             "object-list|Availability Schedule Name"),
        ]

        obj = GeneratorMicroChp()
        for attr, value in field_values:
            setattr(obj, attr, value)

        idf = IDF()
        idf.add(obj)
        idf.save(self.path, check=False)

        # Dump the generated file for debugging.
        with open(self.path, mode='r') as f:
            for line in f:
                log.debug(line.strip())

        idf2 = IDF(self.path)
        reloaded = idf2.generatormicrochps[0]
        for attr, value in field_values:
            self.assertEqual(getattr(reloaded, attr), value)
{ "content_hash": "2d2ae254794ebbcc010041e9e4beb90d", "timestamp": "", "source": "github", "line_count": 70, "max_line_length": 119, "avg_line_length": 43.02857142857143, "alnum_prop": 0.6882470119521913, "repo_name": "rbuffat/pyidf", "id": "98366695ad0a28ee2c5077bdfd87ff192fa69142", "size": "3012", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/test_generatormicrochp.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "22271673" } ], "symlink_target": "" }
"""Tests for celery's cache result backend (memory / memcache / pylibmc)."""
from __future__ import absolute_import

import sys
import types

from contextlib import contextmanager

from kombu.utils.encoding import str_to_bytes

from celery import signature
from celery import states
from celery import group
from celery.backends.cache import CacheBackend, DummyClient
from celery.exceptions import ImproperlyConfigured
from celery.five import items, string, text_t
from celery.utils import uuid

from celery.tests.case import (
    AppCase, Mock, mask_modules, patch, reset_modules,
)

PY3 = sys.version_info[0] == 3


class SomeClass(object):
    # Arbitrary picklable payload used to prove results are serialized.

    def __init__(self, data):
        self.data = data


class test_CacheBackend(AppCase):

    def setup(self):
        # In-memory backend and a fresh task id for every test.
        self.tb = CacheBackend(backend='memory://', app=self.app)
        self.tid = uuid()

    def test_no_backend(self):
        # No backend configured at all must be rejected early.
        self.app.conf.CELERY_CACHE_BACKEND = None
        with self.assertRaises(ImproperlyConfigured):
            CacheBackend(backend=None, app=self.app)

    def test_mark_as_done(self):
        self.assertEqual(self.tb.get_status(self.tid), states.PENDING)
        self.assertIsNone(self.tb.get_result(self.tid))

        self.tb.mark_as_done(self.tid, 42)
        self.assertEqual(self.tb.get_status(self.tid), states.SUCCESS)
        self.assertEqual(self.tb.get_result(self.tid), 42)

    def test_is_pickled(self):
        # Store a result containing a custom object ...
        result = {'foo': 'baz', 'bar': SomeClass(12345)}
        self.tb.mark_as_done(self.tid, result)
        # ... and verify it round-trips (is serialized properly).
        rindb = self.tb.get_result(self.tid)
        self.assertEqual(rindb.get('foo'), 'baz')
        self.assertEqual(rindb.get('bar').data, 12345)

    def test_mark_as_failure(self):
        try:
            raise KeyError('foo')
        except KeyError as exception:
            self.tb.mark_as_failure(self.tid, exception)
            self.assertEqual(self.tb.get_status(self.tid), states.FAILURE)
            self.assertIsInstance(self.tb.get_result(self.tid), KeyError)

    def test_apply_chord(self):
        tb = CacheBackend(backend='memory://', app=self.app)
        gid, res = uuid(), [self.app.AsyncResult(uuid()) for _ in range(3)]
        tb.apply_chord(group(app=self.app), (), gid, {}, result=res)

    @patch('celery.result.GroupResult.restore')
    def test_on_chord_part_return(self, restore):
        tb = CacheBackend(backend='memory://', app=self.app)

        deps = Mock()
        deps.__len__ = Mock()
        deps.__len__.return_value = 2
        restore.return_value = deps
        task = Mock()
        task.name = 'foobarbaz'
        self.app.tasks['foobarbaz'] = task
        task.request.chord = signature(task)

        gid, res = uuid(), [self.app.AsyncResult(uuid()) for _ in range(3)]
        task.request.group = gid
        tb.apply_chord(group(app=self.app), (), gid, {}, result=res)

        self.assertFalse(deps.join_native.called)
        tb.on_chord_part_return(task, 'SUCCESS', 10)
        self.assertFalse(deps.join_native.called)

        # The chord has 2 members: only the second part-return triggers
        # the join and the cleanup of the group result.
        tb.on_chord_part_return(task, 'SUCCESS', 10)
        deps.join_native.assert_called_with(propagate=True, timeout=3.0)
        deps.delete.assert_called_with()

    def test_mget(self):
        self.tb.set('foo', 1)
        self.tb.set('bar', 2)

        self.assertDictEqual(self.tb.mget(['foo', 'bar']),
                             {'foo': 1, 'bar': 2})

    def test_forget(self):
        self.tb.mark_as_done(self.tid, {'foo': 'bar'})
        x = self.app.AsyncResult(self.tid, backend=self.tb)
        x.forget()
        self.assertIsNone(x.result)

    def test_process_cleanup(self):
        # Must be a no-op that does not raise.
        self.tb.process_cleanup()

    def test_expires_as_int(self):
        tb = CacheBackend(backend='memory://', expires=10, app=self.app)
        self.assertEqual(tb.expires, 10)

    def test_unknown_backend_raises_ImproperlyConfigured(self):
        with self.assertRaises(ImproperlyConfigured):
            CacheBackend(backend='unknown://', app=self.app)


class MyMemcachedStringEncodingError(Exception):
    pass


class MemcachedClient(DummyClient):
    # Mimics the real memcache client's key-type strictness:
    # bytes keys are rejected on py3, unicode keys on py2.

    def set(self, key, value, *args, **kwargs):
        if PY3:
            key_t, must_be, not_be, cod = bytes, 'string', 'bytes', 'decode'
        else:
            key_t, must_be, not_be, cod = text_t, 'bytes', 'string', 'encode'
        if isinstance(key, key_t):
            raise MyMemcachedStringEncodingError(
                'Keys must be {0}, not {1}.  Convert your '
                'strings using mystring.{2}(charset)!'.format(
                    must_be, not_be, cod))
        return super(MemcachedClient, self).set(key, value, *args, **kwargs)


class MockCacheMixin(object):
    # Context managers that install fake 'memcache'/'pylibmc' modules in
    # sys.modules for the duration of a test, restoring any previous entry.

    @contextmanager
    def mock_memcache(self):
        memcache = types.ModuleType('memcache')
        memcache.Client = MemcachedClient
        memcache.Client.__module__ = memcache.__name__
        prev, sys.modules['memcache'] = sys.modules.get('memcache'), memcache
        try:
            yield True
        finally:
            if prev is not None:
                sys.modules['memcache'] = prev

    @contextmanager
    def mock_pylibmc(self):
        pylibmc = types.ModuleType('pylibmc')
        pylibmc.Client = MemcachedClient
        pylibmc.Client.__module__ = pylibmc.__name__
        prev = sys.modules.get('pylibmc')
        sys.modules['pylibmc'] = pylibmc
        try:
            yield True
        finally:
            if prev is not None:
                sys.modules['pylibmc'] = prev


class test_get_best_memcache(AppCase, MockCacheMixin):

    def test_pylibmc(self):
        # pylibmc is preferred when available.
        with self.mock_pylibmc():
            with reset_modules('celery.backends.cache'):
                from celery.backends import cache
                cache._imp = [None]  # clear the memoized implementation
                self.assertEqual(cache.get_best_memcache()[0].__module__,
                                 'pylibmc')

    def test_memcache(self):
        # Falls back to memcache when pylibmc is masked.
        with self.mock_memcache():
            with reset_modules('celery.backends.cache'):
                with mask_modules('pylibmc'):
                    from celery.backends import cache
                    cache._imp = [None]
                    self.assertEqual(cache.get_best_memcache()[0]().__module__,
                                     'memcache')

    def test_no_implementations(self):
        with mask_modules('pylibmc', 'memcache'):
            with reset_modules('celery.backends.cache'):
                from celery.backends import cache
                cache._imp = [None]
                with self.assertRaises(ImproperlyConfigured):
                    cache.get_best_memcache()

    def test_cached(self):
        # The chosen implementation is memoized in cache._imp.
        with self.mock_pylibmc():
            with reset_modules('celery.backends.cache'):
                from celery.backends import cache
                cache._imp = [None]
                cache.get_best_memcache()[0](behaviors={'foo': 'bar'})
                self.assertTrue(cache._imp[0])
                cache.get_best_memcache()[0]()

    def test_backends(self):
        from celery.backends.cache import backends
        with self.mock_memcache():
            for name, fun in items(backends):
                self.assertTrue(fun())


class test_memcache_key(AppCase, MockCacheMixin):
    # Both backends must accept task ids given as either text or bytes.

    def test_memcache_unicode_key(self):
        with self.mock_memcache():
            with reset_modules('celery.backends.cache'):
                with mask_modules('pylibmc'):
                    from celery.backends import cache
                    cache._imp = [None]
                    task_id, result = string(uuid()), 42
                    b = cache.CacheBackend(backend='memcache', app=self.app)
                    b.store_result(task_id, result, status=states.SUCCESS)
                    self.assertEqual(b.get_result(task_id), result)

    def test_memcache_bytes_key(self):
        with self.mock_memcache():
            with reset_modules('celery.backends.cache'):
                with mask_modules('pylibmc'):
                    from celery.backends import cache
                    cache._imp = [None]
                    task_id, result = str_to_bytes(uuid()), 42
                    b = cache.CacheBackend(backend='memcache', app=self.app)
                    b.store_result(task_id, result, status=states.SUCCESS)
                    self.assertEqual(b.get_result(task_id), result)

    def test_pylibmc_unicode_key(self):
        with reset_modules('celery.backends.cache'):
            with self.mock_pylibmc():
                from celery.backends import cache
                cache._imp = [None]
                task_id, result = string(uuid()), 42
                b = cache.CacheBackend(backend='memcache', app=self.app)
                b.store_result(task_id, result, status=states.SUCCESS)
                self.assertEqual(b.get_result(task_id), result)

    def test_pylibmc_bytes_key(self):
        with reset_modules('celery.backends.cache'):
            with self.mock_pylibmc():
                from celery.backends import cache
                cache._imp = [None]
                task_id, result = str_to_bytes(uuid()), 42
                b = cache.CacheBackend(backend='memcache', app=self.app)
                b.store_result(task_id, result, status=states.SUCCESS)
                self.assertEqual(b.get_result(task_id), result)
{ "content_hash": "9be3b73765d5d01c43c279b980ca075d", "timestamp": "", "source": "github", "line_count": 253, "max_line_length": 79, "avg_line_length": 36.426877470355734, "alnum_prop": 0.5861545138888888, "repo_name": "sunze/py_flask", "id": "051760a8def8d008c1ea5a738f3e3dd7283d40da", "size": "9216", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "venv/lib/python3.4/site-packages/celery/tests/backends/test_cache.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "5939" }, { "name": "CSS", "bytes": "11745" }, { "name": "HTML", "bytes": "34870" }, { "name": "JavaScript", "bytes": "23176" }, { "name": "Mako", "bytes": "7564" }, { "name": "Python", "bytes": "12266826" }, { "name": "Shell", "bytes": "3634" } ], "symlink_target": "" }
"""Sphinx build configuration for the PyTexturePacker documentation.

Generated by sphinx-quickstart; commented-out lines show the defaults and
are kept for reference.
"""

import sys
import os

# NOTE(review): sys/os are only needed if the sys.path line below is
# uncommented (e.g. for autodoc of local modules).
# If extensions (or modules to document with autodoc) are in another
# directory, add these directories to sys.path here. If the directory is
# relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))

# -- General configuration ------------------------------------------------

# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'

# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = []

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The encoding of source files.
#source_encoding = 'utf-8-sig'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'PyTexturePacker'
copyright = u'2016, wo1fSea'

# The version info for the project you're documenting, acts as replacement
# for |version| and |release|, also used in various other places throughout
# the built documents.
#
# The short X.Y version.
version = '1.0'
# The full version, including alpha/beta/rc tags.
release = '1.0 alpha'

# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None

# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']

# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None

# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True

# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True

# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'

# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []

# If true, keep warnings as "system message" paragraphs in the built
# documents.
#keep_warnings = False


# -- Options for HTML output ----------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'

# Theme options are theme-specific and customize the look and feel of a
# theme further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}

# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []

# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None

# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None

# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None

# The name of an image file (within the static path) to use as favicon of
# the docs. This file should be a Windows icon file (.ico) being 16x16 or
# 32x32 pixels large.
#html_favicon = None

# Add any paths that contain custom static files (such as style sheets)
# here, relative to this directory. They are copied after the builtin static
# files, so a file named "default.css" will overwrite the builtin
# "default.css".
html_static_path = ['_static']

# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []

# If not '', a 'Last updated on:' timestamp is inserted at every page
# bottom, using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'

# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True

# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}

# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}

# If false, no module index is generated.
#html_domain_indices = True

# If false, no index is generated.
#html_use_index = True

# If true, the index is split into individual pages for each letter.
#html_split_index = False

# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True

# If true, "Created using Sphinx" is shown in the HTML footer. Default is
# True.
#html_show_sphinx = True

# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True

# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''

# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None

# Output file base name for HTML help builder.
htmlhelp_basename = 'ReadtheDocsTemplatedoc'


# -- Options for LaTeX output ---------------------------------------------

latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',

# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',

# Additional stuff for the LaTeX preamble.
#'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  ('index', 'PyTexturePacker.tex', u'PyTexturePacker Documentation',
   u'wo1fSea', 'manual'),
]

# The name of an image file (relative to this directory) to place at the top
# of the title page.
#latex_logo = None

# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False

# If true, show page references after internal links.
#latex_show_pagerefs = False

# If true, show URL addresses after external links.
#latex_show_urls = False

# Documents to append as an appendix to all manuals.
#latex_appendices = []

# If false, no module index is generated.
#latex_domain_indices = True


# -- Options for manual page output ---------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'PyTexturePacker', u'PyTexturePacker Documentation',
     [u'PyTexturePacker'], 1)
]

# If true, show URL addresses after external links.
#man_show_urls = False


# -- Options for Texinfo output -------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'PyTexturePacker', u'PyTexturePacker Documentation',
   u'wo1fSea', 'PyTexturePacker', 'a python package',
   'Miscellaneous'),
]

# Documents to append as an appendix to all manuals.
#texinfo_appendices = []

# If false, no module index is generated.
#texinfo_domain_indices = True

# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
{ "content_hash": "953f81bc7ceb69961f04b229199b7f92", "timestamp": "", "source": "github", "line_count": 244, "max_line_length": 79, "avg_line_length": 31.85655737704918, "alnum_prop": 0.7078348128135855, "repo_name": "newnon/PyTexturePacker", "id": "72bd2b15eebd451a4fa4afc6a5a128946e984788", "size": "8208", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "docs/conf.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "48424" } ], "symlink_target": "" }
# NOTE(review): this module looks auto-generated (Azure SDK code generator
# style) — keep edits to comments only so regeneration diffs stay clean.
from copy import deepcopy
from typing import Any, Awaitable, TYPE_CHECKING

from msrest import Deserializer, Serializer

from azure.core.rest import AsyncHttpResponse, HttpRequest
from azure.mgmt.core import AsyncARMPipelineClient

from .. import models
from ._configuration import ContainerRegistryManagementClientConfiguration
from .operations import Operations, RegistriesOperations, ReplicationsOperations, WebhooksOperations

if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from azure.core.credentials_async import AsyncTokenCredential

class ContainerRegistryManagementClient:
    """ContainerRegistryManagementClient.

    :ivar registries: RegistriesOperations operations
    :vartype registries:
     azure.mgmt.containerregistry.v2017_10_01.aio.operations.RegistriesOperations
    :ivar operations: Operations operations
    :vartype operations: azure.mgmt.containerregistry.v2017_10_01.aio.operations.Operations
    :ivar replications: ReplicationsOperations operations
    :vartype replications:
     azure.mgmt.containerregistry.v2017_10_01.aio.operations.ReplicationsOperations
    :ivar webhooks: WebhooksOperations operations
    :vartype webhooks: azure.mgmt.containerregistry.v2017_10_01.aio.operations.WebhooksOperations
    :param credential: Credential needed for the client to connect to Azure.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: The Microsoft Azure subscription ID.
    :type subscription_id: str
    :param base_url: Service URL. Default value is "https://management.azure.com".
    :type base_url: str
    :keyword api_version: Api Version. Default value is "2017-10-01". Note that overriding this
     default value may result in unsupported behavior.
    :paramtype api_version: str
    :keyword int polling_interval: Default waiting time between two polls for LRO operations if no
     Retry-After header is present.
    """

    def __init__(
        self,
        credential: "AsyncTokenCredential",
        subscription_id: str,
        base_url: str = "https://management.azure.com",
        **kwargs: Any
    ) -> None:
        self._config = ContainerRegistryManagementClientConfiguration(credential=credential, subscription_id=subscription_id, **kwargs)
        self._client = AsyncARMPipelineClient(base_url=base_url, config=self._config, **kwargs)

        # Build the serializer/deserializer over every model class exported
        # by the generated models module.
        client_models = {k: v for k, v in models.__dict__.items() if isinstance(v, type)}
        self._serialize = Serializer(client_models)
        self._deserialize = Deserializer(client_models)
        # Validation is performed server-side for this API version.
        self._serialize.client_side_validation = False
        # Operation groups share the one pipeline client and (de)serializers.
        self.registries = RegistriesOperations(self._client, self._config, self._serialize, self._deserialize)
        self.operations = Operations(self._client, self._config, self._serialize, self._deserialize)
        self.replications = ReplicationsOperations(self._client, self._config, self._serialize, self._deserialize)
        self.webhooks = WebhooksOperations(self._client, self._config, self._serialize, self._deserialize)


    def _send_request(
        self,
        request: HttpRequest,
        **kwargs: Any
    ) -> Awaitable[AsyncHttpResponse]:
        """Runs the network request through the client's chained policies.

        >>> from azure.core.rest import HttpRequest
        >>> request = HttpRequest("GET", "https://www.example.org/")
        <HttpRequest [GET], url: 'https://www.example.org/'>
        >>> response = await client._send_request(request)
        <AsyncHttpResponse: 200 OK>

        For more information on this code flow, see https://aka.ms/azsdk/python/protocol/quickstart

        :param request: The network request you want to make. Required.
        :type request: ~azure.core.rest.HttpRequest
        :keyword bool stream: Whether the response payload will be streamed. Defaults to False.
        :return: The response of your network call. Does not do error handling on your response.
        :rtype: ~azure.core.rest.AsyncHttpResponse
        """

        # Copy so the caller's request object is never mutated when the
        # relative URL is expanded against the client's base URL.
        request_copy = deepcopy(request)
        request_copy.url = self._client.format_url(request_copy.url)
        return self._client.send_request(request_copy, **kwargs)

    async def close(self) -> None:
        await self._client.close()

    async def __aenter__(self) -> "ContainerRegistryManagementClient":
        await self._client.__aenter__()
        return self

    async def __aexit__(self, *exc_details) -> None:
        await self._client.__aexit__(*exc_details)
{ "content_hash": "e1bbd9aff429bda5849ac67a4f15a955", "timestamp": "", "source": "github", "line_count": 97, "max_line_length": 135, "avg_line_length": 46.55670103092783, "alnum_prop": 0.7174490699734278, "repo_name": "Azure/azure-sdk-for-python", "id": "b50cba9a5905ba075cd1634ddb93975d501a110c", "size": "4984", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2017_10_01/aio/_container_registry_management_client.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "1224" }, { "name": "Bicep", "bytes": "24196" }, { "name": "CSS", "bytes": "6089" }, { "name": "Dockerfile", "bytes": "4892" }, { "name": "HTML", "bytes": "12058" }, { "name": "JavaScript", "bytes": "8137" }, { "name": "Jinja", "bytes": "10377" }, { "name": "Jupyter Notebook", "bytes": "272022" }, { "name": "PowerShell", "bytes": "518535" }, { "name": "Python", "bytes": "715484989" }, { "name": "Shell", "bytes": "3631" } ], "symlink_target": "" }
"""Tests for widgy's link registry, LinkField, and the link form machinery."""
from __future__ import absolute_import

import copy

from django import forms
from django.test import TestCase

from widgy.models.links import (
    link_registry, get_link_field_from_model, LinkFormMixin, LinkFormField,
    get_composite_key, convert_linkable_to_choice, LinkRegistry, LinkField
)

from ..models import (
    LinkableThing, ThingWithLink, AnotherLinkableThing, LinkableThing3,
    Bucket, VersionPageThrough, ChildThingWithLink
)


class TestLinkField(TestCase):
    def test_inheritance(self):
        # the only field local to ChildThingWithLink is its pk.  The link
        # field is inherited, so it shouldn't be in local_fields.
        self.assertFalse(any(
            i.name == 'linkable_content_type'
            for i in ChildThingWithLink._meta.local_fields
        ))

    def test_copy(self):
        # a LinkField has a reference to a LinkRegistry.  Copying the
        # LinkField shouldn't copy the registry.  Copying the field happens
        # with model inheritance.
        registry = LinkRegistry()
        f1 = LinkField(link_registry=registry)
        f2 = copy.deepcopy(f1)
        # identity check: both fields must share the same registry object
        self.assertIs(f1._link_registry, f2._link_registry)


class TestLinkRelations(TestCase):
    def test_get_all_linkable_classes(self):
        self.assertIn(LinkableThing, link_registry)
        self.assertIn(AnotherLinkableThing, link_registry)

    def test_get_all_linker_classes(self):
        # Only models that declare a LinkField count as linkers.
        self.assertIn(ThingWithLink, link_registry.get_all_linker_classes())
        self.assertNotIn(Bucket, link_registry.get_all_linker_classes())
        self.assertNotIn(VersionPageThrough,
                         link_registry.get_all_linker_classes())

    def test_get_all_links_for_obj(self):
        linkable = LinkableThing.objects.create()
        self.assertEqual(len(list(link_registry.get_links(linkable))), 0)
        thing = ThingWithLink.objects.create(
            link=linkable,
        )
        self.assertEqual(list(link_registry.get_links(linkable)), [thing])

        # Links to a different linkable type are tracked independently.
        linkable2 = AnotherLinkableThing.objects.create()
        thing2 = ThingWithLink.objects.create(
            link=linkable2,
        )
        self.assertEqual(list(link_registry.get_links(linkable2)), [thing2])

    def test_get_all_possible_linkables(self):
        l1 = LinkableThing.objects.create()
        l2 = LinkableThing.objects.create()
        l3 = AnotherLinkableThing.objects.create()
        choices = get_link_field_from_model(ThingWithLink, 'link').get_choices()
        # Order of get_choices() is not asserted; compare as sorted lists
        # keyed by each object's composite key.
        keyfn = get_composite_key
        self.assertEqual(sorted(list(choices), key=keyfn),
                         sorted([l3, l1, l2], key=keyfn))


class LinkForm(LinkFormMixin, forms.ModelForm):
    # Minimal model form exposing the link as a LinkFormField.
    link = LinkFormField()

    class Meta:
        model = ThingWithLink
        fields = ('link',)


class TestLinkForm(TestCase):
    def test_save_and_create(self):
        page = LinkableThing.objects.create()
        form = LinkForm()
        choice = get_composite_key(page)
        form = LinkForm({
            'link': choice,
        })
        self.assertTrue(form.is_valid())
        instance = form.save(commit=False)
        self.assertEqual(instance.link, page)

        form2 = LinkForm({
            'link': choice,
        })
        # save without validating.
        form2.save(commit=False)

    def test_choices(self):
        # Choices are grouped by verbose_name_plural and each group is
        # ordered; groups themselves sort alphabetically.
        page1 = LinkableThing.objects.create(name='Z')
        page2 = LinkableThing.objects.create(name='a')
        page3 = AnotherLinkableThing.objects.create()
        page4 = LinkableThing3.objects.create()
        form = LinkForm()
        self.assertEqual(form.fields['link'].choices, [
            ('Another linkable things', [
                convert_linkable_to_choice(page3),
            ]),
            ('Linkable things', [
                convert_linkable_to_choice(page2),
                convert_linkable_to_choice(page1),
            ]),
            ('ZZZZZ should be last', [
                convert_linkable_to_choice(page4),
            ]),
        ])
{ "content_hash": "cffa40a88971998182fe8ce63c764946", "timestamp": "", "source": "github", "line_count": 120, "max_line_length": 92, "avg_line_length": 32.858333333333334, "alnum_prop": 0.6398681207202638, "repo_name": "j00bar/django-widgy", "id": "f87ef3d99d209475c016e3276936ce67b3b3895b", "size": "3943", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tests/core_tests/tests/test_links.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "120796" }, { "name": "HTML", "bytes": "63872" }, { "name": "JavaScript", "bytes": "605261" }, { "name": "Makefile", "bytes": "1150" }, { "name": "Python", "bytes": "941293" } ], "symlink_target": "" }
"""Management script: exposes a ``runserver`` command via Flask-Script."""
import click  # NOTE(review): unused in this file — remove or verify a side effect is intended

# NOTE(review): flask.ext.* namespace is deprecated (removed in Flask 1.0);
# modern imports would be ``from flask_script import Manager``.
from flask.ext.script import Manager

from shady import create_app, app

manager = Manager(app)


@manager.command
def runserver():
    "Runs the App"
    # Initialize the application before serving; host/port/threading all
    # come from the app's config.
    create_app()
    app.run(host = app.config['SERVE_HOST'],
            port = app.config['SERVE_PORT'],
            threaded = app.config['THREADED'])

if __name__ == "__main__":
    manager.run()
{ "content_hash": "19a8f21eef76415ef7071c1143cec0e2", "timestamp": "", "source": "github", "line_count": 17, "max_line_length": 46, "avg_line_length": 21.11764705882353, "alnum_prop": 0.6295264623955432, "repo_name": "PrashntS/shady-triangles", "id": "e8e771e16655679714b8f32ad4248d66c70a0059", "size": "448", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "server/manage.py", "mode": "33261", "license": "mit", "language": [ { "name": "CSS", "bytes": "852" }, { "name": "CoffeeScript", "bytes": "6787" }, { "name": "HTML", "bytes": "467" }, { "name": "JavaScript", "bytes": "2384185" }, { "name": "Python", "bytes": "1809" } ], "symlink_target": "" }
from __future__ import absolute_import from collections import OrderedDict from horizon.utils.memoized import memoized from openstack_dashboard.api import neutron from openstack_dashboard.contrib.developer.profiler import api as profiler neutronclient = neutron.neutronclient class IKEPolicy(neutron.NeutronAPIDictWrapper): """Wrapper for neutron VPN IKEPolicy.""" def __init__(self, apiresource): super(IKEPolicy, self).__init__(apiresource) class IPSecPolicy(neutron.NeutronAPIDictWrapper): """Wrapper for neutron VPN IPSecPolicy.""" def __init__(self, apiresource): super(IPSecPolicy, self).__init__(apiresource) class IPSecSiteConnection(neutron.NeutronAPIDictWrapper): """Wrapper for neutron IPSecSiteConnection.""" def __init__(self, apiresource): super(IPSecSiteConnection, self).__init__(apiresource) class VPNService(neutron.NeutronAPIDictWrapper): """Wrapper for neutron VPNService.""" def __init__(self, apiresource): super(VPNService, self).__init__(apiresource) @profiler.trace def vpnservice_create(request, **kwargs): """Create VPNService :param request: request context :param admin_state_up: admin state (default on) :param name: name for VPNService :param description: description for VPNService :param router_id: router id for router of VPNService :param subnet_id: subnet id for subnet of VPNService """ body = {'vpnservice': {'admin_state_up': kwargs['admin_state_up'], 'name': kwargs['name'], 'description': kwargs['description'], 'router_id': kwargs['router_id'], 'subnet_id': kwargs['subnet_id']} } vpnservice = neutronclient(request).create_vpnservice(body).get( 'vpnservice') return VPNService(vpnservice) @profiler.trace def vpnservice_list(request, **kwargs): return _vpnservice_list(request, expand_subnet=True, expand_router=True, expand_conns=True, **kwargs) def _vpnservice_list(request, expand_subnet=False, expand_router=False, expand_conns=False, **kwargs): vpnservices = neutronclient(request).list_vpnservices( **kwargs).get('vpnservices') if expand_subnet: 
subnets = neutron.subnet_list(request) subnet_dict = OrderedDict((s.id, s) for s in subnets) for s in vpnservices: s['subnet_name'] = subnet_dict.get(s['subnet_id']).cidr if expand_router: routers = neutron.router_list(request) router_dict = OrderedDict((r.id, r) for r in routers) for s in vpnservices: s['router_name'] = router_dict.get(s['router_id']).name_or_id if expand_conns: ipsecsiteconns = _ipsecsiteconnection_list(request, **kwargs) for s in vpnservices: s['ipsecsiteconns'] = [c.id for c in ipsecsiteconns if c.vpnservice_id == s['id']] return [VPNService(v) for v in vpnservices] @profiler.trace def vpnservice_get(request, vpnservice_id): return _vpnservice_get(request, vpnservice_id, expand_subnet=True, expand_router=True, expand_conns=True) def _vpnservice_get(request, vpnservice_id, expand_subnet=False, expand_router=False, expand_conns=False): vpnservice = neutronclient(request).show_vpnservice(vpnservice_id).get( 'vpnservice') if expand_subnet: vpnservice['subnet'] = neutron.subnet_get( request, vpnservice['subnet_id']) if expand_router: vpnservice['router'] = neutron.router_get( request, vpnservice['router_id']) if expand_conns: ipsecsiteconns = _ipsecsiteconnection_list(request) vpnservice['ipsecsiteconns'] = [c for c in ipsecsiteconns if c.vpnservice_id == vpnservice['id']] return VPNService(vpnservice) @profiler.trace def vpnservice_update(request, vpnservice_id, **kwargs): vpnservice = neutronclient(request).update_vpnservice( vpnservice_id, kwargs).get('vpnservice') return VPNService(vpnservice) @profiler.trace def vpnservice_delete(request, vpnservice_id): neutronclient(request).delete_vpnservice(vpnservice_id) @profiler.trace def ikepolicy_create(request, **kwargs): """Create IKEPolicy :param request: request context :param name: name for IKEPolicy :param description: description for IKEPolicy :param auth_algorithm: authorization algorithm for IKEPolicy :param encryption_algorithm: encryption algorithm for IKEPolicy :param ike_version: IKE 
version for IKEPolicy :param lifetime: Lifetime Units and Value for IKEPolicy :param pfs: Perfect Forward Secrecy for IKEPolicy :param phase1_negotiation_mode: IKE Phase1 negotiation mode for IKEPolicy """ body = {'ikepolicy': {'name': kwargs['name'], 'description': kwargs['description'], 'auth_algorithm': kwargs['auth_algorithm'], 'encryption_algorithm': kwargs['encryption_algorithm'], 'ike_version': kwargs['ike_version'], 'lifetime': kwargs['lifetime'], 'pfs': kwargs['pfs'], 'phase1_negotiation_mode': kwargs['phase1_negotiation_mode']} } ikepolicy = neutronclient(request).create_ikepolicy(body).get( 'ikepolicy') return IKEPolicy(ikepolicy) @profiler.trace def ikepolicy_list(request, **kwargs): return _ikepolicy_list(request, expand_conns=True, **kwargs) def _ikepolicy_list(request, expand_conns=False, **kwargs): ikepolicies = neutronclient(request).list_ikepolicies( **kwargs).get('ikepolicies') if expand_conns: ipsecsiteconns = _ipsecsiteconnection_list(request, **kwargs) for p in ikepolicies: p['ipsecsiteconns'] = [c.id for c in ipsecsiteconns if c.ikepolicy_id == p['id']] return [IKEPolicy(v) for v in ikepolicies] @profiler.trace def ikepolicy_get(request, ikepolicy_id): return _ikepolicy_get(request, ikepolicy_id, expand_conns=True) def _ikepolicy_get(request, ikepolicy_id, expand_conns=False): ikepolicy = neutronclient(request).show_ikepolicy( ikepolicy_id).get('ikepolicy') if expand_conns: ipsecsiteconns = _ipsecsiteconnection_list(request) ikepolicy['ipsecsiteconns'] = [c for c in ipsecsiteconns if c.ikepolicy_id == ikepolicy['id']] return IKEPolicy(ikepolicy) @profiler.trace def ikepolicy_update(request, ikepolicy_id, **kwargs): ikepolicy = neutronclient(request).update_ikepolicy( ikepolicy_id, kwargs).get('ikepolicy') return IKEPolicy(ikepolicy) @profiler.trace def ikepolicy_delete(request, ikepolicy_id): neutronclient(request).delete_ikepolicy(ikepolicy_id) @profiler.trace def ipsecpolicy_create(request, **kwargs): """Create IPSecPolicy :param request: 
request context :param name: name for IPSecPolicy :param description: description for IPSecPolicy :param auth_algorithm: authorization algorithm for IPSecPolicy :param encapsulation_mode: encapsulation mode for IPSecPolicy :param encryption_algorithm: encryption algorithm for IPSecPolicy :param lifetime: Lifetime Units and Value for IPSecPolicy :param pfs: Perfect Forward Secrecy for IPSecPolicy :param transform_protocol: Transform Protocol for IPSecPolicy """ body = {'ipsecpolicy': {'name': kwargs['name'], 'description': kwargs['description'], 'auth_algorithm': kwargs['auth_algorithm'], 'encapsulation_mode': kwargs['encapsulation_mode'], 'encryption_algorithm': kwargs['encryption_algorithm'], 'lifetime': kwargs['lifetime'], 'pfs': kwargs['pfs'], 'transform_protocol': kwargs['transform_protocol']} } ipsecpolicy = neutronclient(request).create_ipsecpolicy(body).get( 'ipsecpolicy') return IPSecPolicy(ipsecpolicy) @profiler.trace def ipsecpolicy_list(request, **kwargs): return _ipsecpolicy_list(request, expand_conns=True, **kwargs) def _ipsecpolicy_list(request, expand_conns=False, **kwargs): ipsecpolicies = neutronclient(request).list_ipsecpolicies( **kwargs).get('ipsecpolicies') if expand_conns: ipsecsiteconns = _ipsecsiteconnection_list(request, **kwargs) for p in ipsecpolicies: p['ipsecsiteconns'] = [c.id for c in ipsecsiteconns if c.ipsecpolicy_id == p['id']] return [IPSecPolicy(v) for v in ipsecpolicies] @profiler.trace def ipsecpolicy_get(request, ipsecpolicy_id): return _ipsecpolicy_get(request, ipsecpolicy_id, expand_conns=True) def _ipsecpolicy_get(request, ipsecpolicy_id, expand_conns=False): ipsecpolicy = neutronclient(request).show_ipsecpolicy( ipsecpolicy_id).get('ipsecpolicy') if expand_conns: ipsecsiteconns = _ipsecsiteconnection_list(request) ipsecpolicy['ipsecsiteconns'] = [c for c in ipsecsiteconns if (c.ipsecpolicy_id == ipsecpolicy['id'])] return IPSecPolicy(ipsecpolicy) @profiler.trace def ipsecpolicy_update(request, ipsecpolicy_id, **kwargs): 
ipsecpolicy = neutronclient(request).update_ipsecpolicy( ipsecpolicy_id, kwargs).get('ipsecpolicy') return IPSecPolicy(ipsecpolicy) @profiler.trace def ipsecpolicy_delete(request, ipsecpolicy_id): neutronclient(request).delete_ipsecpolicy(ipsecpolicy_id) @profiler.trace def ipsecsiteconnection_create(request, **kwargs): """Create IPSecSiteConnection :param request: request context :param name: name for IPSecSiteConnection :param description: description for IPSecSiteConnection :param dpd: dead peer detection action, interval and timeout :param ikepolicy_id: IKEPolicy associated with this connection :param initiator: initiator state :param ipsecpolicy_id: IPsecPolicy associated with this connection :param mtu: MTU size for the connection :param peer_address: Peer gateway public address :param peer_cidrs: remote subnet(s) in CIDR format :param peer_id: Peer router identity for authentication" :param psk: Pre-Shared Key string :param vpnservice_id: VPNService associated with this connection :param admin_state_up: admin state (default on) """ body = {'ipsec_site_connection': {'name': kwargs['name'], 'description': kwargs['description'], 'dpd': kwargs['dpd'], 'ikepolicy_id': kwargs['ikepolicy_id'], 'initiator': kwargs['initiator'], 'ipsecpolicy_id': kwargs['ipsecpolicy_id'], 'mtu': kwargs['mtu'], 'peer_address': kwargs['peer_address'], 'peer_cidrs': kwargs['peer_cidrs'], 'peer_id': kwargs['peer_id'], 'psk': kwargs['psk'], 'vpnservice_id': kwargs['vpnservice_id'], 'admin_state_up': kwargs['admin_state_up']} } ipsecsiteconnection = neutronclient(request).create_ipsec_site_connection( body).get('ipsec_site_connection') return IPSecSiteConnection(ipsecsiteconnection) @profiler.trace @memoized def ipsecsiteconnection_list(request, **kwargs): return _ipsecsiteconnection_list(request, expand_ikepolicies=True, expand_ipsecpolicies=True, expand_vpnservices=True, **kwargs) @memoized def _ipsecsiteconnection_list(request, expand_ikepolicies=False, expand_ipsecpolicies=False, 
expand_vpnservices=False, **kwargs): ipsecsiteconnections = neutronclient(request).list_ipsec_site_connections( **kwargs).get('ipsec_site_connections') if expand_ikepolicies: ikepolicies = _ikepolicy_list(request) policy_dict = OrderedDict((p.id, p) for p in ikepolicies) for c in ipsecsiteconnections: c['ikepolicy_name'] = policy_dict.get(c['ikepolicy_id']).name_or_id if expand_ipsecpolicies: ipsecpolicies = _ipsecpolicy_list(request) policy_dict = OrderedDict((p.id, p) for p in ipsecpolicies) for c in ipsecsiteconnections: c['ipsecpolicy_name'] = policy_dict.get(c['ipsecpolicy_id'] ).name_or_id if expand_vpnservices: vpnservices = _vpnservice_list(request) service_dict = OrderedDict((s.id, s) for s in vpnservices) for c in ipsecsiteconnections: c['vpnservice_name'] = service_dict.get(c['vpnservice_id'] ).name_or_id return [IPSecSiteConnection(v) for v in ipsecsiteconnections] @profiler.trace def ipsecsiteconnection_get(request, ipsecsiteconnection_id): return _ipsecsiteconnection_get(request, ipsecsiteconnection_id, expand_ikepolicies=True, expand_ipsecpolicies=True, expand_vpnservices=True) def _ipsecsiteconnection_get(request, ipsecsiteconnection_id, expand_ikepolicies, expand_ipsecpolicies, expand_vpnservices): ipsecsiteconnection = neutronclient(request).show_ipsec_site_connection( ipsecsiteconnection_id).get('ipsec_site_connection') if expand_ikepolicies: ipsecsiteconnection['ikepolicy'] = _ikepolicy_get( request, ipsecsiteconnection['ikepolicy_id']) if expand_ipsecpolicies: ipsecsiteconnection['ipsecpolicy'] = _ipsecpolicy_get( request, ipsecsiteconnection['ipsecpolicy_id']) if expand_vpnservices: ipsecsiteconnection['vpnservice'] = _vpnservice_get( request, ipsecsiteconnection['vpnservice_id']) return IPSecSiteConnection(ipsecsiteconnection) @profiler.trace def ipsecsiteconnection_update(request, ipsecsiteconnection_id, **kwargs): ipsecsiteconnection = neutronclient(request).update_ipsec_site_connection( ipsecsiteconnection_id, 
kwargs).get('ipsec_site_connection') return IPSecSiteConnection(ipsecsiteconnection) @profiler.trace def ipsecsiteconnection_delete(request, ipsecsiteconnection_id): neutronclient(request).delete_ipsec_site_connection(ipsecsiteconnection_id)
{ "content_hash": "4eab9850faaa764ba20c5a43f66e220d", "timestamp": "", "source": "github", "line_count": 383, "max_line_length": 79, "avg_line_length": 38.0443864229765, "alnum_prop": 0.6565781346510191, "repo_name": "kogotko/carburetor", "id": "81e11302d014c62754b059fe530f4ab2198a067e", "size": "15177", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "openstack_dashboard/api/vpn.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "9097503" }, { "name": "HTML", "bytes": "1650202" }, { "name": "JavaScript", "bytes": "4712562" }, { "name": "Makefile", "bytes": "557" }, { "name": "Python", "bytes": "5086985" }, { "name": "Shell", "bytes": "18571" } ], "symlink_target": "" }
import os import sqlite3 from unittest import TestCase from contextlib2 import ExitStack from logbook import NullHandler, Logger from six import with_metaclass, iteritems from toolz import flip import pandas as pd import responses from .core import ( create_daily_bar_data, create_minute_bar_data, make_simple_equity_info, tmp_asset_finder, tmp_dir, ) from ..data.data_portal import ( DataPortal, DEFAULT_MINUTE_HISTORY_PREFETCH, DEFAULT_DAILY_HISTORY_PREFETCH, ) from ..data.loader import ( get_benchmark_filename, INDEX_MAPPING, ) from ..data.minute_bars import ( BcolzMinuteBarReader, BcolzMinuteBarWriter, US_EQUITIES_MINUTES_PER_DAY, FUTURES_MINUTES_PER_DAY, ) from ..data.resample import ( minute_frame_to_session_frame, MinuteResampleSessionBarReader ) from ..data.us_equity_pricing import ( BcolzDailyBarReader, BcolzDailyBarWriter, SQLiteAdjustmentReader, SQLiteAdjustmentWriter, ) from ..finance.trading import TradingEnvironment from ..utils import factory from ..utils.classproperty import classproperty from ..utils.final import FinalMeta, final import catalyst from catalyst.assets import Equity, Future from catalyst.finance.asset_restrictions import NoRestrictions from catalyst.pipeline import SimplePipelineEngine from catalyst.pipeline.data import USEquityPricing from catalyst.pipeline.loaders import USEquityPricingLoader from catalyst.pipeline.loaders.testing import make_seeded_random_loader from catalyst.protocol import BarData from catalyst.utils.calendars import ( get_calendar, register_calendar) from catalyst.utils.paths import ensure_directory catalyst_dir = os.path.dirname(catalyst.__file__) class CatalystTestCase(with_metaclass(FinalMeta, TestCase)): """ Shared extensions to core unittest.TestCase. Overrides the default unittest setUp/tearDown functions with versions that use ExitStack to correctly clean up resources, even in the face of exceptions that occur during setUp/setUpClass. Subclasses **should not override setUp or setUpClass**! 
Instead, they should implement `init_instance_fixtures` for per-test-method resources, and `init_class_fixtures` for per-class resources. Resources that need to be cleaned up should be registered using either `enter_{class,instance}_context` or `add_{class,instance}_callback}. """ _in_setup = False @final @classmethod def setUpClass(cls): # Hold a set of all the "static" attributes on the class. These are # things that are not populated after the class was created like # methods or other class level attributes. cls._static_class_attributes = set(vars(cls)) cls._class_teardown_stack = ExitStack() try: cls._base_init_fixtures_was_called = False cls.init_class_fixtures() assert cls._base_init_fixtures_was_called, ( "CatalystTestCase.init_class_fixtures() was not called.\n" "This probably means that you overrode init_class_fixtures" " without calling super()." ) except: cls.tearDownClass() raise @classmethod def init_class_fixtures(cls): """ Override and implement this classmethod to register resources that should be created and/or torn down on a per-class basis. Subclass implementations of this should always invoke this with super() to ensure that fixture mixins work properly. """ if cls._in_setup: raise ValueError( 'Called init_class_fixtures from init_instance_fixtures.' 'Did you write super(..., self).init_class_fixtures() instead' ' of super(..., self).init_instance_fixtures()?', ) cls._base_init_fixtures_was_called = True @final @classmethod def tearDownClass(cls): # We need to get this before it's deleted by the loop. stack = cls._class_teardown_stack for name in set(vars(cls)) - cls._static_class_attributes: # Remove all of the attributes that were added after the class was # constructed. This cleans up any large test data that is class # scoped while still allowing subclasses to access class level # attributes. 
delattr(cls, name) stack.close() @final @classmethod def enter_class_context(cls, context_manager): """ Enter a context manager to be exited during the tearDownClass """ if cls._in_setup: raise ValueError( 'Attempted to enter a class context in init_instance_fixtures.' '\nDid you mean to call enter_instance_context?', ) return cls._class_teardown_stack.enter_context(context_manager) @final @classmethod def add_class_callback(cls, callback, *args, **kwargs): """ Register a callback to be executed during tearDownClass. Parameters ---------- callback : callable The callback to invoke at the end of the test suite. """ if cls._in_setup: raise ValueError( 'Attempted to add a class callback in init_instance_fixtures.' '\nDid you mean to call add_instance_callback?', ) return cls._class_teardown_stack.callback(callback, *args, **kwargs) @final def setUp(self): type(self)._in_setup = True self._pre_setup_attrs = set(vars(self)) self._instance_teardown_stack = ExitStack() try: self._init_instance_fixtures_was_called = False self.init_instance_fixtures() assert self._init_instance_fixtures_was_called, ( "CatalystTestCase.init_instance_fixtures() was not" " called.\n" "This probably means that you overrode" " init_instance_fixtures without calling super()." ) except: self.tearDown() raise finally: type(self)._in_setup = False def init_instance_fixtures(self): self._init_instance_fixtures_was_called = True @final def tearDown(self): # We need to get this before it's deleted by the loop. stack = self._instance_teardown_stack for attr in set(vars(self)) - self._pre_setup_attrs: delattr(self, attr) stack.close() @final def enter_instance_context(self, context_manager): """ Enter a context manager that should be exited during tearDown. """ return self._instance_teardown_stack.enter_context(context_manager) @final def add_instance_callback(self, callback): """ Register a callback to be executed during tearDown. 
Parameters ---------- callback : callable The callback to invoke at the end of each test. """ return self._instance_teardown_stack.callback(callback) def alias(attr_name): """Make a fixture attribute an alias of another fixture's attribute by default. Parameters ---------- attr_name : str The name of the attribute to alias. Returns ------- p : classproperty A class property that does the property aliasing. Examples -------- >>> class C(object): ... attr = 1 ... >>> class D(C): ... attr_alias = alias('attr') ... >>> D.attr 1 >>> D.attr_alias 1 >>> class E(D): ... attr_alias = 2 ... >>> E.attr 1 >>> E.attr_alias 2 """ return classproperty(flip(getattr, attr_name)) class WithDefaultDateBounds(object): """ CatalystTestCase mixin which makes it possible to synchronize date bounds across fixtures. This fixture should always be the last fixture in bases of any fixture or test case that uses it. Attributes ---------- START_DATE : datetime END_DATE : datetime The date bounds to be used for fixtures that want to have consistent dates. """ START_DATE = pd.Timestamp('2016-01-03', tz='utc') END_DATE = pd.Timestamp('2016-12-29', tz='utc') class WithLogger(object): """ CatalystTestCase mixin providing cls.log_handler as an instance-level fixture. After init_instance_fixtures has been called `self.log_handler` will be a new ``logbook.NullHandler``. Methods ------- make_log_handler() -> logbook.LogHandler A class method which constructs the new log handler object. By default this will construct a ``NullHandler``. """ make_log_handler = NullHandler @classmethod def init_class_fixtures(cls): super(WithLogger, cls).init_class_fixtures() cls.log = Logger() cls.log_handler = cls.enter_class_context( cls.make_log_handler().applicationbound(), ) class WithAssetFinder(WithDefaultDateBounds): """ CatalystTestCase mixin providing cls.asset_finder as a class-level fixture. After init_class_fixtures has been called, `cls.asset_finder` is populated with an AssetFinder. 
Attributes ---------- ASSET_FINDER_EQUITY_SIDS : iterable[int] The default sids to construct equity data for. ASSET_FINDER_EQUITY_SYMBOLS : iterable[str] The default symbols to use for the equities. ASSET_FINDER_EQUITY_START_DATE : datetime The default start date to create equity data for. This defaults to ``START_DATE``. ASSET_FINDER_EQUITY_END_DATE : datetime The default end date to create equity data for. This defaults to ``END_DATE``. Methods ------- make_equity_info() -> pd.DataFrame A class method which constructs the dataframe of equity info to write to the class's asset db. By default this is empty. make_futures_info() -> pd.DataFrame A class method which constructs the dataframe of futures contract info to write to the class's asset db. By default this is empty. make_exchanges_info() -> pd.DataFrame A class method which constructs the dataframe of exchange information to write to the class's assets db. By default this is empty. make_root_symbols_info() -> pd.DataFrame A class method which constructs the dataframe of root symbols information to write to the class's assets db. By default this is empty. make_asset_finder_db_url() -> string A class method which returns the URL at which to create the SQLAlchemy engine. By default provides a URL for an in-memory database. make_asset_finder() -> pd.DataFrame A class method which constructs the actual asset finder object to use for the class. If this method is overridden then the ``make_*_info`` methods may not be respected. 
See Also -------- catalyst.testing.make_simple_equity_info catalyst.testing.make_jagged_equity_info catalyst.testing.make_rotating_equity_info catalyst.testing.make_future_info catalyst.testing.make_commodity_future_info """ ASSET_FINDER_EQUITY_SIDS = ord('A'), ord('B'), ord('C') ASSET_FINDER_EQUITY_SYMBOLS = None ASSET_FINDER_EQUITY_START_DATE = alias('START_DATE') ASSET_FINDER_EQUITY_END_DATE = alias('END_DATE') @classmethod def _make_info(cls): return None make_futures_info = _make_info make_exchanges_info = _make_info make_root_symbols_info = _make_info make_equity_supplementary_mappings = _make_info del _make_info @classmethod def make_equity_info(cls): register_calendar("TEST", get_calendar("NYSE"), force=True) return make_simple_equity_info( cls.ASSET_FINDER_EQUITY_SIDS, cls.ASSET_FINDER_EQUITY_START_DATE, cls.ASSET_FINDER_EQUITY_END_DATE, cls.ASSET_FINDER_EQUITY_SYMBOLS, ) @classmethod def make_asset_finder_db_url(cls): return 'sqlite:///:memory:' @classmethod def make_asset_finder(cls): """Returns a new AssetFinder Returns ------- asset_finder : catalyst.assets.AssetFinder """ return cls.enter_class_context(tmp_asset_finder( url=cls.make_asset_finder_db_url(), equities=cls.make_equity_info(), futures=cls.make_futures_info(), exchanges=cls.make_exchanges_info(), root_symbols=cls.make_root_symbols_info(), equity_supplementary_mappings=( cls.make_equity_supplementary_mappings() ), )) @classmethod def init_class_fixtures(cls): super(WithAssetFinder, cls).init_class_fixtures() cls.asset_finder = cls.make_asset_finder() class WithTradingCalendars(object): """ CatalystTestCase mixin providing cls.trading_calendar, cls.all_trading_calendars, cls.trading_calendar_for_asset_type as a class-level fixture. 
After ``init_class_fixtures`` has been called: - `cls.trading_calendar` is populated with a default of the nyse trading calendar for compatibility with existing tests - `cls.all_trading_calendars` is populated with the trading calendars keyed by name, - `cls.trading_calendar_for_asset_type` is populated with the trading calendars keyed by the asset type which uses the respective calendar. Attributes ---------- TRADING_CALENDAR_STRS : iterable iterable of identifiers of the calendars to use. TRADING_CALENDAR_FOR_ASSET_TYPE : dict A dictionary which maps asset type names to the calendar associated with that asset type. """ TRADING_CALENDAR_STRS = ('NYSE',) TRADING_CALENDAR_FOR_ASSET_TYPE = {Equity: 'NYSE', Future: 'us_futures', } TRADING_CALENDAR_FOR_EXCHANGE = {} # For backwards compatibility, exisitng tests and fixtures refer to # `trading_calendar` with the assumption that the value is the NYSE # calendar. TRADING_CALENDAR_PRIMARY_CAL = 'NYSE' @classmethod def init_class_fixtures(cls): super(WithTradingCalendars, cls).init_class_fixtures() cls.trading_calendars = {} for cal_str in ( set(cls.TRADING_CALENDAR_STRS) | {cls.TRADING_CALENDAR_PRIMARY_CAL} ): # Set name to allow aliasing. calendar = get_calendar(cal_str) setattr(cls, '{0}_calendar'.format(cal_str.lower()), calendar) cls.trading_calendars[cal_str] = calendar for asset_type, cal_str in iteritems( cls.TRADING_CALENDAR_FOR_ASSET_TYPE): calendar = get_calendar(cal_str) cls.trading_calendars[asset_type] = calendar for exchange, cal_str in iteritems(cls.TRADING_CALENDAR_FOR_EXCHANGE): register_calendar(exchange, get_calendar(cal_str)) cls.trading_calendars[exchange] = get_calendar(cal_str) cls.trading_calendar = cls.trading_calendars[ cls.TRADING_CALENDAR_PRIMARY_CAL] class WithTradingEnvironment(WithAssetFinder, WithTradingCalendars, WithDefaultDateBounds): """ CatalystTestCase mixin providing cls.env as a class-level fixture. 
After ``init_class_fixtures`` has been called, `cls.env` is populated with a trading environment whose `asset_finder` is the result of `cls.make_asset_finder`. Attributes ---------- TRADING_ENV_MIN_DATE : datetime The min_date to forward to the constructed TradingEnvironment. TRADING_ENV_MAX_DATE : datetime The max date to forward to the constructed TradingEnvironment. TRADING_ENV_TRADING_CALENDAR : pd.DatetimeIndex The trading calendar to use for the class's TradingEnvironment. TRADING_ENV_FUTURE_CHAIN_PREDICATES : dict The roll predicates to apply when creating contract chains. Methods ------- make_load_function() -> callable A class method that returns the ``load`` argument to pass to the constructor of ``TradingEnvironment`` for this class. The signature for the callable returned is: ``(datetime, pd.DatetimeIndex, str) -> (pd.Series, pd.DataFrame)`` make_trading_environment() -> TradingEnvironment A class method that constructs the trading environment for the class. If this is overridden then ``make_load_function`` or the class attributes may not be respected. See Also -------- :class:`catalyst.finance.trading.TradingEnvironment` """ TRADING_ENV_FUTURE_CHAIN_PREDICATES = None MARKET_DATA_DIR = os.path.join(catalyst_dir, 'resources', 'market_data') @classmethod def make_load_function(cls): def load(*args, **kwargs): symbol = 'SPY' filename = get_benchmark_filename(symbol) source_path = os.path.join(cls.MARKET_DATA_DIR, filename) benchmark_returns = \ pd.Series.from_csv(source_path).tz_localize('UTC') filename = INDEX_MAPPING[symbol][1] source_path = os.path.join(cls.MARKET_DATA_DIR, filename) treasury_curves = \ pd.DataFrame.from_csv(source_path).tz_localize('UTC') # The TradingEnvironment ordinarily uses cached benchmark returns # and treasury curves data, but when running the catalyst tests # this cache is not always updated to include the appropriate dates # required by both the futures and equity calendars. 
In order to # create more reliable and consistent data throughout the entirety # of the tests, we read static benchmark returns and treasury curve # csv files from source. If a test using the TradingEnvironment # fixture attempts to run outside of the static date range of the # csv files, raise an exception warning the user to either update # the csv files in source or to use a date range within the current # bounds. static_start_date = benchmark_returns.index[0].date() static_end_date = benchmark_returns.index[-1].date() warning_message = ( 'The TradingEnvironment fixture uses static data between ' '{static_start} and {static_end}. To use a start and end date ' 'of {given_start} and {given_end} you will have to update the ' 'files in {resource_dir} to include the missing dates.'.format( static_start=static_start_date, static_end=static_end_date, given_start=cls.START_DATE.date(), given_end=cls.END_DATE.date(), resource_dir=cls.MARKET_DATA_DIR, ) ) if cls.START_DATE.date() < static_start_date or \ cls.END_DATE.date() > static_end_date: raise AssertionError(warning_message) return benchmark_returns, treasury_curves return load @classmethod def make_trading_environment(cls): return TradingEnvironment( load=cls.make_load_function(), asset_db_path=cls.asset_finder.engine, trading_calendar=cls.trading_calendar, future_chain_predicates=cls.TRADING_ENV_FUTURE_CHAIN_PREDICATES, ) @classmethod def init_class_fixtures(cls): super(WithTradingEnvironment, cls).init_class_fixtures() cls.env = cls.make_trading_environment() class WithSimParams(WithTradingEnvironment): """ CatalystTestCase mixin providing cls.sim_params as a class level fixture. The arguments used to construct the trading environment may be overridded by putting ``SIM_PARAMS_{argname}`` in the class dict except for the trading environment which is overridden with the mechanisms provided by ``WithTradingEnvironment``. 
Attributes ---------- SIM_PARAMS_YEAR : int SIM_PARAMS_CAPITAL_BASE : float SIM_PARAMS_NUM_DAYS : int SIM_PARAMS_DATA_FREQUENCY : {'daily', 'minute'} SIM_PARAMS_EMISSION_RATE : {'daily', 'minute'} Forwarded to ``factory.create_simulation_parameters``. SIM_PARAMS_START : datetime SIM_PARAMS_END : datetime Forwarded to ``factory.create_simulation_parameters``. If not explicitly overridden these will be ``START_DATE`` and ``END_DATE`` See Also -------- catalyst.utils.factory.create_simulation_parameters """ SIM_PARAMS_YEAR = None SIM_PARAMS_CAPITAL_BASE = 1.0e5 SIM_PARAMS_NUM_DAYS = None SIM_PARAMS_DATA_FREQUENCY = 'daily' SIM_PARAMS_EMISSION_RATE = 'daily' SIM_PARAMS_START = alias('START_DATE') SIM_PARAMS_END = alias('END_DATE') @classmethod def make_simparams(cls): return factory.create_simulation_parameters( year=cls.SIM_PARAMS_YEAR, start=cls.SIM_PARAMS_START, end=cls.SIM_PARAMS_END, num_days=cls.SIM_PARAMS_NUM_DAYS, capital_base=cls.SIM_PARAMS_CAPITAL_BASE, data_frequency=cls.SIM_PARAMS_DATA_FREQUENCY, emission_rate=cls.SIM_PARAMS_EMISSION_RATE, trading_calendar=cls.trading_calendar, ) @classmethod def init_class_fixtures(cls): super(WithSimParams, cls).init_class_fixtures() cls.sim_params = cls.make_simparams() class WithTradingSessions(WithTradingCalendars, WithDefaultDateBounds): """ CatalystTestCase mixin providing cls.trading_days, cls.all_trading_sessions as a class-level fixture. After init_class_fixtures has been called, `cls.all_trading_sessions` is populated with a dictionary of calendar name to the DatetimeIndex containing the calendar trading days ranging from: (DATA_MAX_DAY - (cls.TRADING_DAY_COUNT) -> DATA_MAX_DAY) `cls.trading_days`, for compatibility with existing tests which make the assumption that trading days are equity only, defaults to the nyse trading sessions. Attributes ---------- DATA_MAX_DAY : datetime The most recent trading day in the calendar. TRADING_DAY_COUNT : int The number of days to put in the calendar. 
The default value of ``TRADING_DAY_COUNT`` is 126 (half a trading-year). Inheritors can override TRADING_DAY_COUNT to request more or less data. """ DATA_MIN_DAY = alias('START_DATE') DATA_MAX_DAY = alias('END_DATE') # For backwards compatibility, exisitng tests and fixtures refer to # `trading_days` with the assumption that the value is days of the NYSE # calendar. trading_days = alias('nyse_sessions') @classmethod def init_class_fixtures(cls): super(WithTradingSessions, cls).init_class_fixtures() cls.trading_sessions = {} for cal_str in cls.TRADING_CALENDAR_STRS: trading_calendar = cls.trading_calendars[cal_str] sessions = trading_calendar.sessions_in_range( cls.DATA_MIN_DAY, cls.DATA_MAX_DAY) # Set name for aliasing. setattr(cls, '{0}_sessions'.format(cal_str.lower()), sessions) cls.trading_sessions[cal_str] = sessions for exchange, cal_str in iteritems(cls.TRADING_CALENDAR_FOR_EXCHANGE): trading_calendar = cls.trading_calendars[cal_str] sessions = trading_calendar.sessions_in_range( cls.DATA_MIN_DAY, cls.DATA_MAX_DAY) cls.trading_sessions[exchange] = sessions class WithTmpDir(object): """ CatalystTestCase mixing providing cls.tmpdir as a class-level fixture. After init_class_fixtures has been called, `cls.tmpdir` is populated with a `testfixtures.TempDirectory` object whose path is `cls.TMP_DIR_PATH`. Attributes ---------- TMP_DIR_PATH : str The path to the new directory to create. By default this is None which will create a unique directory in /tmp. """ TMP_DIR_PATH = None @classmethod def init_class_fixtures(cls): super(WithTmpDir, cls).init_class_fixtures() cls.tmpdir = cls.enter_class_context( tmp_dir(path=cls.TMP_DIR_PATH), ) class WithInstanceTmpDir(object): """ CatalystTestCase mixing providing self.tmpdir as an instance-level fixture. After init_instance_fixtures has been called, `self.tmpdir` is populated with a `testfixtures.TempDirectory` object whose path is `cls.TMP_DIR_PATH`. 
Attributes ---------- INSTANCE_TMP_DIR_PATH : str The path to the new directory to create. By default this is None which will create a unique directory in /tmp. """ INSTANCE_TMP_DIR_PATH = None def init_instance_fixtures(self): super(WithInstanceTmpDir, self).init_instance_fixtures() self.instance_tmpdir = self.enter_instance_context( tmp_dir(path=self.INSTANCE_TMP_DIR_PATH), ) class WithEquityDailyBarData(WithTradingEnvironment): """ CatalystTestCase mixin providing cls.make_equity_daily_bar_data. Attributes ---------- EQUITY_DAILY_BAR_START_DATE : Timestamp The date at to which to start creating data. This defaults to ``START_DATE``. EQUITY_DAILY_BAR_END_DATE = Timestamp The end date up to which to create data. This defaults to ``END_DATE``. EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE : bool If this flag is set, `make_equity_daily_bar_data` will read data from the minute bars defined by `WithEquityMinuteBarData`. The current default is `False`, but could be `True` in the future. Methods ------- make_equity_daily_bar_data() -> iterable[(int, pd.DataFrame)] A class method that returns an iterator of (sid, dataframe) pairs which will be written to the bcolz files that the class's ``BcolzDailyBarReader`` will read from. By default this creates some simple sythetic data with :func:`~catalyst.testing.create_daily_bar_data` See Also -------- WithEquityMinuteBarData catalyst.testing.create_daily_bar_data """ EQUITY_DAILY_BAR_USE_FULL_CALENDAR = False EQUITY_DAILY_BAR_START_DATE = alias('START_DATE') EQUITY_DAILY_BAR_END_DATE = alias('END_DATE') EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE = None @classproperty def EQUITY_DAILY_BAR_LOOKBACK_DAYS(cls): # If we're sourcing from minute data, then we almost certainly want the # minute bar calendar to be aligned with the daily bar calendar, so # re-use the same lookback parameter. 
if cls.EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE: return cls.EQUITY_MINUTE_BAR_LOOKBACK_DAYS else: return 0 @classmethod def _make_equity_daily_bar_from_minute(cls): assert issubclass(cls, WithEquityMinuteBarData), \ "Can't source daily data from minute without minute data!" assets = cls.asset_finder.retrieve_all(cls.asset_finder.equities_sids) minute_data = dict(cls.make_equity_minute_bar_data()) for asset in assets: yield asset.sid, minute_frame_to_session_frame( minute_data[asset.sid], cls.trading_calendars[Equity]) @classmethod def make_equity_daily_bar_data(cls): # Requires a WithEquityMinuteBarData to come before in the MRO. # Resample that data so that daily and minute bar data are aligned. if cls.EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE: return cls._make_equity_daily_bar_from_minute() else: return create_daily_bar_data( cls.equity_daily_bar_days, cls.asset_finder.equities_sids, ) @classmethod def init_class_fixtures(cls): super(WithEquityDailyBarData, cls).init_class_fixtures() trading_calendar = cls.trading_calendars[Equity] if cls.EQUITY_DAILY_BAR_USE_FULL_CALENDAR: days = trading_calendar.all_sessions else: if trading_calendar.is_session(cls.EQUITY_DAILY_BAR_START_DATE): first_session = cls.EQUITY_DAILY_BAR_START_DATE else: first_session = trading_calendar.minute_to_session_label( pd.Timestamp(cls.EQUITY_DAILY_BAR_START_DATE) ) if cls.EQUITY_DAILY_BAR_LOOKBACK_DAYS > 0: first_session = trading_calendar.sessions_window( first_session, -1 * cls.EQUITY_DAILY_BAR_LOOKBACK_DAYS )[0] days = trading_calendar.sessions_in_range( first_session, cls.EQUITY_DAILY_BAR_END_DATE, ) cls.equity_daily_bar_days = days class WithBcolzEquityDailyBarReader(WithEquityDailyBarData, WithTmpDir): """ CatalystTestCase mixin providing cls.bcolz_daily_bar_path, cls.bcolz_daily_bar_ctable, and cls.bcolz_equity_daily_bar_reader class level fixtures. After init_class_fixtures has been called: - `cls.bcolz_daily_bar_path` is populated with `cls.tmpdir.getpath(cls.BCOLZ_DAILY_BAR_PATH)`. 
- `cls.bcolz_daily_bar_ctable` is populated with data returned from `cls.make_equity_daily_bar_data`. By default this calls :func:`catalyst.pipeline.loaders.synthetic.make_equity_daily_bar_data`. - `cls.bcolz_equity_daily_bar_reader` is a daily bar reader pointing to the directory that was just written to. Attributes ---------- BCOLZ_DAILY_BAR_PATH : str The path inside the tmpdir where this will be written. EQUITY_DAILY_BAR_LOOKBACK_DAYS : int The number of days of data to add before the first day. This is used when a test needs to use history, in which case this should be set to the largest history window that will be requested. EQUITY_DAILY_BAR_USE_FULL_CALENDAR : bool If this flag is set the ``equity_daily_bar_days`` will be the full set of trading days from the trading environment. This flag overrides ``EQUITY_DAILY_BAR_LOOKBACK_DAYS``. BCOLZ_DAILY_BAR_READ_ALL_THRESHOLD : int If this flag is set, use the value as the `read_all_threshold` parameter to BcolzDailyBarReader, otherwise use the default value. EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE : bool If this flag is set, `make_equity_daily_bar_data` will read data from the minute bar reader defined by a `WithBcolzEquityMinuteBarReader`. Methods ------- make_bcolz_daily_bar_rootdir_path() -> string A class method that returns the path for the rootdir of the daily bars ctable. By default this is a subdirectory BCOLZ_DAILY_BAR_PATH in the shared temp directory. See Also -------- WithBcolzEquityMinuteBarReader WithDataPortal catalyst.testing.create_daily_bar_data """ BCOLZ_DAILY_BAR_PATH = 'daily_equity_pricing.bcolz' BCOLZ_DAILY_BAR_READ_ALL_THRESHOLD = None EQUITY_DAILY_BAR_SOURCE_FROM_MINUTE = False # allows WithBcolzEquityDailyBarReaderFromCSVs to call the # `write_csvs`method without needing to reimplement `init_class_fixtures` _write_method_name = 'write' # What to do when data being written is invalid, e.g. nan, inf, etc. 
# options are: 'warn', 'raise', 'ignore' INVALID_DATA_BEHAVIOR = 'warn' @classmethod def make_bcolz_daily_bar_rootdir_path(cls): return cls.tmpdir.makedir(cls.BCOLZ_DAILY_BAR_PATH) @classmethod def init_class_fixtures(cls): super(WithBcolzEquityDailyBarReader, cls).init_class_fixtures() cls.bcolz_daily_bar_path = p = cls.make_bcolz_daily_bar_rootdir_path() days = cls.equity_daily_bar_days trading_calendar = cls.trading_calendars[Equity] cls.bcolz_daily_bar_ctable = t = getattr( BcolzDailyBarWriter(p, trading_calendar, days[0], days[-1]), cls._write_method_name, )( cls.make_equity_daily_bar_data(), invalid_data_behavior=cls.INVALID_DATA_BEHAVIOR ) if cls.BCOLZ_DAILY_BAR_READ_ALL_THRESHOLD is not None: cls.bcolz_equity_daily_bar_reader = BcolzDailyBarReader( t, cls.BCOLZ_DAILY_BAR_READ_ALL_THRESHOLD) else: cls.bcolz_equity_daily_bar_reader = BcolzDailyBarReader(t) class WithBcolzEquityDailyBarReaderFromCSVs(WithBcolzEquityDailyBarReader): """ CatalystTestCase mixin that provides cls.bcolz_equity_daily_bar_reader from a mapping of sids to CSV file paths. """ _write_method_name = 'write_csvs' def _trading_days_for_minute_bars(calendar, start_date, end_date, lookback_days): first_session = calendar.minute_to_session_label(start_date) if lookback_days > 0: first_session = calendar.sessions_window( first_session, -1 * lookback_days )[0] return calendar.sessions_in_range(first_session, end_date) class _WithMinuteBarDataBase(WithTradingEnvironment): MINUTE_BAR_LOOKBACK_DAYS = 0 MINUTE_BAR_START_DATE = alias('START_DATE') MINUTE_BAR_END_DATE = alias('END_DATE') class WithEquityMinuteBarData(_WithMinuteBarDataBase): """ CatalystTestCase mixin providing cls.equity_minute_bar_days. After init_class_fixtures has been called: - `cls.equity_minute_bar_days` has the range over which data has been generated. Attributes ---------- EQUITY_MINUTE_BAR_LOOKBACK_DAYS : int The number of days of data to add before the first day. 
This is used when a test needs to use history, in which case this should be set to the largest history window that will be requested. EQUITY_MINUTE_BAR_START_DATE : Timestamp The date at to which to start creating data. This defaults to ``START_DATE``. EQUITY_MINUTE_BAR_END_DATE = Timestamp The end date up to which to create data. This defaults to ``END_DATE``. Methods ------- make_equity_minute_bar_data() -> iterable[(int, pd.DataFrame)] Classmethod producing an iterator of (sid, minute_data) pairs. The default implementation invokes catalyst.testing.core.create_minute_bar_data. See Also -------- WithEquityDailyBarData catalyst.testing.create_minute_bar_data """ EQUITY_MINUTE_BAR_LOOKBACK_DAYS = alias('MINUTE_BAR_LOOKBACK_DAYS') EQUITY_MINUTE_BAR_START_DATE = alias('MINUTE_BAR_START_DATE') EQUITY_MINUTE_BAR_END_DATE = alias('MINUTE_BAR_END_DATE') @classmethod def make_equity_minute_bar_data(cls): trading_calendar = cls.trading_calendars[Equity] return create_minute_bar_data( trading_calendar.minutes_for_sessions_in_range( cls.equity_minute_bar_days[0], cls.equity_minute_bar_days[-1], ), cls.asset_finder.equities_sids, ) @classmethod def init_class_fixtures(cls): super(WithEquityMinuteBarData, cls).init_class_fixtures() trading_calendar = cls.trading_calendars[Equity] cls.equity_minute_bar_days = _trading_days_for_minute_bars( trading_calendar, pd.Timestamp(cls.EQUITY_MINUTE_BAR_START_DATE), pd.Timestamp(cls.EQUITY_MINUTE_BAR_END_DATE), cls.EQUITY_MINUTE_BAR_LOOKBACK_DAYS ) class WithFutureMinuteBarData(_WithMinuteBarDataBase): """ CatalystTestCase mixin providing cls.future_minute_bar_days. After init_class_fixtures has been called: - `cls.future_minute_bar_days` has the range over which data has been generated. Attributes ---------- FUTURE_MINUTE_BAR_LOOKBACK_DAYS : int The number of days of data to add before the first day. This is used when a test needs to use history, in which case this should be set to the largest history window that will be requested. 
FUTURE_MINUTE_BAR_START_DATE : Timestamp The date at to which to start creating data. This defaults to ``START_DATE``. FUTURE_MINUTE_BAR_END_DATE = Timestamp The end date up to which to create data. This defaults to ``END_DATE``. Methods ------- make_future_minute_bar_data() -> iterable[(int, pd.DataFrame)] A class method that returns a dict mapping sid to dataframe which will be written to into the the format of the inherited class which writes the minute bar data for use by a reader. By default this creates some simple sythetic data with :func:`~catalyst.testing.create_minute_bar_data` See Also -------- catalyst.testing.create_minute_bar_data """ FUTURE_MINUTE_BAR_LOOKBACK_DAYS = alias('MINUTE_BAR_LOOKBACK_DAYS') FUTURE_MINUTE_BAR_START_DATE = alias('MINUTE_BAR_START_DATE') FUTURE_MINUTE_BAR_END_DATE = alias('MINUTE_BAR_END_DATE') @classmethod def make_future_minute_bar_data(cls): trading_calendar = get_calendar('us_futures') return create_minute_bar_data( trading_calendar.minutes_for_sessions_in_range( cls.future_minute_bar_days[0], cls.future_minute_bar_days[-1], ), cls.asset_finder.futures_sids, ) @classmethod def init_class_fixtures(cls): super(WithFutureMinuteBarData, cls).init_class_fixtures() trading_calendar = get_calendar('us_futures') cls.future_minute_bar_days = _trading_days_for_minute_bars( trading_calendar, pd.Timestamp(cls.FUTURE_MINUTE_BAR_START_DATE), pd.Timestamp(cls.FUTURE_MINUTE_BAR_END_DATE), cls.FUTURE_MINUTE_BAR_LOOKBACK_DAYS ) class WithBcolzEquityMinuteBarReader(WithEquityMinuteBarData, WithTmpDir): """ CatalystTestCase mixin providing cls.bcolz_minute_bar_path, cls.bcolz_minute_bar_ctable, and cls.bcolz_equity_minute_bar_reader class level fixtures. After init_class_fixtures has been called: - `cls.bcolz_minute_bar_path` is populated with `cls.tmpdir.getpath(cls.BCOLZ_MINUTE_BAR_PATH)`. - `cls.bcolz_minute_bar_ctable` is populated with data returned from `cls.make_equity_minute_bar_data`. 
By default this calls :func:`catalyst.pipeline.loaders.synthetic.make_equity_minute_bar_data`. - `cls.bcolz_equity_minute_bar_reader` is a minute bar reader pointing to the directory that was just written to. Attributes ---------- BCOLZ_MINUTE_BAR_PATH : str The path inside the tmpdir where this will be written. Methods ------- make_bcolz_minute_bar_rootdir_path() -> string A class method that returns the path for the directory that contains the minute bar ctables. By default this is a subdirectory BCOLZ_MINUTE_BAR_PATH in the shared temp directory. See Also -------- WithBcolzEquityDailyBarReader WithDataPortal catalyst.testing.create_minute_bar_data """ BCOLZ_EQUITY_MINUTE_BAR_PATH = 'minute_equity_pricing' @classmethod def make_bcolz_equity_minute_bar_rootdir_path(cls): return cls.tmpdir.makedir(cls.BCOLZ_EQUITY_MINUTE_BAR_PATH) @classmethod def init_class_fixtures(cls): super(WithBcolzEquityMinuteBarReader, cls).init_class_fixtures() cls.bcolz_equity_minute_bar_path = p = \ cls.make_bcolz_equity_minute_bar_rootdir_path() days = cls.equity_minute_bar_days writer = BcolzMinuteBarWriter( p, cls.trading_calendars[Equity], days[0], days[-1], US_EQUITIES_MINUTES_PER_DAY ) writer.write(cls.make_equity_minute_bar_data()) cls.bcolz_equity_minute_bar_reader = \ BcolzMinuteBarReader(p) class WithBcolzFutureMinuteBarReader(WithFutureMinuteBarData, WithTmpDir): """ CatalystTestCase mixin providing cls.bcolz_minute_bar_path, cls.bcolz_minute_bar_ctable, and cls.bcolz_equity_minute_bar_reader class level fixtures. After init_class_fixtures has been called: - `cls.bcolz_minute_bar_path` is populated with `cls.tmpdir.getpath(cls.BCOLZ_MINUTE_BAR_PATH)`. - `cls.bcolz_minute_bar_ctable` is populated with data returned from `cls.make_equity_minute_bar_data`. By default this calls :func:`catalyst.pipeline.loaders.synthetic.make_equity_minute_bar_data`. - `cls.bcolz_equity_minute_bar_reader` is a minute bar reader pointing to the directory that was just written to. 
Attributes ---------- BCOLZ_FUTURE_MINUTE_BAR_PATH : str The path inside the tmpdir where this will be written. Methods ------- make_bcolz_minute_bar_rootdir_path() -> string A class method that returns the path for the directory that contains the minute bar ctables. By default this is a subdirectory BCOLZ_MINUTE_BAR_PATH in the shared temp directory. See Also -------- WithBcolzEquityDailyBarReader WithDataPortal catalyst.testing.create_minute_bar_data """ BCOLZ_FUTURE_MINUTE_BAR_PATH = 'minute_future_pricing' OHLC_RATIOS_PER_SID = None @classmethod def make_bcolz_future_minute_bar_rootdir_path(cls): return cls.tmpdir.makedir(cls.BCOLZ_FUTURE_MINUTE_BAR_PATH) @classmethod def init_class_fixtures(cls): super(WithBcolzFutureMinuteBarReader, cls).init_class_fixtures() trading_calendar = get_calendar('us_futures') cls.bcolz_future_minute_bar_path = p = \ cls.make_bcolz_future_minute_bar_rootdir_path() days = cls.future_minute_bar_days writer = BcolzMinuteBarWriter( p, trading_calendar, days[0], days[-1], FUTURES_MINUTES_PER_DAY, ohlc_ratios_per_sid=cls.OHLC_RATIOS_PER_SID, ) writer.write(cls.make_future_minute_bar_data()) cls.bcolz_future_minute_bar_reader = \ BcolzMinuteBarReader(p) class WithConstantEquityMinuteBarData(WithEquityMinuteBarData): EQUITY_MINUTE_CONSTANT_LOW = 3.0 EQUITY_MINUTE_CONSTANT_OPEN = 4.0 EQUITY_MINUTE_CONSTANT_CLOSE = 5.0 EQUITY_MINUTE_CONSTANT_HIGH = 6.0 EQUITY_MINUTE_CONSTANT_VOLUME = 100.0 @classmethod def make_equity_minute_bar_data(cls): trading_calendar = cls.trading_calendars[Equity] sids = cls.asset_finder.equities_sids minutes = trading_calendar.minutes_for_sessions_in_range( cls.equity_minute_bar_days[0], cls.equity_minute_bar_days[-1], ) frame = pd.DataFrame( { 'open': cls.EQUITY_MINUTE_CONSTANT_OPEN, 'high': cls.EQUITY_MINUTE_CONSTANT_HIGH, 'low': cls.EQUITY_MINUTE_CONSTANT_LOW, 'close': cls.EQUITY_MINUTE_CONSTANT_CLOSE, 'volume': cls.EQUITY_MINUTE_CONSTANT_VOLUME, }, index=minutes, ) return ((sid, frame) for sid in sids) class 
WithConstantFutureMinuteBarData(WithFutureMinuteBarData): FUTURE_MINUTE_CONSTANT_LOW = 3.0 FUTURE_MINUTE_CONSTANT_OPEN = 4.0 FUTURE_MINUTE_CONSTANT_CLOSE = 5.0 FUTURE_MINUTE_CONSTANT_HIGH = 6.0 FUTURE_MINUTE_CONSTANT_VOLUME = 100.0 @classmethod def make_future_minute_bar_data(cls): trading_calendar = cls.trading_calendars[Future] sids = cls.asset_finder.futures_sids minutes = trading_calendar.minutes_for_sessions_in_range( cls.future_minute_bar_days[0], cls.future_minute_bar_days[-1], ) frame = pd.DataFrame( { 'open': cls.FUTURE_MINUTE_CONSTANT_OPEN, 'high': cls.FUTURE_MINUTE_CONSTANT_HIGH, 'low': cls.FUTURE_MINUTE_CONSTANT_LOW, 'close': cls.FUTURE_MINUTE_CONSTANT_CLOSE, 'volume': cls.FUTURE_MINUTE_CONSTANT_VOLUME, }, index=minutes, ) return ((sid, frame) for sid in sids) class WithAdjustmentReader(WithBcolzEquityDailyBarReader): """ CatalystTestCase mixin providing cls.adjustment_reader as a class level fixture. After init_class_fixtures has been called, `cls.adjustment_reader` will be populated with a new SQLiteAdjustmentReader object. The data that will be written can be passed by overriding `make_{field}_data` where field may be `splits`, `mergers` `dividends`, or `stock_dividends`. The daily bar reader used for this adjustment reader may be customized by overriding `make_adjustment_writer_equity_daily_bar_reader`. This is useful to providing a `MockDailyBarReader`. Methods ------- make_splits_data() -> pd.DataFrame A class method that returns a dataframe of splits data to write to the class's adjustment db. By default this is empty. make_mergers_data() -> pd.DataFrame A class method that returns a dataframe of mergers data to write to the class's adjustment db. By default this is empty. make_dividends_data() -> pd.DataFrame A class method that returns a dataframe of dividends data to write to the class's adjustment db. By default this is empty. 
make_stock_dividends_data() -> pd.DataFrame A class method that returns a dataframe of stock dividends data to write to the class's adjustment db. By default this is empty. make_adjustment_db_conn_str() -> string A class method that returns the sqlite3 connection string for the database in to which the adjustments will be written. By default this is an in-memory database. make_adjustment_writer_equity_daily_bar_reader() -> pd.DataFrame A class method that returns the daily bar reader to use for the class's adjustment writer. By default this is the class's actual ``bcolz_equity_daily_bar_reader`` as inherited from ``WithBcolzEquityDailyBarReader``. This should probably not be overridden; however, some tests used a ``MockDailyBarReader`` for this. make_adjustment_writer(conn: sqlite3.Connection) -> AdjustmentWriter A class method that constructs the adjustment which will be used to write the data into the connection to be used by the class's adjustment reader. See Also -------- catalyst.testing.MockDailyBarReader """ @classmethod def _make_data(cls): return None make_splits_data = _make_data make_mergers_data = _make_data make_dividends_data = _make_data make_stock_dividends_data = _make_data del _make_data @classmethod def make_adjustment_writer(cls, conn): return SQLiteAdjustmentWriter( conn, cls.make_adjustment_writer_equity_daily_bar_reader(), cls.equity_daily_bar_days, ) @classmethod def make_adjustment_writer_equity_daily_bar_reader(cls): return cls.bcolz_equity_daily_bar_reader @classmethod def make_adjustment_db_conn_str(cls): return ':memory:' @classmethod def init_class_fixtures(cls): super(WithAdjustmentReader, cls).init_class_fixtures() conn = sqlite3.connect(cls.make_adjustment_db_conn_str()) cls.make_adjustment_writer(conn).write( splits=cls.make_splits_data(), mergers=cls.make_mergers_data(), dividends=cls.make_dividends_data(), stock_dividends=cls.make_stock_dividends_data(), ) cls.adjustment_reader = SQLiteAdjustmentReader(conn) class 
WithEquityPricingPipelineEngine(WithAdjustmentReader, WithTradingSessions): """ Mixin providing the following as a class-level fixtures. - cls.data_root_dir - cls.findata_dir - cls.pipeline_engine - cls.adjustments_db_path """ @classmethod def init_class_fixtures(cls): cls.data_root_dir = cls.enter_class_context(tmp_dir()) cls.findata_dir = cls.data_root_dir.makedir('findata') super(WithEquityPricingPipelineEngine, cls).init_class_fixtures() loader = USEquityPricingLoader( cls.bcolz_equity_daily_bar_reader, SQLiteAdjustmentReader(cls.adjustments_db_path), USEquityPricing, ) def get_loader(column): if column in USEquityPricing.columns: return loader else: raise AssertionError("No loader registered for %s" % column) cls.pipeline_engine = SimplePipelineEngine( get_loader=get_loader, calendar=cls.nyse_sessions, asset_finder=cls.asset_finder, ) @classmethod def make_adjustment_db_conn_str(cls): cls.adjustments_db_path = os.path.join( cls.findata_dir, 'adjustments', cls.END_DATE.strftime("%Y-%m-%d-adjustments.db") ) ensure_directory(os.path.dirname(cls.adjustments_db_path)) return cls.adjustments_db_path class WithSeededRandomPipelineEngine(WithTradingSessions, WithAssetFinder): """ CatalystTestCase mixin providing class-level fixtures for running pipelines against deterministically-generated random data. Attributes ---------- SEEDED_RANDOM_PIPELINE_SEED : int Fixture input. Random seed used to initialize the random state loader. seeded_random_loader : SeededRandomLoader Fixture output. Loader capable of providing columns for catalyst.pipeline.data.testing.TestingDataSet. seeded_random_engine : SimplePipelineEngine Fixture output. A pipeline engine that will use seeded_random_loader as its only data provider. Methods ------- run_pipeline(start_date, end_date) Run a pipeline with self.seeded_random_engine. 
See Also -------- catalyst.pipeline.loaders.synthetic.SeededRandomLoader catalyst.pipeline.loaders.testing.make_seeded_random_loader catalyst.pipeline.engine.SimplePipelineEngine """ SEEDED_RANDOM_PIPELINE_SEED = 42 @classmethod def init_class_fixtures(cls): super(WithSeededRandomPipelineEngine, cls).init_class_fixtures() cls._sids = cls.asset_finder.sids cls.seeded_random_loader = loader = make_seeded_random_loader( cls.SEEDED_RANDOM_PIPELINE_SEED, cls.trading_days, cls._sids, ) cls.seeded_random_engine = SimplePipelineEngine( get_loader=lambda column: loader, calendar=cls.trading_days, asset_finder=cls.asset_finder, ) def raw_expected_values(self, column, start_date, end_date): """ Get an array containing the raw values we expect to be produced for the given dates between start_date and end_date, inclusive. """ all_values = self.seeded_random_loader.values( column.dtype, self.trading_days, self._sids, ) row_slice = self.trading_days.slice_indexer(start_date, end_date) return all_values[row_slice] def run_pipeline(self, pipeline, start_date, end_date): """ Run a pipeline with self.seeded_random_engine. """ if start_date not in self.trading_days: raise AssertionError("Start date not in calendar: %s" % start_date) if end_date not in self.trading_days: raise AssertionError("End date not in calendar: %s" % end_date) return self.seeded_random_engine.run_pipeline( pipeline, start_date, end_date, ) class WithDataPortal(WithAdjustmentReader, # Ordered so that bcolz minute reader is used first. WithBcolzEquityMinuteBarReader, WithBcolzFutureMinuteBarReader): """ CatalystTestCase mixin providing self.data_portal as an instance level fixture. After init_instance_fixtures has been called, `self.data_portal` will be populated with a new data portal created by passing in the class's trading env, `cls.bcolz_equity_minute_bar_reader`, `cls.bcolz_equity_daily_bar_reader`, and `cls.adjustment_reader`. 
Attributes ---------- DATA_PORTAL_USE_DAILY_DATA : bool Should the daily bar reader be used? Defaults to True. DATA_PORTAL_USE_MINUTE_DATA : bool Should the minute bar reader be used? Defaults to True. DATA_PORTAL_USE_ADJUSTMENTS : bool Should the adjustment reader be used? Defaults to True. Methods ------- make_data_portal() -> DataPortal Method which returns the data portal to be used for each test case. If this is overridden, the ``DATA_PORTAL_USE_*`` attributes may not be respected. """ DATA_PORTAL_USE_DAILY_DATA = True DATA_PORTAL_USE_MINUTE_DATA = True DATA_PORTAL_USE_ADJUSTMENTS = True DATA_PORTAL_FIRST_TRADING_DAY = None DATA_PORTAL_LAST_AVAILABLE_SESSION = None DATA_PORTAL_LAST_AVAILABLE_MINUTE = None DATA_PORTAL_MINUTE_HISTORY_PREFETCH = DEFAULT_MINUTE_HISTORY_PREFETCH DATA_PORTAL_DAILY_HISTORY_PREFETCH = DEFAULT_DAILY_HISTORY_PREFETCH def make_data_portal(self): if self.DATA_PORTAL_FIRST_TRADING_DAY is None: if self.DATA_PORTAL_USE_MINUTE_DATA: self.DATA_PORTAL_FIRST_TRADING_DAY = ( self.bcolz_equity_minute_bar_reader. first_trading_day) elif self.DATA_PORTAL_USE_DAILY_DATA: self.DATA_PORTAL_FIRST_TRADING_DAY = ( self.bcolz_equity_daily_bar_reader. 
first_trading_day) return DataPortal( self.env.asset_finder, self.trading_calendar, first_trading_day=self.DATA_PORTAL_FIRST_TRADING_DAY, daily_reader=( self.bcolz_equity_daily_bar_reader if self.DATA_PORTAL_USE_DAILY_DATA else None ), minute_reader=( self.bcolz_equity_minute_bar_reader if self.DATA_PORTAL_USE_MINUTE_DATA else None ), adjustment_reader=( self.adjustment_reader if self.DATA_PORTAL_USE_ADJUSTMENTS else None ), future_minute_reader=( self.bcolz_future_minute_bar_reader if self.DATA_PORTAL_USE_MINUTE_DATA else None ), future_daily_reader=( MinuteResampleSessionBarReader( self.bcolz_future_minute_bar_reader.trading_calendar, self.bcolz_future_minute_bar_reader) if self.DATA_PORTAL_USE_MINUTE_DATA else None ), last_available_session=self.DATA_PORTAL_LAST_AVAILABLE_SESSION, last_available_minute=self.DATA_PORTAL_LAST_AVAILABLE_MINUTE, minute_history_prefetch_length=self. DATA_PORTAL_MINUTE_HISTORY_PREFETCH, daily_history_prefetch_length=self. DATA_PORTAL_DAILY_HISTORY_PREFETCH, ) def init_instance_fixtures(self): super(WithDataPortal, self).init_instance_fixtures() self.data_portal = self.make_data_portal() class WithResponses(object): """ CatalystTestCase mixin that provides self.responses as an instance fixture. After init_instance_fixtures has been called, `self.responses` will be a new `responses.RequestsMock` object. Users may add new endpoints to this with the `self.responses.add` method. """ def init_instance_fixtures(self): super(WithResponses, self).init_instance_fixtures() self.responses = self.enter_instance_context( responses.RequestsMock(), ) class WithCreateBarData(WithDataPortal): CREATE_BARDATA_DATA_FREQUENCY = 'minute' def create_bardata(self, simulation_dt_func, restrictions=None): return BarData( self.data_portal, simulation_dt_func, self.CREATE_BARDATA_DATA_FREQUENCY, self.trading_calendar, restrictions or NoRestrictions() )
{ "content_hash": "ec6cc9d07845a2a9b8002149881b0a00", "timestamp": "", "source": "github", "line_count": 1554, "max_line_length": 79, "avg_line_length": 36.53281853281853, "alnum_prop": 0.6430987106320016, "repo_name": "enigmampc/catalyst", "id": "56a0b4b1febfdd5e2f354f5a9f27277433cc7a77", "size": "56772", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "catalyst/testing/fixtures.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "7014" }, { "name": "Dockerfile", "bytes": "2510" }, { "name": "Emacs Lisp", "bytes": "138" }, { "name": "Jupyter Notebook", "bytes": "229701" }, { "name": "PowerShell", "bytes": "3269" }, { "name": "Python", "bytes": "4279642" }, { "name": "Shell", "bytes": "7469" } ], "symlink_target": "" }
"""Showcase what the output of pymunk.pyglet_util draw methods will look like. See pygame_util_demo.py for a comparison to pygame. """ __docformat__ = "reStructuredText" import pyglet import pymunk import pymunk.pyglet_util from .shapes_for_draw_demos import fill_space window = pyglet.window.Window(1000, 700, vsync=False) space = pymunk.Space() draw_options = pymunk.pyglet_util.DrawOptions() captions = fill_space(space) textbatch = pyglet.graphics.Batch() pyglet.text.Label( "Demo example of shapes drawn by pyglet_util.draw()", x=5, y=5, batch=textbatch, color=(100, 100, 100, 255), ) for caption in captions: x, y = caption[0] y = y - 10 pyglet.text.Label(caption[1], x=x, y=y, batch=textbatch, color=(50, 50, 50, 255)) # otherwise save screenshot wont work _ = pyglet.window.FPSDisplay(window) @window.event def on_draw(): pyglet.gl.glClearColor(255, 255, 255, 255) window.clear() textbatch.draw() space.debug_draw(draw_options) @window.event def on_key_press(symbol, modifiers): if symbol == pyglet.window.key.P: pyglet.image.get_buffer_manager().get_color_buffer().save( "pyglet_util_demo.png" ) pyglet.app.run()
{ "content_hash": "a8262475d7139a9006380523b0483ab0", "timestamp": "", "source": "github", "line_count": 56, "max_line_length": 85, "avg_line_length": 21.767857142857142, "alnum_prop": 0.682526661197703, "repo_name": "viblo/pymunk", "id": "f664d256003364b0fb65da1cc30a368f250bd495", "size": "1219", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "pymunk/examples/pyglet_util_demo.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "1349" }, { "name": "Jupyter Notebook", "bytes": "297305" }, { "name": "Objective-C", "bytes": "2968" }, { "name": "Python", "bytes": "627317" } ], "symlink_target": "" }
from astropy.io import fits import numpy as np import os as os from . import drizzle_position as dp from . import acs_determine_focus as adf from . import acs_3dpsf as acs_3dpsf from scipy.io.idl import readsav from . import rotate_moments as rm import copy as cp from . import directories import sys from . import getIndividualExposures as gie def psf_cor( mom_file, outfile, drizzle_file, wavelength, mult=1, min_rad=1.5, chip=1, constantpsf=0, mscale=0, order=3, n_chip=2): ''' ; ; NAME: rrg_psf_cor ; ; PURPOSE: ; Uses the RRG method to correct moments for PSF effects. ; corrects using a 3rd order polynomial fit to the moments ;the PSF moments should be weighted, this programs corrects for the weighting of the PSF moments ;min_rad is the size of the weighting function used to measure the PSF ; ; INPUTS: ; mom_file- moment file IDL structure produced by rrg_measure_mom ; sex_catalog-SExtractor catalog in fits format ; outfile- output file to store IDL array of variables ; mult multiplier used to find gaussian width from area ; min_rad- stellar radius ; coeff_file text file with the coefficients of the PSF correction ;OUTPUTS: ; stores moments and other info in the output file as an idl structure ; ; MODIFICATION HISTORY: ; ; April 2003 jrhodes ; May 2005 jrhodes added mscale option, only works with constantpsf ; July 2011 dharvey modified to allow for rotated images,(see section ; for more details) ; TO DO : ;CHECK THAT ANGLES ARE CORRECT PLEASE ;mom_file has following structure ;moms={x:x,y:y,xx:xx,yy:yy,xy:xy,xxxx:xxxx,xxxy:xxxy,$ ;xxyy:xxyy,xyyy:xyyy,yyyy:yyyy,radius:radius,sn:sn,back:back,$ ;class:class,area:area,area_ab:area_ab,flags:flags,$ ;a:a,b:b,theta:theta,mag:mag,prob:prob} ''' dirs = directories.return_dirs( ) moms = fits.open(mom_file)[1].data radius = np.sqrt( ( moms.xx + moms.yy)/2.) 
sigma = cp.copy(moms.radius[moms['galStarFlag']==1]) sigma[ sigma < min_rad ] = min_rad w2=(1./(sigma*sigma)); w4=(1./(2.*sigma*sigma*sigma*sigma)) #; Evaluate moments of PSF #;I will work out the focus myself! muwagahaha #;Before i get the psf moments i need to get the scat catalogue, which #;is found by running tintim_make_scat.pro (for tiny_tim models) print('Getting the psf models from tinytim, cheers Tim!') #need to think about this #tinytim_make_scat, data_dir=dirs.model_dir, wavelength=filter[0], scat=scat scat = readsav( dirs.psf_model_dir+'/TinyTim'+wavelength+'.scat' )['scat'] #so this function interpolates. ''' ;Okay so the way i am going to get the correct psf for each position ;in the drizzled image is. ; 1. Create a grid of positions which covers the entire field of view ;of the drizzles image ;2. For each position in the drizzled image, work out how many images ;cover this position ;3. For each of the images covering this position, work out the psf ;4. Then take the average of the moments for each ''' images = gie.getIndividualExposures( drizzle_file ) if len(images) == 0: raise ValueError('Cant find single exposures of field') nImages = len(images) #Now get the positions in the drizzle frame of ref in the individual #frame of ref print("Getting position of stars & galaxies in each exposure") momsWithDrizzlePosition = \ dp.drizzle_position( drizzle_file, images, moms, dataDir=dirs.data_dir) galaxy_moms = cp.copy(momsWithDrizzlePosition[momsWithDrizzlePosition['galStarFlag'] == 1]) star_moms = cp.copy(momsWithDrizzlePosition[momsWithDrizzlePosition['galStarFlag'] == 0]) uncorrected_xx = galaxy_moms.xx uncorrected_yy = galaxy_moms.yy #Also get the Orientations in terms of the drizzled image, not #WCS #Now loop through each position in the psf grid, #and check if the positionsreffram(i,3,*) is non zero! 
and interpolate #to that x and y and then average moms #check that the position in the grid is in, then loop through each #image, interpolating the psf to this point, #PsfMoms is an vector of many classes of moments nGalaxies=len(galaxy_moms.x) psf_moms = moments( galaxy_moms.x, galaxy_moms.y, nGalaxies ) FocusArray = np.zeros(nImages) sys.stdout.write("\n") for iImage in range(nImages): sys.stdout.write("Getting PSF for image: %i/%i\r" % \ (iImage+1,nImages)) sys.stdout.flush() #Which positions are in the cluster frame iImage_name = images[iImage].split('/')[-1][0:8] inFrame = galaxy_moms[iImage_name+'_INFRAME'] == 1 #before i determine the psf moments i need to get the focus #position of the image in question #So get the focus position by fitting the true image stars to the #model focus = adf.acs_determine_focus( images[iImage], star_moms, \ drizzle_file, wavelength) #Just keep track of the focii i have used through out FocusArray[iImage] = focus #For all the points in the main drizzled field that are within #iImage, interpolate the psf from the ref fram of the single #image to the X,Y of the drizzled image iPsfMoms=\ acs_3dpsf.acs_3dpsf( galaxy_moms[iImage_name+'_X_IMAGE'][inFrame], galaxy_moms[iImage_name+'_Y_IMAGE'][inFrame], np.zeros(len(galaxy_moms[iImage_name+'_INFRAME'][inFrame]))+focus, \ radius, scat, degree=[3,2,2] ) #now rotate the moments according to the angle in orient iPsfMomsRot = rm.rotate_moments( iPsfMoms, galaxy_moms[iImage_name+'_ORIENTAT'][inFrame]) #CHECK THAT ANGLES ARE CORRECT HERE PLEASE mom_names = list(iPsfMoms.keys()) for iMom in mom_names: #I need to now rotate each moment according to the axis orient #with the drizzled image before stacking. if (iMom != 'degree') & \ (iMom != 'nExposures') &\ (iMom != 'radius') &\ (iMom != 'x') & (iMom != 'y'): psf_moms[iMom][inFrame] += iPsfMomsRot[iMom] #then keep count how many images per position psf_moms['nExposures'][ inFrame] += 1 #then give the position the value of the averaged psf_moms. 
#Save the focus array focuslist = open(dirs.data_dir+'/FocusArray.txt', "w") for i in range(nImages): ExpName = images[i].split('/')[-1].split('_')[0] focuslist.write( "%s %3.1f \n" % \ (ExpName, FocusArray[i])) #Now take the mean of each moment for iMom in mom_names: if (iMom != 'degree') & \ (iMom != 'nExposures') & \ (iMom != 'radius') &\ (iMom != 'x') & (iMom != 'y'): psf_moms[iMom] /= psf_moms['nExposures'] psf_moms[iMom][psf_moms['nExposures'] == 0] = 0. #Refind e1 and e2, assuming we want <q11>-<q22>/<q11>+<q22> not <e1> psf_moms.e1=(psf_moms.xx-psf_moms.yy)/ \ (psf_moms.xx+psf_moms.yy) psf_moms.e2=2.*psf_moms.xy/\ (psf_moms.xx+psf_moms.yy) #plot the psf model ''' ps, DrizzleFile+'/Psf_Model.ps', /carre, /psfont plot, psf_moms.x, psf_moms.y, title='PSF MODEL FOR '+cluster, $ xtitle='X [PIXELS]', ytitle='Y [PIXELS]', /iso, /nodata plt_evec, psf_moms.x, psf_moms.y, $ psf_moms.e1, psf_moms.e2, /e1e2, $ xscale=1000, yscale=1000 unps ''' #and continue on as per usual... pxxw=psf_moms.xx pxyw=psf_moms.xy pyyw=psf_moms.yy pxxxx=psf_moms.xxxx pxxxy=psf_moms.xxxy pxxyy=psf_moms.xxyy pxyyy=psf_moms.xyyy pyyyy=psf_moms.yyyy w=min_rad #correction for weighgting pxx=(pxxw-(pxxw*pyyw+pxxw*pxxw-pxxxx-pxxyy)/(2.*w**2)) pyy=(pyyw-(pyyw*pyyw+pyyw*pxxw-pyyyy-pxxyy)/(2.*w**2)) pxy=(pxyw-(pxyw*pxxw+pxyw*pyyw-pxyyy-pxxxy)/(2.*w**2)) trace=pxx+pyy a=pxx-trace/2 b=pxy c=pxy d=pyy-trace/2 e=pxxxx f=pxxxy z=pxxyy h=pxyyy k=pyyyy #So the problem is that i am timesing a which is a # [1,10000] psf_vector, we moms which is a vector [a,b] ixxa=a*(1-w2*2*galaxy_moms.xx+w4*\ (galaxy_moms.xxxx-galaxy_moms.xx*galaxy_moms.xx)); ixxb=b*(w4*(galaxy_moms.xxxy-galaxy_moms.xx*galaxy_moms.xy)); ixxc=c*(-w2*(2*galaxy_moms.xy)+\ w4*(galaxy_moms.xxxy-galaxy_moms.xx*galaxy_moms.xy)); ixxd=d*(w4*(galaxy_moms.xxyy-galaxy_moms.xx*galaxy_moms.yy)); iyya=a*(w4*(galaxy_moms.xxyy-galaxy_moms.xx*galaxy_moms.yy)); iyyb=b*(w4*(galaxy_moms.xyyy-galaxy_moms.yy*galaxy_moms.xy)); iyyc=c*(-w2*(2*galaxy_moms.xy)+\ 
w4*(galaxy_moms.xyyy-galaxy_moms.yy*galaxy_moms.xy)); iyyd=d*(1-w2*2*galaxy_moms.yy+\ w4*(galaxy_moms.yyyy-galaxy_moms.yy*galaxy_moms.yy)); ixya=a*(-w2*galaxy_moms.xy+\ w4*(galaxy_moms.xxxy-galaxy_moms.xy*galaxy_moms.xx)); ixyb=b*(1-w2*galaxy_moms.xx+\ w4*(galaxy_moms.xxyy-galaxy_moms.xy*galaxy_moms.xy)); ixyc=c*(-w2*galaxy_moms.yy+\ w4*(galaxy_moms.xxyy-galaxy_moms.xy*galaxy_moms.xy)); ixyd=d*(-w2*galaxy_moms.xy+\ w4*(galaxy_moms.xyyy-galaxy_moms.xy*galaxy_moms.yy)); ixx_corr=galaxy_moms.xx-ixxa-ixxb-ixxc-ixxd; iyy_corr=galaxy_moms.yy-iyya-iyyb-iyyc-iyyd; ixy_corr=galaxy_moms.xy-ixya-ixyb-ixyc-ixyd; shear=np.sqrt(0.5*(a+d+trace)) if constantpsf: shear=np.zeros(number)+np.sqrt(0.5*(a(0)+d(0)+trace(0))) corrected_moments = moments( galaxy_moms.x, galaxy_moms.y, \ len(galaxy_moms.x)) gw=np.sqrt( (shear*shear*sigma*sigma)/(shear*shear+sigma*sigma)); corrected_moments.xx = ((shear/gw)**4)*(ixx_corr-gw*gw); corrected_moments.yy = ((shear/gw)**4)*(iyy_corr-gw*gw); corrected_moments.xy = ((shear/gw)**4)*(ixy_corr); pxxc=a+trace/2 pyyc=d+trace/2 pxyc=b corrected_moments.xxxx = galaxy_moms.xxxx-e-\ 6*pxxc*galaxy_moms.xx+6*pxxc*pxxc; corrected_moments.xxxy = galaxy_moms.xxxy-f-\ 3*(pxyc*galaxy_moms.xx+pxxc*galaxy_moms.xy)+6*pxyc*pxxc; corrected_moments.xxyy = galaxy_moms.xxyy-z-\ pxxc*galaxy_moms.yy-pyyc*galaxy_moms.xx-\ 4*pxyc*galaxy_moms.xy+2*pxxc*pyyc+4*pxyc*pxyc; corrected_moments.xyyy = galaxy_moms.xyyy-h-\ 3*(pxyc*galaxy_moms.yy+pyyc*galaxy_moms.xy)+6*pxyc*pyyc; corrected_moments.yyyy = galaxy_moms.yyyy-\ k-6*pyyc*galaxy_moms.yy+6*pyyc*pyyc; corrected_moments['e1'] = (corrected_moments.xx-corrected_moments.yy)/\ (corrected_moments.xx+corrected_moments.yy) corrected_moments['e2'] = (2*corrected_moments.xy)/\ (corrected_moments.xx+corrected_moments.yy) #Those moments that were originally zero and -99 make them again hgere for i in list(corrected_moments.keys()): if i in moms.columns.names: corrected_moments[i][galaxy_moms[i] == -99] = -99 
corrected_moments[i][galaxy_moms[i] == 0] = 0 galaxy_moms[i] = corrected_moments[i] galaxy_moms['gal_size'] = np.sqrt( (corrected_moments.xx +corrected_moments.yy)/2.) #SOMETHIGN STUPID TO SEE WHAT HAPPENS #SOme weird shit going on, i have to write this out, and then #read it back in galaxy_moms = writeAndRemoveUnusedColums( galaxy_moms) newcol = [ fits.Column(name='shear', format=shear.dtype, array=shear), fits.Column(name='nExposures', format=psf_moms.nExposures.dtype, \ array=psf_moms.nExposures), fits.Column('xx_uncorrected', format=galaxy_moms.xx.dtype, array=uncorrected_xx), fits.Column('yy_uncorrected', format=galaxy_moms.yy.dtype, array=uncorrected_yy)] orig_cols = galaxy_moms.columns new_cols = fits.ColDefs(newcol) hdu = fits.BinTableHDU.from_columns(orig_cols+new_cols) hdu.writeto( outfile, overwrite=True) class moments( dict ): def __init__(self, x, y, n_objects): self.__dict__['x'] = x self.__dict__['y'] = y self.__dict__['e1'] = np.zeros(n_objects) self.__dict__['e2'] = np.zeros(n_objects) self.__dict__['xx'] = np.zeros(n_objects) self.__dict__['xy'] = np.zeros(n_objects) self.__dict__['yy'] = np.zeros(n_objects) self.__dict__['xxxx'] = np.zeros(n_objects) self.__dict__['xxxy'] = np.zeros(n_objects) self.__dict__['xxyy'] = np.zeros(n_objects) self.__dict__['xyyy'] = np.zeros(n_objects) self.__dict__['yyyy'] = np.zeros(n_objects) self.__dict__['nExposures'] = np.zeros(n_objects) def __setitem__(self, key, item): self.__dict__[key] = item def keys(self): return list(self.__dict__.keys()) def __getitem__(self, key): return self.__dict__[key] def writeAndRemoveUnusedColums( moments): momentNames = moments.columns.names columns = [] for i in momentNames: if (not 'INFRAME' in i) & \ (not 'fits_X_IMAGE' in i) & \ (not 'fits_Y_IMAGE' in i) & \ (not 'ORIENTAT' in i ): iColumn = \ fits.Column(i, format=moments[i].dtype, \ array=moments[i]) columns.append(iColumn) new_cols = fits.ColDefs(columns) hdu = fits.BinTableHDU.from_columns(new_cols) 
hdu.writeto('galaxies.fits',overwrite=True) return fits.open('galaxies.fits')[1].data
{ "content_hash": "0a11fe0cef18a69398a58b4f755e2d8b", "timestamp": "", "source": "github", "line_count": 415, "max_line_length": 104, "avg_line_length": 34.151807228915665, "alnum_prop": 0.5972624003386722, "repo_name": "davidharvey1986/pyRRG", "id": "09321e17e23c9febaead2726403b927bf6b60b61", "size": "14173", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/psf_cor.py", "mode": "33188", "license": "mit", "language": [ { "name": "PowerShell", "bytes": "8321" }, { "name": "Python", "bytes": "5803472" }, { "name": "Shell", "bytes": "3862" } ], "symlink_target": "" }
import uuid from msrest.pipeline import ClientRawResponse from msrestazure.azure_exceptions import CloudError from .. import models class ResourceSkusOperations(object): """ResourceSkusOperations operations. :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. :ivar api_version: Client Api Version. Constant value: "2017-09-01". """ models = models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self.api_version = "2017-09-01" self.config = config def list( self, custom_headers=None, raw=False, **operation_config): """Gets the list of Microsoft.Compute SKUs available for your Subscription. :param dict custom_headers: headers that will be added to the request :param bool raw: returns the direct response alongside the deserialized response :param operation_config: :ref:`Operation configuration overrides<msrest:optionsforoperations>`. 
:return: An iterator like instance of ResourceSku :rtype: ~azure.mgmt.compute.v2017_09_01.models.ResourceSkuPaged[~azure.mgmt.compute.v2017_09_01.models.ResourceSku] :raises: :class:`CloudError<msrestazure.azure_exceptions.CloudError>` """ def internal_paging(next_link=None, raw=False): if not next_link: # Construct URL url = self.list.metadata['url'] path_format_arguments = { 'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str') } url = self._client.format_url(url, **path_format_arguments) # Construct parameters query_parameters = {} query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str') else: url = next_link query_parameters = {} # Construct headers header_parameters = {} header_parameters['Content-Type'] = 'application/json; charset=utf-8' if self.config.generate_client_request_id: header_parameters['x-ms-client-request-id'] = str(uuid.uuid1()) if custom_headers: header_parameters.update(custom_headers) if self.config.accept_language is not None: header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str') # Construct and send request request = self._client.get(url, query_parameters) response = self._client.send( request, header_parameters, stream=False, **operation_config) if response.status_code not in [200]: exp = CloudError(response) exp.request_id = response.headers.get('x-ms-request-id') raise exp return response # Deserialize response deserialized = models.ResourceSkuPaged(internal_paging, self._deserialize.dependencies) if raw: header_dict = {} client_raw_response = models.ResourceSkuPaged(internal_paging, self._deserialize.dependencies, header_dict) return client_raw_response return deserialized list.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Compute/skus'}
{ "content_hash": "6e227238b76155cae4f1ce0ad388ab1f", "timestamp": "", "source": "github", "line_count": 93, "max_line_length": 144, "avg_line_length": 39.88172043010753, "alnum_prop": 0.6268535993529253, "repo_name": "lmazuel/azure-sdk-for-python", "id": "a6b8de4df8193352e619b7d72f1538bb561a9257", "size": "4183", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "azure-mgmt-compute/azure/mgmt/compute/v2017_09_01/operations/resource_skus_operations.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "42572767" } ], "symlink_target": "" }
from __future__ import absolute_import from __future__ import division from __future__ import print_function import numpy import argparse import sys import json from tensorflow.examples.tutorials.mnist import input_data import tensorflow as tf import argparse # Import data mnist = input_data.read_data_sets('tmp/tensorflow/mnist/input_data', one_hot=True) # Create the model x = tf.placeholder(tf.float32, [None, 784]) W1 = tf.Variable(tf.truncated_normal([784, 500], stddev=.1)) b1 = tf.Variable(tf.zeros([500])) x1 = x * ( numpy.random.rand( 100, 784 ) < .7 ) h = tf.nn.sigmoid( tf.matmul(x1, W1) + b1 ) W2 = tf.transpose( W1 ) b2 = tf.Variable(tf.zeros([784])) y = tf.matmul(h, W2) + b2 # Define loss and optimizer y_ = tf.placeholder(tf.float32, [None, 784]) cross_entropy_loss = tf.reduce_mean( tf.nn.sigmoid_cross_entropy_with_logits(y, y_) ) train_step = tf.train.GradientDescentOptimizer(0.1).minimize( cross_entropy_loss ) cross_entropy_summary = tf.summary.scalar( 'cross_entropy', cross_entropy_loss ) tf_weights = [ tf.split( 0, 28, W1[:,0:100] ) ] tf_weights_4d = tf.transpose( tf_weights, [ 3, 1, 2, 0 ] ) weights_images = tf.image_summary( 'weights', tf_weights_4d, 100 ) sess = tf.Session() saver = tf.train.Saver() parser = argparse.ArgumentParser() parser.add_argument("-restore", help="restore model from file") parser.add_argument("-save", help="save model to file") parser.add_argument("-logdir", help="logdir") args = parser.parse_args() if ( args.restore is None ): sess.run( tf.global_variables_initializer() ) else: saver.restore( sess, args.restore ) summary_writer = tf.train.SummaryWriter( args.logdir, sess.graph ) saver = tf.train.Saver() # Train step = 0 for _ in range(100000): for _ in range(100): batch_xs, batch_ys = mnist.train.next_batch(100) sess.run(train_step, feed_dict={x: batch_xs, y_: batch_xs}) cross_entropy_summary_str = sess.run( cross_entropy_summary, feed_dict= {x: batch_xs, y_: batch_xs} ) summary_writer.add_summary( cross_entropy_summary_str, 
global_step=step) wtimgs = sess.run( weights_images, feed_dict= {x: batch_xs, y_: batch_xs} ) summary_writer.add_summary( wtimgs, global_step=step) step = step + 1 save_path = saver.save( sess, args.save ) with open('weights.json', 'w') as outfile: json.dump(sess.run( W1 ).tolist(), outfile)
{ "content_hash": "30e5f8d9e67392ce5985f3aad5332821", "timestamp": "", "source": "github", "line_count": 87, "max_line_length": 103, "avg_line_length": 27.804597701149426, "alnum_prop": 0.6754857379082265, "repo_name": "jfrancis71/TensorFlowApps", "id": "b2d7839efd4490cc4572e4cd4ff033247bba258b", "size": "2609", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "TrainMNISTAutoencoder.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "27146" } ], "symlink_target": "" }
import logging from absl import flags from absl.testing import absltest from google.protobuf import json_format from framework import xds_k8s_testcase from framework import xds_url_map_testcase from framework.helpers import skips logger = logging.getLogger(__name__) flags.adopt_module_key_flags(xds_k8s_testcase) # Type aliases _XdsTestServer = xds_k8s_testcase.XdsTestServer _XdsTestClient = xds_k8s_testcase.XdsTestClient _DumpedXdsConfig = xds_url_map_testcase.DumpedXdsConfig _Lang = skips.Lang _TD_CONFIG_RETRY_WAIT_SEC = 2 class ApiListenerTest(xds_k8s_testcase.RegularXdsKubernetesTestCase): @staticmethod def is_supported(config: skips.TestConfig) -> bool: if config.client_lang == _Lang.PYTHON: # gRPC Python versions prior to v1.43.x don't support handling empty # RDS update. return config.version_gte('v1.43.x') return True def test_api_listener(self) -> None: with self.subTest('00_create_health_check'): self.td.create_health_check() with self.subTest('01_create_backend_services'): self.td.create_backend_service() with self.subTest('02_create_default_url_map'): self.td.create_url_map(self.server_xds_host, self.server_xds_port) with self.subTest('03_create_default_target_proxy'): self.td.create_target_proxy() with self.subTest('04_create_default_forwarding_rule'): self.td.create_forwarding_rule(self.server_xds_port) test_server: _XdsTestServer with self.subTest('05_start_test_server'): test_server = self.startTestServers()[0] with self.subTest('06_add_server_backends_to_backend_services'): self.setupServerBackends() test_client: _XdsTestClient with self.subTest('07_start_test_client'): test_client = self.startTestClient(test_server) with self.subTest('08_test_client_xds_config_exists'): self.assertXdsConfigExists(test_client) with self.subTest('09_test_server_received_rpcs'): self.assertSuccessfulRpcs(test_client) with self.subTest('10_create_alternate_url_map'): self.td.create_alternative_url_map(self.server_xds_host, self.server_xds_port, self.td.backend_service) 
# Create alternate target proxy pointing to alternate url_map with the same # host name in host rule. The port is fixed because they point to the same backend service. # Therefore we have to choose a non-`0.0.0.0` ip because ip:port needs to be unique. # We also have to set validate_for_proxyless=false because requires `0.0.0.0` ip. # See https://github.com/grpc/grpc-java/issues/8009 with self.subTest('11_create_alternate_target_proxy'): self.td.create_alternative_target_proxy() # Create a second suite of map+tp+fr with the same host name in host rule. # We set fr ip_address to be different from `0.0.0.0` and then set # validate_for_proxyless=false because ip:port needs to be unique. with self.subTest('12_create_alternate_forwarding_rule'): self.td.create_alternative_forwarding_rule(self.server_xds_port, ip_address='10.10.10.10') with self.subTest('13_test_server_received_rpcs_with_two_url_maps'): self.assertSuccessfulRpcs(test_client) raw_config = test_client.csds.fetch_client_status( log_level=logging.INFO) dumped_config = _DumpedXdsConfig( json_format.MessageToDict(raw_config)) previous_route_config_version = dumped_config.rds_version logger.info(('received client config from CSDS with two url maps, ' 'dump config: %s, rds version: %s'), dumped_config, previous_route_config_version) with self.subTest('14_delete_one_url_map_target_proxy_forwarding_rule'): self.td.delete_forwarding_rule() self.td.delete_target_grpc_proxy() self.td.delete_url_map() with self.subTest('15_test_server_continues_to_receive_rpcs'): self.assertRouteConfigUpdateTrafficHandoff( test_client, previous_route_config_version, _TD_CONFIG_RETRY_WAIT_SEC, xds_k8s_testcase._TD_CONFIG_MAX_WAIT_SEC) if __name__ == '__main__': absltest.main(failfast=True)
{ "content_hash": "56ec8df7cc503e562372f6b658d959b6", "timestamp": "", "source": "github", "line_count": 110, "max_line_length": 99, "avg_line_length": 41.75454545454546, "alnum_prop": 0.6383627258872197, "repo_name": "ejona86/grpc", "id": "4e76b03fc2fed350c4b384a5c8de6f75a538b1e7", "size": "5170", "binary": false, "copies": "7", "ref": "refs/heads/master", "path": "tools/run_tests/xds_k8s_test_driver/tests/api_listener_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Awk", "bytes": "5444" }, { "name": "Batchfile", "bytes": "38831" }, { "name": "C", "bytes": "1377708" }, { "name": "C#", "bytes": "106367" }, { "name": "C++", "bytes": "16353334" }, { "name": "CMake", "bytes": "29311" }, { "name": "CSS", "bytes": "1519" }, { "name": "Cython", "bytes": "258768" }, { "name": "DTrace", "bytes": "147" }, { "name": "Dockerfile", "bytes": "179860" }, { "name": "Go", "bytes": "34794" }, { "name": "HTML", "bytes": "14" }, { "name": "Java", "bytes": "13923" }, { "name": "JavaScript", "bytes": "5572" }, { "name": "Objective-C", "bytes": "724357" }, { "name": "Objective-C++", "bytes": "79351" }, { "name": "PHP", "bytes": "486781" }, { "name": "PowerShell", "bytes": "4516" }, { "name": "Python", "bytes": "3814860" }, { "name": "Ruby", "bytes": "650063" }, { "name": "Shell", "bytes": "766652" }, { "name": "Starlark", "bytes": "805915" }, { "name": "Swift", "bytes": "7487" }, { "name": "XSLT", "bytes": "9846" } ], "symlink_target": "" }
from PySide import QtCore, QtGui class Ui_genesis_mwin(object): def setupUi(self, genesis_mwin): genesis_mwin.setObjectName("genesis_mwin") genesis_mwin.resize(1009, 765) self.central_widget = QtGui.QWidget(genesis_mwin) self.central_widget.setObjectName("central_widget") self.central_vbox = QtGui.QVBoxLayout(self.central_widget) self.central_vbox.setObjectName("central_vbox") self.shot_open_pb = QtGui.QPushButton(self.central_widget) self.shot_open_pb.setMinimumSize(QtCore.QSize(200, 32)) self.shot_open_pb.setMaximumSize(QtCore.QSize(200, 16777215)) font = QtGui.QFont() font.setPointSize(10) font.setWeight(75) font.setBold(True) self.shot_open_pb.setFont(font) self.shot_open_pb.setObjectName("shot_open_pb") self.central_vbox.addWidget(self.shot_open_pb) self.asset_open_pb = QtGui.QPushButton(self.central_widget) self.asset_open_pb.setMinimumSize(QtCore.QSize(200, 32)) self.asset_open_pb.setMaximumSize(QtCore.QSize(200, 16777215)) font = QtGui.QFont() font.setPointSize(10) font.setWeight(75) font.setBold(True) self.asset_open_pb.setFont(font) self.asset_open_pb.setObjectName("asset_open_pb") self.central_vbox.addWidget(self.asset_open_pb) self.shot_save_pb = QtGui.QPushButton(self.central_widget) self.shot_save_pb.setMinimumSize(QtCore.QSize(200, 32)) self.shot_save_pb.setMaximumSize(QtCore.QSize(200, 16777215)) font = QtGui.QFont() font.setPointSize(10) font.setWeight(75) font.setBold(True) self.shot_save_pb.setFont(font) self.shot_save_pb.setObjectName("shot_save_pb") self.central_vbox.addWidget(self.shot_save_pb) self.asset_save_pb = QtGui.QPushButton(self.central_widget) self.asset_save_pb.setMinimumSize(QtCore.QSize(200, 32)) self.asset_save_pb.setMaximumSize(QtCore.QSize(200, 16777215)) font = QtGui.QFont() font.setPointSize(10) font.setWeight(75) font.setBold(True) self.asset_save_pb.setFont(font) self.asset_save_pb.setObjectName("asset_save_pb") self.central_vbox.addWidget(self.asset_save_pb) self.asset_descriptor_le = QtGui.QLineEdit(self.central_widget) 
self.asset_descriptor_le.setObjectName("asset_descriptor_le") self.central_vbox.addWidget(self.asset_descriptor_le) self.asset_descriptor_lb = QtGui.QLabel(self.central_widget) self.asset_descriptor_lb.setObjectName("asset_descriptor_lb") self.central_vbox.addWidget(self.asset_descriptor_lb) self.shot_descriptor_le = QtGui.QLineEdit(self.central_widget) self.shot_descriptor_le.setObjectName("shot_descriptor_le") self.central_vbox.addWidget(self.shot_descriptor_le) self.shot_descriptor_lb = QtGui.QLabel(self.central_widget) self.shot_descriptor_lb.setObjectName("shot_descriptor_lb") self.central_vbox.addWidget(self.shot_descriptor_lb) genesis_mwin.setCentralWidget(self.central_widget) self.statusbar = QtGui.QStatusBar(genesis_mwin) self.statusbar.setObjectName("statusbar") genesis_mwin.setStatusBar(self.statusbar) self.retranslateUi(genesis_mwin) QtCore.QMetaObject.connectSlotsByName(genesis_mwin) def retranslateUi(self, genesis_mwin): genesis_mwin.setWindowTitle(QtGui.QApplication.translate("genesis_mwin", "Genesis", None, QtGui.QApplication.UnicodeUTF8)) self.shot_open_pb.setText(QtGui.QApplication.translate("genesis_mwin", "Open", None, QtGui.QApplication.UnicodeUTF8)) self.asset_open_pb.setText(QtGui.QApplication.translate("genesis_mwin", "Open", None, QtGui.QApplication.UnicodeUTF8)) self.shot_save_pb.setText(QtGui.QApplication.translate("genesis_mwin", "Save/New", None, QtGui.QApplication.UnicodeUTF8)) self.asset_save_pb.setText(QtGui.QApplication.translate("genesis_mwin", "Save/New", None, QtGui.QApplication.UnicodeUTF8)) self.asset_descriptor_lb.setText(QtGui.QApplication.translate("genesis_mwin", "Descriptor:", None, QtGui.QApplication.UnicodeUTF8)) self.shot_descriptor_lb.setText(QtGui.QApplication.translate("genesis_mwin", "Descriptor:", None, QtGui.QApplication.UnicodeUTF8))
{ "content_hash": "5e7d157ecf5bae9e28fae5f3f699c84f", "timestamp": "", "source": "github", "line_count": 79, "max_line_length": 139, "avg_line_length": 55.63291139240506, "alnum_prop": 0.6976109215017064, "repo_name": "JukeboxPipeline/jukebox-core", "id": "aceb2b0199341b84150b62785fe204f72a830b1c", "size": "4691", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/jukeboxcore/addons/genesis/genesis_ui.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Makefile", "bytes": "1221" }, { "name": "Python", "bytes": "890248" }, { "name": "Shell", "bytes": "962" } ], "symlink_target": "" }
""" SchedulerOptions monitors a local .json file for changes and loads it if needed. This file is converted to a data structure and passed into the filtering and weighing functions which can use it for dynamic configuration. """ import datetime import json import os from oslo.config import cfg from cinder.openstack.common.gettextutils import _ from cinder.openstack.common import log as logging from cinder.openstack.common import timeutils scheduler_json_config_location_opt = cfg.StrOpt( 'scheduler_json_config_location', default='', help='Absolute path to scheduler configuration JSON file.') CONF = cfg.CONF CONF.register_opt(scheduler_json_config_location_opt) LOG = logging.getLogger(__name__) class SchedulerOptions(object): """SchedulerOptions monitors a local .json file for changes. The file is reloaded if needed and converted to a data structure and passed into the filtering and weighing functions which can use it for dynamic configuration. """ def __init__(self): super(SchedulerOptions, self).__init__() self.data = {} self.last_modified = None self.last_checked = None def _get_file_handle(self, filename): """Get file handle. Broken out for testing.""" return open(filename) def _get_file_timestamp(self, filename): """Get the last modified datetime. Broken out for testing.""" try: return os.path.getmtime(filename) except os.error as e: LOG.exception(_("Could not stat scheduler options file " "%(filename)s: '%(e)s'"), {'filename': filename, 'e': e}) raise def _load_file(self, handle): """Decode the JSON file. Broken out for testing.""" try: return json.load(handle) except ValueError as e: LOG.exception(_("Could not decode scheduler options: '%s'") % e) return {} def _get_time_now(self): """Get current UTC. 
Broken out for testing.""" return timeutils.utcnow() def get_configuration(self, filename=None): """Check the json file for changes and load it if needed.""" if not filename: filename = CONF.scheduler_json_config_location if not filename: return self.data if self.last_checked: now = self._get_time_now() if now - self.last_checked < datetime.timedelta(minutes=5): return self.data last_modified = self._get_file_timestamp(filename) if (not last_modified or not self.last_modified or last_modified > self.last_modified): self.data = self._load_file(self._get_file_handle(filename)) self.last_modified = last_modified if not self.data: self.data = {} return self.data
{ "content_hash": "ce34d6a8791573f27a6866bb4afde937", "timestamp": "", "source": "github", "line_count": 90, "max_line_length": 76, "avg_line_length": 32.17777777777778, "alnum_prop": 0.6294889502762431, "repo_name": "github-borat/cinder", "id": "0d405fdf1101411eaca0569693d884b7a75111a6", "size": "3536", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "cinder/scheduler/scheduler_options.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "6575951" }, { "name": "Shell", "bytes": "8998" } ], "symlink_target": "" }
__author__ = 'yinjun' """ Definition of TreeNode: class TreeNode: def __init__(self, val): this.val = val this.left, this.right = None, None Example of iterate a tree: iterator = Solution(root) while iterator.hasNext(): node = iterator.next() do something for node """ class Solution: #@param root: The root of binary tree. def __init__(self, root): # write your code here self.stack = [] self.dict = {} self.dictStack = {} if root == None: return if root.right!=None: self.stack.append(root.right) self.dictStack[root.right] = 1 self.stack.append(root) self.dictStack[root] = 1 if root.left!=None: self.stack.append(root.left) self.dictStack[root.left] = 1 self.length = len(self.stack) #@return: True if there has next node, or false def hasNext(self): # write your code here while self.length > 0: p = self.stack.pop() self.dictStack.pop(p) self.length -= 1 if p.left ==None or p.left !=None and p.left in self.dict: self.dict[p] = 1 self.next = p #result.append(p.val) if p.right!=None and p.right not in self.dictStack: self.stack.append(p.right) self.dictStack[p.right] = 1 self.length += 1 return True else: if p.right!=None: self.stack.append(p.right) self.dictStack[p.right] = 1 self.stack.append(p) self.dictStack[p] = 1 if p.left!=None: self.stack.append(p.left) self.dictStack[p.left] = 1 self.length = len(self.stack) return False #@return: return next node def next(self): #write your code here return self.next
{ "content_hash": "bf3479dd750441f76dc1f3a63566a089", "timestamp": "", "source": "github", "line_count": 78, "max_line_length": 70, "avg_line_length": 26.03846153846154, "alnum_prop": 0.5022156573116692, "repo_name": "shootsoft/practice", "id": "a947193d13ede649d002a442fedfd7011007d97a", "size": "2031", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "lintcode/NineChapters/03/binary-search-tree-iterator.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "722333" } ], "symlink_target": "" }
""" Messaging API API to send messages: - currently SMS, Email & Twitter Messages get sent to the Outbox (& Log) From there, Cron tasks collect them & send them @copyright: 2009-2012 (c) Sahana Software Foundation @license: MIT Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
""" __all__ = ["S3Msg", "S3Compose"] import datetime import string import urllib from urllib2 import urlopen from gluon import current, redirect from gluon.html import * from s3crud import S3CRUD from s3utils import s3_debug from s3validators import IS_ONE_OF, IS_ONE_OF_EMPTY IDENTITYTRANS = ALLCHARS = string.maketrans("", "") NOTPHONECHARS = ALLCHARS.translate(IDENTITYTRANS, string.digits) NOTTWITTERCHARS = ALLCHARS.translate(IDENTITYTRANS, "%s%s_" % (string.digits, string.letters)) TWITTER_MAX_CHARS = 140 TWITTER_HAS_NEXT_SUFFIX = u' \u2026' TWITTER_HAS_PREV_PREFIX = u'\u2026 ' # ============================================================================= class S3Msg(object): """ Messaging framework """ def __init__(self, modem=None): T = current.T self.modem = modem # http://docs.oasis-open.org/emergency/edxl-have/cs01/xPIL-types.xsd # <xs:simpleType name="CommunicationMediaTypeList"> # <xs:enumeration value="Cellphone"/> # <xs:enumeration value="Fax"/> # <xs:enumeration value="Pager"/> # <xs:enumeration value="Telephone"/> # <xs:enumeration value="VOIP"/> # <xs:simpleType name="ElectronicAddressIdentifierTypeList"> # <xs:enumeration value="AIM"/> # <xs:enumeration value="EMAIL"/> # <xs:enumeration value="GOOGLE"/> # <xs:enumeration value="GIZMO"/> # <xs:enumeration value="ICQ"/> # <xs:enumeration value="JABBER"/> # <xs:enumeration value="MSN"/> # <xs:enumeration value="SIP"/> # <xs:enumeration value="SKYPE"/> # <xs:enumeration value="URL"/> # <xs:enumeration value="XRI"/> # <xs:enumeration value="YAHOO"/> # Full range of contact options self.CONTACT_OPTS = { "EMAIL": T("Email"), "SMS": current.deployment_settings.get_ui_label_mobile_phone(), "HOME_PHONE": T("Home phone"), "WORK_PHONE": T("Work phone"), "FAX": T("Fax"), "SKYPE": T("Skype"), "TWITTER": T("Twitter"), "FACEBOOK": T("Facebook"), "RADIO": T("Radio Callsign"), #"XMPP": "XMPP", "OTHER": T("other") } # Those contact options to which we can send notifications # NB Coded into hrm controller (map_popup) & 
s3.msg.js self.MSG_CONTACT_OPTS = { "EMAIL": T("Email"), "SMS": current.deployment_settings.get_ui_label_mobile_phone(), "TWITTER": T("Twitter"), #"XMPP": "XMPP", "TWILIO": T("Twilio SMS") } self.GATEWAY_OPTS = { "MODEM": T("Modem"), "SMTP": T("SMTP"), "TROPO": T("Tropo"), "WEB_API": T("Web API") } # ------------------------------------------------------------------------- @staticmethod def sanitise_phone(phone): """ Strip out unnecessary characters from the string: +()- & space """ settings = current.deployment_settings default_country_code = settings.get_L10n_default_country_code() clean = phone.translate(IDENTITYTRANS, NOTPHONECHARS) # If number starts with a 0 then need to remove this & add the country code in if clean[0] == "0": # Add default country code if default_country_code == 39: # Italy keeps 0 after country code clean = "%s%s" % (default_country_code, clean) else: clean = "%s%s" % (default_country_code, string.lstrip(clean, "0")) return clean # ========================================================================= # Inbound Messages # ========================================================================= @staticmethod def receive_msg(subject="", message="", sender="", fromaddress="", system_generated = False, pr_message_method = "EMAIL", ): """ Function to call to drop incoming messages into msg_log """ db = current.db s3db = current.s3db try: message_log_id = s3db.msg_log.insert(inbound = True, subject = subject, message = message, sender = sender, fromaddress = fromaddress, ) except: return False #2) This is not transaction safe - power failure in the middle will cause no message in the outbox try: s3db.msg_channel.insert(message_id = message_log_id, pr_message_method = pr_message_method) except: return False # Explicitly commit DB operations when running from Cron db.commit() return True # ------------------------------------------------------------------------- @staticmethod def parse_import(workflow, source): """ Parse Inbound Messages """ 
        # Parses unhandled inbound rows in msg_log for the given workflow
        # using the deployment's S3Parsing.parser, writes the parsed reply
        # back onto the row and queues an outbound reply per matched pe_id.
        from s3parser import S3Parsing

        db = current.db
        s3db = current.s3db
        ltable = s3db.msg_log
        wtable = s3db.msg_workflow
        otable = s3db.msg_outbox
        ctable = s3db.pr_contact
        parser = S3Parsing.parser
        linsert = ltable.insert
        oinsert = otable.insert
        contact_method = ctable.contact_method
        value = ctable.value
        lid = ltable.id

        query = (wtable.workflow_task_id == workflow) & \
                (wtable.source_task_id == source)
        records = db(query).select(wtable.source_task_id)
        reply = ""
        wflow = ""
        contact = ""
        for record in records:
            # Unparsed, inbound messages from this source
            query = (ltable.is_parsed == False) & \
                    (ltable.inbound == True) & \
                    (ltable.source_task_id == record.source_task_id)
            rows = db(query).select()

            for row in rows:
                message = row.message
                try:
                    # Sender is expected in "Name <address>" form
                    contact = row.sender.split("<")[1].split(">")[0]
                    query = (contact_method == "EMAIL") & \
                            (value == contact)
                    pe_ids = db(query).select(ctable.pe_id)
                    if not pe_ids:
                        # Fall back to looking the sender up by SMS number
                        query = (contact_method == "SMS") & \
                                (value == contact)
                        pe_ids = db(query).select(ctable.pe_id)
                except:
                    raise ValueError("Source not defined!")

                reply = parser(workflow, message, contact)
                if reply:
                    db(lid == row.id).update(reply = reply,
                                             is_parsed = True)
                else:
                    # Parser produced nothing: check whether a previous pass
                    # already tagged this row with a "Workflow:<id>." hint
                    flow = db(lid == row.id).select(ltable.reply,
                                                    limitby=(0, 1)).first()
                    try:
                        wflow = flow.reply.split("Workflow:")[1].split(".")[0]
                    except:
                        pass
                    if wflow == workflow:
                        reply = "Send help to see how to respond!"
                        db(lid == row.id).update(reply = reply,
                                                 is_parsed = True)
                    else:
                        reply = "Workflow:%s. Send help to see how to respond!" \
                                % workflow
                        db(lid == row.id).update(reply = flow.reply + reply)
                        db.commit()
                        return
                # Log the reply & queue it for each matched recipient
                reply = linsert(recipient = row.sender,
                                subject = "Parsed Reply",
                                message = reply)

                if pe_ids:
                    for pe_id in pe_ids:
                        oinsert(message_id = reply.id,
                                address = contact,
                                pe_id = pe_id.pe_id)
                db.commit()

        return

    # =========================================================================
    # Outbound Messages
    # =========================================================================
    def compose(self,
                type = "SMS",
                recipient_type = None,
                recipient = None,
                #hide = True,
                subject = "",
                message = "",
                url = None,
                formid = None,
                ):
        """
            Form to Compose a Message

            @param type: The default message type: None, EMAIL, SMS or TWITTER
            @param recipient_type: Send to Persons or Groups? (pr_person or pr_group)
            @param recipient: The pe_id of the person/group to send the message to
                              - this can also be set by setting one of
                                (in priority order, if multiple found):
                                request.vars.pe_id
                                request.vars.person_id @ToDo
                                request.vars.group_id  @ToDo
                                request.vars.hrm_id    @ToDo
            @param subject: The default subject text (for Emails)
            @param message: The default message text
            @param url: Redirect to the specified URL() after message sent
            @param formid: If set, allows multiple forms open in different tabs
        """

        T = current.T
        vars = current.request.vars

        s3db = current.s3db
        ltable = s3db.msg_log
        otable = s3db.msg_outbox

        if not url:
            url = URL(c="msg", f="compose")

        # Requires login
        auth = current.auth
        if auth.is_logged_in() or auth.basic():
            pass
        else:
            redirect(URL(c="default", f="user", args="login",
                         vars={"_next" : url}))

        ltable.subject.default = subject
        ltable.message.default = message

        otable.pr_message_method.default = type

        # Hide fields which are set programmatically
        ltable.pe_id.writable = ltable.pe_id.readable = False
        ltable.sender.writable = ltable.sender.readable = False
        ltable.fromaddress.writable = ltable.fromaddress.readable = False
        ltable.verified.writable = ltable.verified.readable = False
        ltable.verified_comments.writable = ltable.verified_comments.readable = False
        ltable.actioned.writable = ltable.actioned.readable = False
        ltable.actionable.writable = ltable.actionable.readable = False
        ltable.actioned_comments.writable = ltable.actioned_comments.readable = False

        ltable.inbound.writable = ltable.inbound.readable = False
        ltable.is_parsed.writable = ltable.is_parsed.readable = False
        ltable.reply.writable = ltable.reply.readable = False
        ltable.source_task_id.writable = ltable.source_task_id.readable = False

        ltable.subject.label = T("Subject")
        ltable.message.label = T("Message")
        #ltable.priority.label = T("Priority")

        # Resolve the recipient from request vars if not passed in
        if not recipient:
            if "pe_id" in vars:
                recipient = vars.pe_id
            elif "person_id" in vars:
                # @ToDo
                pass
            elif "group_id" in vars:
                # @ToDo
                pass
            elif "hrm_id" in vars:
                # @ToDo
                pass

        if recipient:
            ltable.pe_id.default = recipient
            otable.pe_id.default = recipient
            ltable.pe_id.requires = IS_ONE_OF_EMPTY(current.db,
                                                    "pr_pentity.pe_id",
                                                    multiple=True)
        else:
            if recipient_type:
                # Filter by Recipient Type
                otable.pe_id.requires = IS_ONE_OF(current.db,
                                                  "pr_pentity.pe_id",
                                                  orderby="instance_type",
                                                  filterby="instance_type",
                                                  filter_opts=(recipient_type,))
            otable.pe_id.comment = DIV(_class="tooltip",
                                       _title="%s|%s" % \
                (T("Recipients"),
                 T("Please enter the first few letters of the Person/Group for the autocomplete.")))
        otable.pe_id.writable = True
        otable.pe_id.label = T("Recipient(s)")

        def compose_onvalidation(form):
            """
                Set the sender
                Route the message
            """

            if not vars.pe_id:
                current.session.error = T("Please enter the recipient(s)")
                redirect(url)
            if auth.user:
                sender_pe_id = auth.user.pe_id
            else:
                return
            if self.send_by_pe_id(vars.pe_id,
                                  vars.subject,
                                  vars.message,
                                  sender_pe_id,
                                  vars.pr_message_method):
                current.session.confirmation = T("Check outbox for the message status")
                redirect(url)
            else:
                current.session.error = T("Error in message")
                redirect(url)

        # Source forms
        crud = current.crud
        logform = crud.create(ltable,
                              onvalidation = compose_onvalidation,
                              formname = "msg_log/%s" % formid)
        outboxform = crud.create(otable,
                                 formname = "msg_outbox/%s" % formid)

        # Shortcuts
        lcustom = logform.custom
        ocustom = outboxform.custom

        pe_row = TR(TD(LABEL("%s:" % ocustom.label.pe_id)),
                    _id="msg_outbox_pe_id__row")
        if recipient:
            # Fixed recipient: hide the widget, show the represented name
            ocustom.widget.pe_id["_class"] = "hide"
            pe_row.append(TD(ocustom.widget.pe_id,
                             s3db.pr_pentity_represent(recipient,
                                                       show_label=False)))
        else:
            # Autocomplete input for recipient lookup
            pe_row.append(TD(INPUT(_id="dummy", _class="ac_input", _size="50"),
                             ocustom.widget.pe_id))
            pe_row.append(TD(ocustom.comment.pe_id))

        # Build a custom form from the 2 source forms
        form = DIV( lcustom.begin,
                    TABLE(
                        TBODY(
                            TR(TD(LABEL("%s:" % \
                                ocustom.label.pr_message_method)),
                               TD(ocustom.widget.pr_message_method),
                               TD(ocustom.comment.pr_message_method),
                               _id="msg_outbox_pr_message_method__row"
                            ),
                            pe_row,
                            TR(TD(LABEL("%s:" % lcustom.label.subject)),
                               TD(lcustom.widget.subject),
                               TD(lcustom.comment.subject),
                               _id="msg_log_subject__row"
                            ),
                            TR(TD(LABEL("%s:" % lcustom.label.message)),
                               TD(lcustom.widget.message),
                               TD(lcustom.comment.message),
                               _id="msg_log_message__row"
                            ),
                            # TR(TD(LABEL("%s:" % lcustom.label.priority)),
                            #    TD(lcustom.widget.priority),
                            #    TD(lcustom.comment.priority),
                            #    _id="msg_log_priority__row"
                            # ),
                            TR(TD(),
                               TD(INPUT(_type="submit",
                                        _value=T("Send message"),
                                        _id="dummy_submit")),
                               _id="submit_record__row"
                            ),
                        )
                    ),
                    lcustom.end)

        # Control the Javascript in static/scripts/S3/s3.msg.js
        if not recipient:
            s3 = current.response.s3
            if recipient_type:
                s3.js_global.append('''S3.msg_search_url="%s"''' % \
                                    URL(c="msg", f="search",
                                        vars={"type":recipient_type}))
            else:
                s3.js_global.append('''S3.msg_search_url="%s"''' % \
                                    URL(c="msg", f="search"))

            s3.jquery_ready.append('''s3_msg_ac_pe_input()''')

        # Default title
        # - can be overridden by the calling function
        title = T("Send Message")

        return dict(form = form,
                    title = title)

    # -------------------------------------------------------------------------
    @staticmethod
    def send_by_pe_id(pe_id,
                      subject="",
                      message="",
                      sender_pe_id = None,
                      pr_message_method = "EMAIL",
                      sender="",
                      fromaddress="",
                      system_generated = False):
        """
            Send a single message to a Person Entity (or list thereof)

            Logs the message in msg_log, queues one msg_outbox row per
            recipient and fires the async outbox processor.

            @ToDo: pr_message_method = ALL
                - look up the pr_contact options available for the pe & send via all

            @ToDo: This is not transaction safe
              - power failure in the middle will cause no message in the outbox
        """

        db = current.db
        s3db = current.s3db

        # Put the Message in the Log
        table = s3db.msg_log
        try:
            message_log_id = table.insert(pe_id = sender_pe_id,
                                          subject = subject,
                                          message = message,
                                          sender = sender,
                                          fromaddress = fromaddress)
        except:
            return False

        # Place the Message in the OutBox
        table = s3db.msg_outbox
        if isinstance(pe_id, list):
            # On failure, returns the count of recipients queued so far
            listindex = 0
            for id in pe_id:
                try:
                    table.insert(message_id = message_log_id,
                                 pe_id = id,
                                 pr_message_method = pr_message_method,
                                 system_generated = system_generated)
                    listindex = listindex + 1
                except:
                    return listindex
        else:
            try:
                table.insert(message_id = message_log_id,
                             pe_id = pe_id,
                             pr_message_method = pr_message_method,
                             system_generated = system_generated)
            except:
                return False

        # Process OutBox async
        current.s3task.async("msg_process_outbox",
                             args=[pr_message_method])

        return True

    # -------------------------------------------------------------------------
    def process_outbox(self, contact_method="EMAIL"):
        """
            Send Pending Messages from Outbox.
            If succesful then move from Outbox to Sent.
            Can be called from Cron

            @ToDo: contact_method = "ALL"
        """

        db = current.db
        s3db = current.s3db

        if contact_method == "SMS":
            table = s3db.msg_setting
            settings = db(table.id > 0).select(table.outgoing_sms_handler,
                                               limitby=(0, 1)).first()
            if not settings:
                raise ValueError("No SMS handler defined!")
            outgoing_sms_handler = settings.outgoing_sms_handler

        def dispatch_to_pe_id(pe_id):
            # Look up the recipient's best contact for this method and
            # dispatch via the configured channel; returns True on success
            table = s3db.pr_contact
            query = (table.pe_id == pe_id) & \
                    (table.contact_method == contact_method) & \
                    (table.deleted == False)
            recipient = db(query).select(table.value,
                                         orderby = table.priority,
                                         limitby=(0, 1)).first()
            if recipient:
                if contact_method == "EMAIL":
                    return self.send_email(recipient.value,
                                           subject,
                                           message)
                elif contact_method == "SMS":
                    if outgoing_sms_handler == "WEB_API":
                        return self.send_sms_via_api(recipient.value,
                                                     message)
                    elif outgoing_sms_handler == "SMTP":
                        return self.send_sms_via_smtp(recipient.value,
                                                      message)
                    elif outgoing_sms_handler == "MODEM":
                        return self.send_sms_via_modem(recipient.value,
                                                       message)
                    elif outgoing_sms_handler == "TROPO":
                        # NB This does not mean the message is sent
                        return self.send_text_via_tropo(row.id,
                                                        message_id,
                                                        recipient.value,
                                                        message)
                    else:
                        return False
                elif contact_method == "TWITTER":
                    return self.send_text_via_twitter(recipient.value,
                                                      message)
            return False

        table = s3db.msg_outbox
        ltable = s3db.msg_log
        ptable = s3db.pr_person
        petable = s3db.pr_pentity

        # Pending rows for this channel (status == 1)
        query = (table.status == 1) & \
                (table.pr_message_method == contact_method)
        rows = db(query).select()
        chainrun = False # Used to fire process_outbox again - Used when messages are sent to groups
        for row in rows:
            status = True
            message_id = row.message_id
            query = (ltable.id == message_id)
            logrow = db(query).select(limitby=(0, 1)).first()
            if not logrow:
                s3_debug("s3msg", "logrow not found")
                continue
            # Get message from msg_log
            message = logrow.message
            subject = logrow.subject
            sender_pe_id = logrow.pe_id
            # Determine list of users
            entity = row.pe_id
            query = petable.id == entity
            entity_type = db(query).select(petable.instance_type,
                                           limitby=(0, 1)).first()
db(query).select(petable.instance_type, limitby=(0, 1)).first() if entity_type: entity_type = entity_type.instance_type else: s3_debug("s3msg", "Entity type unknown") if entity_type == "pr_group": # Take the entities of it and add in the messaging queue - with # sender as the original sender and marks group email processed # Set system generated = True table3 = s3db.pr_group query = (table3.pe_id == entity) group_id = db(query).select(table3.id, limitby=(0, 1)).first().id table4 = s3db.pr_group_membership query = (table4.group_id == group_id) recipients = db(query).select(table4.person_id) for recipient in recipients: person_id = recipient.person_id query = (ptable.id == person_id) pe_id = db(query).select(ptable.pe_id, limitby=(0, 1)).first().pe_id table.insert(message_id = message_id, pe_id = pe_id, pr_message_method = contact_method, system_generated = True) status = True chainrun = True elif entity_type == "org_organisation": # Take the entities of it and add in the messaging queue - with # sender as the original sender and marks group email processed # Set system generated = True table3 = s3db.org_organisation query = (table3.pe_id == entity) org_id = db(query).select(table3.id, limitby=(0, 1)).first().id table4 = s3db.hrm_human_resource query = (table4.organisation_id == org_id) recipients = db(query).select(table4.person_id) for recipient in recipients: person_id = recipient.person_id uery = (ptable.id == person_id) pe_id = db(query).select(ptable.pe_id, limitby=(0, 1)).first().pe_id table.insert(message_id = message_id, pe_id = pe_id, pr_message_method = contact_method, system_generated = True) status = True chainrun = True if entity_type == "pr_person": # Person status = dispatch_to_pe_id(entity) if status: # Update status to sent in Outbox db(table.id == row.id).update(status=2) # Set message log to actioned db(ltable.id == message_id).update(actioned=True) # Explicitly commit DB operations when running from Cron db.commit() if chainrun : 
self.process_outbox(contact_method) return # ------------------------------------------------------------------------- # Send Email # ------------------------------------------------------------------------- def send_email(self, to, subject, message, attachments=None, cc=None, bcc=None, reply_to=None, sender="%(sender)s", encoding="utf-8"): """ Function to send Email - simple Wrapper over Web2Py's Email API @ToDo: Better Error checking: http://eden.sahanafoundation.org/ticket/439 """ if not to: return False settings = current.deployment_settings default_sender = settings.get_mail_sender() if not default_sender: s3_debug("Email sending disabled until the Sender address has been set in models/000_config.py") return False limit = settings.get_mail_limit() if limit: db = current.db s3db = current.db table = s3db.msg_limit # Check whether we've reached our daily limit day = datetime.timedelta(hours=24) cutoff = current.request.utcnow - day query = (table.created_on > cutoff) check = db(query).count() if check >= limit: return False # Log the sending table.insert() result = current.mail.send(to, subject=subject, message=message, attachments=attachments, cc=cc, bcc=bcc, reply_to=reply_to, # @ToDo: Once more people have upgrade their web2py #sender=sender, encoding=encoding ) return result # ------------------------------------------------------------------------- def send_email_by_pe_id(self, pe_id, subject="", message="", sender_pe_id=None, # s3_logged_in_person() is useful here sender="", fromaddress="", system_generated=False): """ API wrapper over send_by_pe_id """ return self.send_by_pe_id(pe_id, subject, message, sender_pe_id, "EMAIL", sender, fromaddress, system_generated) # ========================================================================= # SMS # ========================================================================= # ------------------------------------------------------------------------- # OpenGeoSMS # 
    # -------------------------------------------------------------------------
    @staticmethod
    def prepare_opengeosms(location_id, code="S", map="google", text=""):
        """
            Function to create an OpenGeoSMS

            @param: location_id - reference to record in gis_location table
            @param: code - the type of OpenGeoSMS:
                S = Sahana
                SI = Incident Report
                ST = Task Dispatch
            @param: map: "google" or "osm"
            @param: text - the rest of the message

            Returns the formatted OpenGeoSMS or None if it can't find
                an appropriate location
        """

        if not location_id:
            return text

        db = current.db
        s3db = current.s3db
        table = s3db.gis_location
        query = (table.id == location_id)
        location = db(query).select(table.lat,
                                    table.lon,
                                    #table.path,
                                    #table.parent,
                                    limitby=(0, 1)).first()
        if not location:
            return text
        lat = location.lat
        lon = location.lon
        if lat is None or lon is None:
            # @ToDo: Should we try parents? Or would that not be granular enough anyway?
            return text

        code = "GeoSMS=%s" % code

        if map == "google":
            url = "http://maps.google.com/?q=%f,%f" % (lat, lon)
        elif map == "osm":
            # NB Not sure how this will work in OpenGeoSMS client
            url = "http://openstreetmap.org?mlat=%f&mlon=%f&zoom=14" % (lat, lon)

        # URL & code first, then the free text on a new line
        opengeosms = "%s&%s\n%s" % (url, code, text)

        return opengeosms

    # -------------------------------------------------------------------------
    @staticmethod
    def parse_opengeosms(message):
        """
           Function to parse an OpenGeoSMS

           @param: message - Inbound message to be parsed for OpenGeoSMS.

           Returns the lat, lon, code and text contained in the message
           (all empty strings if the message is not an OpenGeoSMS).
        """

        lat = ""
        lon = ""
        code = ""
        text = ""

        s3db = current.s3db
        words = string.split(message)
        if "http://maps.google.com/?q" in words[0]:
            # Parse OpenGeoSMS: first word is the maps URL, e.g.
            # http://maps.google.com/?q=<lat>,<lon>&GeoSMS=<code>
            pwords = words[0].split("?q=")[1].split(",")
            lat = pwords[0]
            lon = pwords[1].split("&")[0]
            code = pwords[1].split("&")[1].split("=")[1]
            text = ""
            for a in range(1, len(words)):
                text = text + words[a] + " "

        return lat, lon, code, text

    # -------------------------------------------------------------------------
    # Send SMS
    # -------------------------------------------------------------------------
    def send_sms_via_modem(self, mobile, text=""):
        """
            Function to send SMS via locally-attached Modem
            - needs to have the cron/sms_handler_modem.py script running
        """

        mobile = self.sanitise_phone(mobile)

        # Add '+' before country code
        mobile = "+%s" % mobile

        try:
            self.modem.send_sms(mobile, text)
            return True
        except KeyError:
            s3_debug("s3msg", "Modem not available: need to have the cron/sms_handler_modem.py script running")
            return False

    # -------------------------------------------------------------------------
    def send_sms_via_api(self, mobile, text=""):
        """
            Function to send SMS via Web API

            Uses the enabled msg_api_settings row; its 'parameters' field is
            an &-separated key=value string appended to every request.
        """

        db = current.db
        s3db = current.s3db
        table = s3db.msg_api_settings

        # Get Configuration
        query = (table.enabled == True)
        sms_api = db(query).select(limitby=(0, 1)).first()
        if not sms_api:
            return False

        sms_api_post_config = {}

        tmp_parameters = sms_api.parameters.split("&")
        for tmp_parameter in tmp_parameters:
            sms_api_post_config[tmp_parameter.split("=")[0]] = \
                                        tmp_parameter.split("=")[1]

        mobile = self.sanitise_phone(mobile)

        try:
            sms_api_post_config[sms_api.message_variable] = text
            sms_api_post_config[sms_api.to_variable] = str(mobile)
            query = urllib.urlencode(sms_api_post_config)
            request = urllib.urlopen(sms_api.url, query)
            output = request.read()
            return True
        except:
            return False

    # -------------------------------------------------------------------------
    def send_sms_via_smtp(self, mobile, text=""):
        """
            Function to send SMS via SMTP

            NB
            Different Gateways have different requirements for presence/absence of International code

            http://en.wikipedia.org/wiki/List_of_SMS_gateways
            http://www.obviously.com/tech_tips/SMS_Text_Email_Gateway.html
        """

        table = current.s3db.msg_smtp_to_sms_settings
        query = (table.enabled == True)
        settings = current.db(query).select(limitby=(0, 1)
                                            ).first()
        if not settings:
            return False
        mobile = self.sanitise_phone(mobile)

        # SMTP-to-SMS gateways accept mail to <number>@<gateway-domain>
        to = "%s@%s" % (mobile,
                        settings.address)

        try:
            result = self.send_email(to=to,
                                     subject="",
                                     message= text)
            return result
        except:
            return False

    #-------------------------------------------------------------------------------------------------
    def send_text_via_tropo(self,
                            row_id,
                            message_id,
                            recipient,
                            message,
                            network = "SMS"):
        """
            Send a URL request to Tropo to pick a message up

            Stores the payload in msg_tropo_scratch, then pokes the Tropo
            session API; Tropo calls back in parallel to fetch & send it.
        """

        db = current.db
        s3db = current.s3db
        table = s3db.msg_tropo_settings

        base_url = "http://api.tropo.com/1.0/sessions"
        action = "create"

        query = (table.id == 1)
        tropo_settings = db(query).select(table.token_messaging,
                                          limitby=(0, 1)).first()
        if tropo_settings:
            tropo_token_messaging = tropo_settings.token_messaging
            #tropo_token_voice = tropo_settings.token_voice
        else:
            return

        if network == "SMS":
            recipient = self.sanitise_phone(recipient)

        try:
            s3db.msg_tropo_scratch.insert(row_id = row_id,
                                          message_id = message_id,
                                          recipient = recipient,
                                          message = message,
                                          network = network)
            params = urllib.urlencode([("action", action),
                                       ("token", tropo_token_messaging),
                                       ("outgoing", "1"),
                                       ("row_id", row_id)
                                      ])
            xml = urlopen("%s?%s" % (base_url, params)).read()
            # Parse Response (actual message is sent as a response to the POST which will happen in parallel)
            #root = etree.fromstring(xml)
            #elements = root.getchildren()
            #if elements[0].text == "false":
            #    session.error = T("Message sending failed!
Reason:") + " " + elements[2].text # redirect(URL(f='index')) #else: # session.flash = T("Message Sent") # redirect(URL(f='index')) except: pass return False # Returning False because the API needs to ask us for the messsage again. # ------------------------------------------------------------------------- def send_sms_by_pe_id(self, pe_id, message="", sender_pe_id=None, # s3_logged_in_person() is useful here sender="", fromaddress="", system_generated=False): """ API wrapper over send_by_pe_id """ return self.send_by_pe_id(pe_id, message, sender_pe_id, "SMS", sender, fromaddress, system_generated, subject="" ) # ------------------------------------------------------------------------- # Twitter # ------------------------------------------------------------------------- @staticmethod def sanitise_twitter_account(account): """ Only keep characters that are legal for a twitter account: letters, digits, and _ """ return account.translate(IDENTITYTRANS, NOTTWITTERCHARS) # ------------------------------------------------------------------------- @staticmethod def break_to_chunks(text, chunk_size=TWITTER_MAX_CHARS, suffix = TWITTER_HAS_NEXT_SUFFIX, prefix = TWITTER_HAS_PREV_PREFIX): """ Breaks text to <=chunk_size long chunks. Tries to do this at a space. All chunks, except for last, end with suffix. All chunks, except for first, start with prefix. 
        """

        res = []
        current_prefix = "" # first chunk has no prefix
        while text:
            if len(current_prefix + text) <= chunk_size:
                # Remainder fits: emit final chunk (no suffix) and finish
                res.append(current_prefix + text)
                return res
            else: # break a chunk
                c = text[:chunk_size - len(current_prefix) - len(suffix)]
                i = c.rfind(" ")
                if i > 0: # got a blank
                    c = c[:i]
                text = text[len(c):].lstrip()
                res.append((current_prefix + c.rstrip() + suffix))
                current_prefix = prefix # from now on, we want a prefix

    # -------------------------------------------------------------------------
    def get_twitter_api(self):
        """
            Initialize Twitter API

            Returns dict(twitter_api=..., twitter_account=...) on success,
            or None when tweepy is missing or OAuth is not configured.
            Side effect: stores the tweepy module on self.tweepy.
        """

        try:
            import tweepy
        except ImportError:
            s3_debug("s3msg", "Tweepy not available, so non-Tropo Twitter support disabled")
            return None
        else:
            self.tweepy = tweepy

        table = current.s3db.msg_twitter_settings
        twitter_settings = current.db(table.id > 0).select(table.oauth_key,
                                                           table.oauth_secret,
                                                           table.twitter_account,
                                                           limitby=(0, 1)
                                                           ).first()
        if twitter_settings and twitter_settings.twitter_account:
            settings = current.deployment_settings.msg
            try:
                oauth = tweepy.OAuthHandler(settings.twitter_oauth_consumer_key,
                                            settings.twitter_oauth_consumer_secret)
                oauth.set_access_token(twitter_settings.oauth_key,
                                       twitter_settings.oauth_secret)
                twitter_api = tweepy.API(oauth)
                twitter_account = twitter_settings.twitter_account
                return dict(twitter_api=twitter_api,
                            twitter_account=twitter_account)
            except:
                pass

        return None

    # -------------------------------------------------------------------------
    def send_text_via_twitter(self, recipient, text=""):
        """
            Function to send text to recipient via direct message (if recipient follows us).
            Falls back to @mention (leaves less characters for the message).
            Breaks long text to chunks if needed.
            @ToDo: Option to Send via Tropo
        """

        # Initialize Twitter API
        twitter_settings = self.get_twitter_api()
        if not twitter_settings:
            # Abort
            return False
        tweepy = self.tweepy

        twitter_api = None
        if twitter_settings:
            twitter_api = twitter_settings["twitter_api"]
            twitter_account = twitter_settings["twitter_account"]

        if not twitter_api and text:
            # Abort
            return False

        recipient = self.sanitise_twitter_account(recipient)
        try:
            can_dm = twitter_api.exists_friendship(recipient, twitter_account)
        except tweepy.TweepError: # recipient not found
            return False
        if can_dm:
            chunks = self.break_to_chunks(text, TWITTER_MAX_CHARS)
            for c in chunks:
                try:
                    # Note: send_direct_message() requires explicit kwargs (at least in tweepy 1.5)
                    # See http://groups.google.com/group/tweepy/msg/790fcab8bc6affb5
                    twitter_api.send_direct_message(screen_name=recipient,
                                                    text=c)
                except tweepy.TweepError:
                    s3_debug("Unable to Tweet DM")
        else:
            # Not a follower: fall back to a public @mention
            prefix = "@%s " % recipient
            chunks = self.break_to_chunks(text,
                                          TWITTER_MAX_CHARS - len(prefix))
            for c in chunks:
                try:
                    twitter_api.update_status(prefix + c)
                except tweepy.TweepError:
                    s3_debug("Unable to Tweet @mention")
        return True

    #-------------------------------------------------------------------------
    def receive_subscribed_tweets(self):
        """
            Function to call to drop the tweets into search_results table
            - called via cron or twitter_search_results controller
        """

        # Initialize Twitter API
        twitter_settings = self.get_twitter_api()
        if not twitter_settings:
            # Abort
            return False
        tweepy = self.tweepy

        twitter_api = None
        if twitter_settings:
            twitter_api = twitter_settings["twitter_api"]

        if not twitter_api:
            # Abort
            return False

        from s3parser import S3Parsing
        parser = S3Parsing.parser

        db = current.db
        s3db = current.s3db
        results_table = s3db.msg_twitter_search_results
        table = s3db.msg_twitter_search
        rows = db(table.id > 0).select(table.id,
                                       table.search_query)

        # Get the latest updated post time to use it as since_id in twitter search
        recent_time = results_table.posted_at.max()
        for row in rows:
            query = row.search_query
            try:
                if recent_time:
                    search_results = twitter_api.search(query,
                                                        result_type="recent",
                                                        show_user=True,
                                                        since_id=recent_time)
                else:
                    search_results = twitter_api.search(query,
                                                        result_type="recent",
                                                        show_user=True)

                # Oldest first, so posted_at ordering is preserved on insert
                search_results.reverse()

                id = row.id
                for result in search_results:
                    # Check if the tweet already exists in the table
                    query = (results_table.posted_by == result.from_user) & \
                            (results_table.posted_at == result.created_at)
                    tweet_exists = db(query).select(results_table.id,
                                                    limitby=(0, 1)
                                                    ).first()

                    if tweet_exists:
                        continue
                    else:
                        tweet = result.text
                        posted_by = result.from_user
                        if result.geo:
                            coordinates = result.geo["coordinates"]
                        else:
                            coordinates = None
                        # Parser classifies the tweet & geocodes it
                        category, priority, location_id = parser("filter",
                                                                 tweet,
                                                                 posted_by,
                                                                 service="twitter",
                                                                 coordinates=coordinates)
                        results_table.insert(tweet = tweet,
                                             category = category,
                                             priority = priority,
                                             location_id = location_id,
                                             posted_by = posted_by,
                                             posted_at = result.created_at,
                                             twitter_search = id
                                             )
            except tweepy.TweepError:
                s3_debug("Unable to get the Tweets for the user search query.")
                return False

            # Explicitly commit DB operations when running from Cron
            db.commit()

        return True

    #-------------------------------------------------------------------------
    def fetch_inbound_email(self, username):
        """
            This is a simple mailbox polling script for the Messaging Module.
            It is called from the scheduler.

            @param username: email address of the email source to read from.
            This uniquely identifies one inbound email task.

            Supports POP3 & IMAP per the msg_inbound_email_settings record;
            each message is stored in msg_email_inbox and msg_log.
        """
        # This is the former cron/email_receive.py.
        #
        # @ToDo: If delete_from_server is false, we don't want to download the
        # same messages repeatedly.  Perhaps record time of fetch runs (or use
        # info from the scheduler_run table), compare w/ message timestamp, as
        # a filter.  That may not be completely accurate, so could check
        # msg_log for messages close to the last fetch time.  Or just advise
        # people to have a dedicated account to which email is sent, that does
        # not also need to be read by humans.  Or don't delete the fetched mail
        # until the next run.  Or...
        #
        # ToDos from the original version:
        # @ToDo: If there is a need to collect from non-compliant mailers
        # then suggest using the robust Fetchmail to collect & store in
        # a more compliant mailer!
        # @ToDo: This doesn't handle MIME attachments.

        import socket, email

        db = current.db
        s3db = current.s3db

        inbound_status_table = s3db.msg_inbound_email_status
        inbox_table = s3db.msg_email_inbox
        log_table = s3db.msg_log
        source_task_id = username

        # Read-in configuration from Database
        settings = db(s3db.msg_inbound_email_settings.username == username).select(limitby=(0, 1)).first()
        if not settings:
            return "Username %s not scheduled." % username
        host = settings.server
        protocol = settings.protocol
        ssl = settings.use_ssl
        port = settings.port
        username = settings.username
        password = settings.password
        delete = settings.delete_from_server

        if protocol == "pop3":
            import poplib
            # http://docs.python.org/library/poplib.html
            try:
                if ssl:
                    p = poplib.POP3_SSL(host, port)
                else:
                    p = poplib.POP3(host, port)
            except socket.error, e:
                error = "Cannot connect: %s" % e
                print error
                # Store status in the DB
                try:
                    id = db().select(inbound_status_table.id, limitby=(0, 1)).first().id
                    db(inbound_status_table.id == id).update(status=error)
                except:
                    inbound_status_table.insert(status=error)
                # Explicitly commit DB operations when running from Cron
                db.commit()
                return True

            try:
                # Attempting APOP authentication...
                p.apop(username, password)
            except poplib.error_proto:
                # Attempting standard authentication...
                try:
                    p.user(username)
                    p.pass_(password)
                except poplib.error_proto, e:
                    print "Login failed:", e
                    # Store status in the DB
                    try:
                        id = db().select(inbound_status_table.id, limitby=(0, 1)).first().id
                        db(inbound_status_table.id == id).update(status="Login failed: %s" % e)
                    except:
                        inbound_status_table.insert(status="Login failed: %s" % e)
                    # Explicitly commit DB operations when running from Cron
                    db.commit()
                    return True

            dellist = []
            mblist = p.list()[1]
            for item in mblist:
                number, octets = item.split(" ")
                # Retrieve the message (storing it in a list of lines)
                lines = p.retr(number)[1]
                # Create an e-mail object representing the message
                msg = email.message_from_string("\n".join(lines))
                # Parse out the 'From' Header
                sender = msg["from"]
                # Parse out the 'Subject' Header
                if "subject" in msg:
                    subject = msg["subject"]
                else:
                    subject = ""
                # Parse out the 'Body'
                # NOTE(review): assumes the first payload part is text -
                # multipart/MIME messages are not handled (see @ToDo above)
                textParts = msg.get_payload()
                body = textParts[0]
                # Store in DB
                inbox_table.insert(sender=sender, subject=subject, body=body)
                log_table.insert(sender=sender, subject=subject, message=body, \
                                 source_task_id=source_task_id, inbound=True)

                if delete:
                    # Add it to the list of messages to delete later
                    dellist.append(number)
            # Explicitly commit DB operations when running from Cron.
            # @ToDo:  Still needed when running under Scheduler?
            db.commit()
            # Iterate over the list of messages to delete
            for number in dellist:
                p.dele(number)
            p.quit()

        elif protocol == "imap":
            import imaplib
            # http://docs.python.org/library/imaplib.html
            try:
                if ssl:
                    M = imaplib.IMAP4_SSL(host, port)
                else:
                    M = imaplib.IMAP4(host, port)
            except socket.error, e:
                error = "Cannot connect: %s" % e
                print error
                # Store status in the DB
                try:
                    id = db().select(inbound_status_table.id, limitby=(0, 1)).first().id
                    db(inbound_status_table.id == id).update(status=error)
                except:
                    inbound_status_table.insert(status=error)
                # Explicitly commit DB operations when running from Cron
                # @ToDo: Still needed when running under Scheduler?
                db.commit()
                return True

            try:
                M.login(username, password)
            except M.error, e:
                error = "Login failed: %s" % e
                print error
                # Store status in the DB
                try:
                    id = db().select(inbound_status_table.id, limitby=(0, 1)).first().id
                    db(inbound_status_table.id == id).update(status=error)
                except:
                    inbound_status_table.insert(status=error)
                # Explicitly commit DB operations when running from Cron
                db.commit()
                return True

            dellist = []
            # Select inbox
            M.select()
            # Search for Messages to Download
            typ, data = M.search(None, "ALL")
            for num in data[0].split():
                typ, msg_data = M.fetch(num, "(RFC822)")
                for response_part in msg_data:
                    if isinstance(response_part, tuple):
                        msg = email.message_from_string(response_part[1])
                        # Parse out the 'From' Header
                        sender = msg["from"]
                        # Parse out the 'Subject' Header
                        if "subject" in msg:
                            subject = msg["subject"]
                        else:
                            subject = ""
                        # Parse out the 'Body'
                        textParts = msg.get_payload()
                        body = textParts[0]
                        # Store in DB
                        inbox_table.insert(sender=sender, subject=subject, body=body)
                        log_table.insert(sender=sender, subject=subject, \
                                         message=body, source_task_id=source_task_id, \
                                         inbound = True)

                        if delete:
                            # Add it to the list of messages to delete later
                            dellist.append(num)
            # Explicitly commit DB operations when running from Cron
            db.commit()
            # Iterate over the list of messages to delete
            for number in dellist:
                typ, response = M.store(number, "+FLAGS", r"(\Deleted)")
            M.close()
            M.logout()
""" db = current.db table = db["scheduler_task"] records = db(table.id > 0).select() for record in records: if record.vars.split(":") == ["{\"username\""," \"%s\"}" %username] : return record.id # ============================================================================= @staticmethod def twilio_inbound_sms(account_name): """ Fetches the inbound sms from twilio API.""" s3db = current.s3db db = current.db ttable = s3db.msg_twilio_inbound_settings query = (ttable.account_name == account_name) & \ (ttable.deleted == False) account = db(query).select(limitby=(0,1)).first() if account: url = account.url account_sid = account.account_sid auth_token = account.auth_token url += "/%s/SMS/Messages.json"%str(account_sid) import urllib, urllib2 # this creates a password manager passman = urllib2.HTTPPasswordMgrWithDefaultRealm() passman.add_password(None, url, account_sid, auth_token) # create the AuthHandler authhandler = urllib2.HTTPBasicAuthHandler(passman) opener = urllib2.build_opener(authhandler) urllib2.install_opener(opener) downloaded_sms = [] itable = s3db.msg_twilio_inbox ltable = s3db.msg_log query = itable.deleted == False messages = db(query).select(itable.sid) downloaded_sms = [message.sid for message in messages] try: smspage = urllib2.urlopen(url) import json minsert = itable.insert linsert = ltable.insert sms_list = json.loads(smspage.read()) for sms in sms_list["sms_messages"]: if (sms["direction"] == "inbound") and \ (sms["sid"] not in downloaded_sms): sender = "<" + sms["from"] + ">" minsert(sid=sms["sid"],body=sms["body"], \ status=sms["status"],sender=sender, \ received_on=sms["date_sent"]) linsert(sender=sender, message=sms["body"], \ source_task_id=account_name, inbound=True) except urllib2.HTTPError, e: return "Error:" + str(e.code) db.commit() return # ============================================================================= class S3Compose(S3CRUD): """ RESTful method for messaging """ # 
------------------------------------------------------------------------- def apply_method(self, r, **attr): """ API entry point @param r: the S3Request instance @param attr: controller attributes for the request """ if r.http in ("GET", "POST"): output = self.compose(r, **attr) else: r.error(405, current.manager.ERROR.BAD_METHOD) return output # ------------------------------------------------------------------------- def compose(self, r, **attr): """ Generate a form to send a message @param r: the S3Request instance @param attr: controller attributes for the request """ T = current.T auth = current.auth url = r.url() self.url = url # @ToDo: Use API if auth.is_logged_in() or auth.basic(): pass else: redirect(URL(c="default", f="user", args="login", vars={"_next" : url})) if not current.deployment_settings.has_module("msg"): current.session.error = T("Cannot send messages if Messaging module disabled") redirect(URL(f="index")) if not auth.permission.has_permission("update", c="msg"): current.session.error = T("You do not have permission to send messages") redirect(URL(f="index")) #_vars = r.get_vars self.recipients = None form = self._compose_form() # @ToDo: A 2nd Filter form # if form.accepts(r.post_vars, current.session, # formname="compose", # keepvalues=True): # query, errors = self._process_filter_options(form) # if r.http == "POST" and not errors: # self.resource.add_filter(query) # _vars = form.vars # Apply method resource = self.resource representation = r.representation if self.method == "compose": #output = dict(items=items) output = dict(form=form) else: r.error(501, current.manager.ERROR.BAD_METHOD) # Complete the page if representation == "html": title = self.crud_string(self.tablename, "title_compose") if not title: title = T("Send Message") # subtitle = self.crud_string(self.tablename, "subtitle_compose") # if not subtitle: # subtitle = "" # Maintain RHeader for consistency if "rheader" in attr: rheader = attr["rheader"](r) if rheader: 
output["rheader"] = rheader output["title"] = title #output["subtitle"] = subtitle #output["form"] = form #current.response.view = self._view(r, "list_create.html") current.response.view = self._view(r, "create.html") return output # ------------------------------------------------------------------------- def _compose_onvalidation(self, form): """ Set the sender Route the message """ T = current.T auth = current.auth msg = current.msg session = current.session vars = current.request.post_vars recipients = self.recipients if not recipients: if not vars.pe_id: session.error = T("Please enter the recipient(s)") redirect(self.url) else: recipients = vars.pe_id table = current.s3db.pr_person if auth.user: sender_pe_id = auth.user.pe_id else: return if msg.send_by_pe_id(recipients, vars.subject, vars.message, sender_pe_id, vars.pr_message_method): session.confirmation = T("Check outbox for the message status") redirect(self.url) else: session.error = T("Error in message") redirect(self.url) # ------------------------------------------------------------------------- def _compose_form(self): """ Creates the form for composing the message """ resource = self.resource table = resource.table T = current.T db = current.db s3db = current.s3db crud = current.crud s3 = current.response.s3 ltable = s3db.msg_log otable = s3db.msg_outbox # @ToDo: read request.get_vars.message? 
#ltable.message.default = message # See if we have defined a custom recipient type for this table # pr_person or pr_group recipient_type = self._config("msg_recipient_type", None) # See if we have defined a custom default contact method for this table type = self._config("msg_contact_method", "SMS") otable.pr_message_method.default = type ltable.pe_id.writable = ltable.pe_id.readable = False ltable.sender.writable = ltable.sender.readable = False ltable.fromaddress.writable = ltable.fromaddress.readable = False ltable.verified.writable = ltable.verified.readable = False ltable.verified_comments.writable = ltable.verified_comments.readable = False ltable.actioned.writable = ltable.actioned.readable = False ltable.actionable.writable = ltable.actionable.readable = False ltable.actioned_comments.writable = ltable.actioned_comments.readable = False ltable.subject.label = T("Subject") ltable.message.label = T("Message") #ltable.priority.label = T("Priority") if "pe_id" in table: field = "pe_id" elif "person_id" in table: field = "person_id$pe_id" elif "group_id" in table: field = None # "group_id$pe_id"? 
else: field = None if field: records = resource.select([field]) if records: rfield = resource.resolve_selector(field) items = resource.extract(records, [field]) recipients = [item[rfield.colname] for item in items] else: recipients = [] if recipients: self.recipients = recipients ltable.pe_id.default = recipients otable.pe_id.default = recipients ltable.pe_id.requires = IS_ONE_OF_EMPTY(db, "pr_pentity.pe_id", multiple=True) else: if recipient_type: # Filter by Recipient Type otable.pe_id.requires = IS_ONE_OF(db, "pr_pentity.pe_id", orderby="instance_type", filterby="instance_type", filter_opts=(recipient_type,)) otable.pe_id.comment = DIV(_class="tooltip", _title="%s|%s" % \ (T("Recipients"), T("Please enter the first few letters of the Person/Group for the autocomplete."))) otable.pe_id.writable = True otable.pe_id.label = T("Recipient(s)") # Source forms logform = crud.create(ltable, onvalidation = self._compose_onvalidation) outboxform = crud.create(otable) # Shortcuts lcustom = logform.custom ocustom = outboxform.custom pe_row = TR(TD(LABEL("%s:" % ocustom.label.pe_id)), _id="msg_outbox_pe_id__row") if recipients: if len(recipients) == 1: represent = s3.pr_pentity_represent(recipients[0], show_label=False) else: # @ToDo: This should be the filter results represent = T("Multiple") pe_row.append(TD(represent)) else: # @ToDo: This should be an S3Search form pe_row.append(TD(INPUT(_id="dummy", _class="ac_input", _size="50"), ocustom.widget.pe_id)) pe_row.append(TD(ocustom.comment.pe_id)) # Build a custom form from the 2 source forms form = DIV( lcustom.begin, TABLE( TBODY( TR(TD(LABEL("%s:" % \ ocustom.label.pr_message_method)), TD(ocustom.widget.pr_message_method), TD(ocustom.comment.pr_message_method), _id="msg_outbox_pr_message_method__row" ), pe_row, TR(TD(LABEL("%s:" % lcustom.label.subject)), TD(lcustom.widget.subject), TD(lcustom.comment.subject), _id="msg_log_subject__row" ), TR(TD(LABEL("%s:" % lcustom.label.message)), TD(lcustom.widget.message), 
TD(lcustom.comment.message), _id="msg_log_message__row" ), # TR(TD(LABEL("%s:" % lcustom.label.priority)), # TD(lcustom.widget.priority), # TD(lcustom.comment.priority), # _id="msg_log_priority__row" # ), TR(TD(), TD(INPUT(_type="submit", _value=T("Send message"), _id="dummy_submit")), _id="submit_record__row" ), ) ), lcustom.end) # Control the Javascript in static/scripts/S3/s3.msg.js if not recipients: if recipient_type: s3.js_global.append('''S3.msg_search_url="%s"''' % \ URL(c="msg", f="search", vars={"type":recipient_type})) else: s3.js_global.append('''S3.msg_search_url="%s"''' % \ URL(c="msg", f="search")) s3.jquery_ready.append('''s3_msg_ac_pe_input()''') return form # END =========================================================================
{ "content_hash": "c78c5bec9252aaf8634695ae431b3572", "timestamp": "", "source": "github", "line_count": 1783, "max_line_length": 124, "avg_line_length": 40.08020190689849, "alnum_prop": 0.4456431999776108, "repo_name": "vgupta6/Project-2", "id": "54febc79f745f30d56da3d21dbebfc13f58c650f", "size": "71532", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "modules/s3/s3msg.py", "mode": "33188", "license": "mit", "language": [ { "name": "JavaScript", "bytes": "15540599" }, { "name": "PHP", "bytes": "15220" }, { "name": "Perl", "bytes": "2202" }, { "name": "Python", "bytes": "23301481" }, { "name": "Racket", "bytes": "166" } ], "symlink_target": "" }
# Django schema migration for the ``curation`` app: adds an optional
# ``thumbnail`` file-upload field to the ``Video`` model.
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Add a ``thumbnail`` FileField to ``curation.Video``."""

    # Must be applied after migration 0015 of the ``curation`` app.
    dependencies = [
        ('curation', '0015_auto_20160725_1102'),
    ]

    operations = [
        migrations.AddField(
            model_name='video',
            name='thumbnail',
            # Optional upload, stored under MEDIA_ROOT/thumbnails.
            # NOTE(review): default=None on a FileField is unusual -- existing
            # rows presumably get an empty file value; confirm intent.
            field=models.FileField(default=None, upload_to='thumbnails', blank=True),
        ),
    ]
{ "content_hash": "1a7a1e68ce30db5d3dfa3ea450dd9fa2", "timestamp": "", "source": "github", "line_count": 18, "max_line_length": 85, "avg_line_length": 23, "alnum_prop": 0.606280193236715, "repo_name": "webisteme/instavision", "id": "f2a22430d3f0983696d91686529356967e17fce6", "size": "438", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "instavision/curation/migrations/0016_video_thumbnail.py", "mode": "33188", "license": "mit", "language": [], "symlink_target": "" }
from JumpScale9 import j
import time
import libtmux as tmuxp
import os
JSBASE = j.application.jsbase_get_class()

# from .Pane import Pane
from .Window import Window


class Session(JSBASE):
    """Wrapper around a libtmux session.

    Caches the session's windows as :class:`Window` wrappers in
    ``self.windows`` and keeps that cache in sync with what tmux reports
    via :meth:`reload`.
    """

    def __init__(self, session):
        """
        :param session: libtmux session object; must expose ``get()`` and
            ``list_windows()``.  Kept as ``self.mgmt``.
        """
        # if j.core.db is None:
        #     j.clients.redis.core_start()
        #     j.core.db = j.clients.redis.get()
        JSBASE.__init__(self)
        self.id = session.get("session_id")
        self.name = session.get("session_name")
        # Underlying libtmux session handle used for all tmux operations.
        self.mgmt = session
        self.reload()

    def reload(self):
        """Rebuild ``self.windows`` from the windows tmux currently reports."""
        self.windows = []
        for w in self.mgmt.list_windows():
            self.windows.append(Window(self, w))

    def window_remove(self, name):
        """Kill the window called ``name`` (if present) and refresh the cache.

        tmux destroys a session when its last window dies, so when fewer
        than two windows remain a placeholder window named "ignore" is
        created first to keep the session alive.
        """
        windows = self.mgmt.list_windows()
        if len(windows) < 2:
            # Keep the session alive: tmux closes it with its last window.
            self.window_get(name="ignore", removeIgnore=False)
        for w in self.mgmt.windows:
            wname = w.get("window_name")
            if name == wname:
                w.kill_window()
        # j.core.db.delete("tmux:pane:%s" % self.name)
        self.reload()

    def window_exists(self, name):
        """Return True if a window with this name is in the cached list."""
        for window in self.windows:
            if window.name == name:
                return True
        return False

    def window_get(self, name, start_directory=None, attach=False, reset=False, removeIgnore=True):
        """Return the window called ``name``, creating it if it does not exist.

        :param name: window name to find or create.
        :param start_directory: working directory for a newly created window.
        :param attach: passed through to libtmux ``new_window``.
        :param reset: if True, an existing window of this name is killed and
            recreated.
        :param removeIgnore: if True, any "ignore" placeholder window (see
            :meth:`window_remove`) is cleaned up once a real window exists.
        """
        # from pudb import set_trace; set_trace()
        if reset:
            self.window_remove(name)
        for window in self.windows:
            if window.name == name:
                # is right construct, means we found a window, now we can safely remove ignore
                if self.window_exists("ignore") and removeIgnore:
                    self.window_remove("ignore")
                return window
        self.logger.debug("create window:%s" % name)
        # j.core.db.delete("tmux:pane:%s" % name)
        res = self.mgmt.new_window(
            name, start_directory=start_directory, attach=attach)
        window = Window(self, res)
        self.windows.append(window)
        window.select()
        # when only 1 pane then ignore had to be created again
        if self.window_exists("ignore") and removeIgnore:
            self.window_remove("ignore")
        return window

    def kill(self):
        """Not implemented: always raises.  Presumably a placeholder -- TODO confirm."""
        raise j.exceptions.RuntimeError("kill")

    def __repr__(self):
        return ("session:%s:%s" % (self.id, self.name))

    __str__ = __repr__
{ "content_hash": "0eddd242e5da45249dd39fa20f33f491", "timestamp": "", "source": "github", "line_count": 80, "max_line_length": 99, "avg_line_length": 29.85, "alnum_prop": 0.5728643216080402, "repo_name": "Jumpscale/core9", "id": "3be7165eede63bb00be5f8878bd4e11894b3713b", "size": "2389", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "JumpScale9/tools/tmux/Session.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Cap'n Proto", "bytes": "7695" }, { "name": "Lua", "bytes": "31125" }, { "name": "Python", "bytes": "1171144" }, { "name": "Shell", "bytes": "42008" } ], "symlink_target": "" }
""" screen_size property """ from rebulk.remodule import re from rebulk import Rebulk, Rule, RemoveMatch from ..common.validators import seps_surround from ..common import dash, seps def screen_size(): """ Builder for rebulk object. :return: Created Rebulk object :rtype: Rebulk """ def conflict_solver(match, other): """ Conflict solver for most screen_size. """ if other.name == 'screen_size': if 'resolution' in other.tags: # The chtouile to solve conflict in "720 x 432" string matching both 720p pattern int_value = _digits_re.findall(match.raw)[-1] if other.value.startswith(int_value): return match return other return '__default__' rebulk = Rebulk().string_defaults(ignore_case=True).regex_defaults(flags=re.IGNORECASE) rebulk.defaults(name="screen_size", validator=seps_surround, conflict_solver=conflict_solver) rebulk.regex(r"(?:\d{3,}(?:x|\*))?360(?:i|p?x?)", value="360p") rebulk.regex(r"(?:\d{3,}(?:x|\*))?368(?:i|p?x?)", value="368p") rebulk.regex(r"(?:\d{3,}(?:x|\*))?480(?:i|p?x?)", value="480p") rebulk.regex(r"(?:\d{3,}(?:x|\*))?576(?:i|p?x?)", value="576p") rebulk.regex(r"(?:\d{3,}(?:x|\*))?720(?:i|p?(?:50|60)?x?)", value="720p") rebulk.regex(r"(?:\d{3,}(?:x|\*))?720(?:p(?:50|60)?x?)", value="720p") rebulk.regex(r"(?:\d{3,}(?:x|\*))?720p?hd", value="720p") rebulk.regex(r"(?:\d{3,}(?:x|\*))?900(?:i|p?x?)", value="900p") rebulk.regex(r"(?:\d{3,}(?:x|\*))?1080i", value="1080i") rebulk.regex(r"(?:\d{3,}(?:x|\*))?1080p?x?", value="1080p") rebulk.regex(r"(?:\d{3,}(?:x|\*))?1080(?:p(?:50|60)?x?)", value="1080p") rebulk.regex(r"(?:\d{3,}(?:x|\*))?1080p?hd", value="1080p") rebulk.regex(r"(?:\d{3,}(?:x|\*))?2160(?:i|p?x?)", value="4K") rebulk.string('4k', value='4K') _digits_re = re.compile(r'\d+') rebulk.defaults(name="screen_size", validator=seps_surround) rebulk.regex(r'\d{3,}-?(?:x|\*)-?\d{3,}', formatter=lambda value: 'x'.join(_digits_re.findall(value)), abbreviations=[dash], tags=['resolution'], conflict_solver=lambda match, other: '__default__' if other.name == 
'screen_size' else other) rebulk.rules(ScreenSizeOnlyOne, RemoveScreenSizeConflicts) return rebulk class ScreenSizeOnlyOne(Rule): """ Keep a single screen_size pet filepath part. """ consequence = RemoveMatch def when(self, matches, context): to_remove = [] for filepart in matches.markers.named('path'): screensize = list(reversed(matches.range(filepart.start, filepart.end, lambda match: match.name == 'screen_size'))) if len(screensize) > 1: to_remove.extend(screensize[1:]) return to_remove class RemoveScreenSizeConflicts(Rule): """ Remove season and episode matches which conflicts with screen_size match. """ consequence = RemoveMatch def when(self, matches, context): to_remove = [] for filepart in matches.markers.named('path'): screensize = matches.range(filepart.start, filepart.end, lambda match: match.name == 'screen_size', 0) if not screensize: continue conflicts = matches.conflicting(screensize, lambda match: match.name in ('season', 'episode')) if not conflicts: continue video_profile = matches.range(screensize.end, filepart.end, lambda match: match.name == 'video_profile', 0) if video_profile and not matches.holes(screensize.end, video_profile.start, predicate=lambda h: h.value and h.value.strip(seps)): to_remove.extend(conflicts) date = matches.previous(screensize, lambda match: match.name == 'date', 0) if date and not matches.holes(date.end, screensize.start, predicate=lambda h: h.value and h.value.strip(seps)): to_remove.extend(conflicts) return to_remove
{ "content_hash": "5fd56fe89078852f8382cd4289eb8df8", "timestamp": "", "source": "github", "line_count": 106, "max_line_length": 119, "avg_line_length": 40.075471698113205, "alnum_prop": 0.565442561205273, "repo_name": "pannal/Subliminal.bundle", "id": "b7732ab61c7878f23a55186477d2bc6a8210c1bd", "size": "4294", "binary": false, "copies": "10", "ref": "refs/heads/master", "path": "Contents/Libraries/Shared/guessit/rules/properties/screen_size.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "3012769" }, { "name": "Python", "bytes": "3311785" }, { "name": "Shell", "bytes": "273" } ], "symlink_target": "" }
import json
import urlparse

from django.conf import settings
from django.utils.http import urlencode

import requests
from django_statsd.clients import statsd

import olympia.core.logger
from olympia import amo
from olympia.addons.models import Addon

from . import data


log = olympia.core.logger.getLogger('z.amo')


def call_recommendation_server(telemetry_id, locale, platform):
    # Query the external add-on recommendation service for a telemetry
    # client id.  Returns the raw list from the 'results' key of the JSON
    # response, or [] on any network/HTTP failure.  Failures are logged and
    # counted in statsd but never raised to the caller.
    # Only include query parameters that actually have a value.
    params = [(key, value) for key, value in (
        ('locale', locale), ('platform', platform)) if value]
    endpoint = urlparse.urljoin(
        settings.RECOMMENDATION_ENGINE_URL,
        '%s/%s%s' % (telemetry_id, '?' if params else '', urlencode(params)))
    log.debug(u'Calling recommendation server: {0}'.format(endpoint))
    try:
        with statsd.timer('services.recommendations'):
            response = requests.get(
                endpoint, timeout=settings.RECOMMENDATION_ENGINE_TIMEOUT)
        if response.status_code != 200:
            # Funnel non-200 responses into the same failure path as
            # connection errors/timeouts.
            raise requests.exceptions.RequestException()
    except requests.exceptions.RequestException as e:
        log.error(u'Calling recommendation engine failed: {0}'.format(e))
        statsd.incr('services.recommendations.fail')
        return []
    else:
        statsd.incr('services.recommendations.success')
    return json.loads(response.content).get('results', [])


def get_recommendations(telemetry_id, locale, platform):
    # Map the GUIDs returned by the recommendation server to DiscoItems for
    # the public add-ons we actually have; GUIDs unknown to the DB are
    # silently dropped.
    guids = call_recommendation_server(telemetry_id, locale, platform)
    ids = (Addon.objects.public().filter(guid__in=guids)
           .values_list('id', flat=True))
    return [data.DiscoItem(addon_id=id_, is_recommendation=True)
            for id_ in ids]


def replace_extensions(source, replacements):
    # Substitute the extension entries of `source` (in order) with items from
    # `replacements` until the latter runs out; non-extension entries and any
    # surplus originals are kept as-is.
    replacements = list(replacements)  # copy so we can pop it.
    return [replacements.pop(0) if item.type == amo.ADDON_EXTENSION
            and replacements else item for item in source]
{ "content_hash": "3accdf714fa309c16b8135b17c4e8ed4", "timestamp": "", "source": "github", "line_count": 55, "max_line_length": 77, "avg_line_length": 35.61818181818182, "alnum_prop": 0.6615620214395099, "repo_name": "tsl143/addons-server", "id": "6b411e6d233aea6748d522a3efaa6e1b12a9f081", "size": "1959", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/olympia/discovery/utils.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "798448" }, { "name": "HTML", "bytes": "711178" }, { "name": "JavaScript", "bytes": "1078877" }, { "name": "Makefile", "bytes": "811" }, { "name": "PLSQL", "bytes": "1074" }, { "name": "PLpgSQL", "bytes": "2381" }, { "name": "Python", "bytes": "4665043" }, { "name": "SQLPL", "bytes": "559" }, { "name": "Shell", "bytes": "7175" }, { "name": "Smarty", "bytes": "1881" } ], "symlink_target": "" }
import os

from . import errors
from .ssladapter import ssladapter


class TLSConfig(object):
    """TLS configuration holder for the Docker API client.

    Validates client certificate / CA certificate parameters at construction
    time and applies them to a requests-style client via
    :meth:`configure_client`.
    """
    cert = None
    ca_cert = None
    verify = None
    ssl_version = None

    def __init__(self, client_cert=None, ca_cert=None, verify=None,
                 ssl_version=None, assert_hostname=None,
                 assert_fingerprint=None):
        """
        :param client_cert: tuple of (client certificate path, key file path),
            or None for no client authentication.
        :param ca_cert: path to the CA certificate used when ``verify`` is set.
        :param verify: enable server certificate verification.
        :param ssl_version: SSL/TLS protocol version passed to the adapter.
        :param assert_hostname: hostname to assert on the server certificate.
        :param assert_fingerprint: fingerprint to assert on the server cert.
        :raises errors.TLSParameterError: on malformed/missing cert parameters.
        """
        # Argument compatibility/mapping with
        # https://docs.docker.com/engine/articles/https/
        # This diverges from the Docker CLI in that users can specify 'tls'
        # here, but also disable any public/default CA pool verification by
        # leaving tls_verify=False
        self.ssl_version = ssl_version
        self.assert_hostname = assert_hostname
        self.assert_fingerprint = assert_fingerprint
        # "tls" and "tls_verify" must have both or neither cert/key files
        # In either case, Alert the user when both are expected, but any are
        # missing.
        if client_cert:
            try:
                # client_cert must unpack into exactly two paths.
                tls_cert, tls_key = client_cert
            except ValueError:
                raise errors.TLSParameterError(
                    'client_config must be a tuple of'
                    ' (client certificate, key file)'
                )
            # Both paths must be non-empty and point at existing files.
            # (Short-circuiting guarantees os.path.isfile only runs on
            # truthy values.)
            if not (tls_cert and tls_key) or (not os.path.isfile(tls_cert) or
                                              not os.path.isfile(tls_key)):
                raise errors.TLSParameterError(
                    'Path to a certificate and key files must be provided'
                    ' through the client_config param'
                )
            self.cert = (tls_cert, tls_key)
        # If verify is set, make sure the cert exists
        self.verify = verify
        self.ca_cert = ca_cert
        if self.verify and self.ca_cert and not os.path.isfile(self.ca_cert):
            raise errors.TLSParameterError(
                'Invalid CA certificate provided for `tls_ca_cert`.'
            )

    def configure_client(self, client):
        """Apply this TLS configuration to ``client`` (a requests.Session-like
        object): sets verification, client cert, and mounts an SSLAdapter for
        https:// URLs."""
        client.ssl_version = self.ssl_version
        if self.verify and self.ca_cert:
            # Verify against the provided CA bundle instead of the default.
            client.verify = self.ca_cert
        else:
            client.verify = self.verify
        if self.cert:
            client.cert = self.cert
        client.mount('https://', ssladapter.SSLAdapter(
            ssl_version=self.ssl_version,
            assert_hostname=self.assert_hostname,
            assert_fingerprint=self.assert_fingerprint,
        ))
{ "content_hash": "5a41a9e9ffc1ad09cb788a9fb75ec24f", "timestamp": "", "source": "github", "line_count": 70, "max_line_length": 77, "avg_line_length": 34.55714285714286, "alnum_prop": 0.5812319140140554, "repo_name": "vitalyisaev2/docker-py", "id": "83b0ff7ef1e545d5f60ea8a1c2509bae2954adcf", "size": "2419", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "docker/tls.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Makefile", "bytes": "2227" }, { "name": "Python", "bytes": "353714" } ], "symlink_target": "" }
"""Smoke test for the ``dummy_threading`` module.

The file previously contained an unresolved (nested) git merge conflict
whose branches were byte-identical, leaving ``<<<<<<<``/``=======``/
``>>>>>>>`` markers that made the module a SyntaxError.  Resolved by
keeping a single clean copy.
"""
from test import support
import unittest
import dummy_threading as _threading
import time


class DummyThreadingTestCase(unittest.TestCase):
    """Exercise dummy_threading's Thread/RLock/BoundedSemaphore stand-ins."""

    class TestThread(_threading.Thread):

        def run(self):
            # Module-level shared state initialised by setUp() below.
            global running
            global sema
            global mutex
            # Uncomment if testing another module, such as the real 'threading'
            # module.
            #delay = random.random() * 2
            delay = 0
            if support.verbose:
                print('task', self.name, 'will run for', delay, 'sec')
            sema.acquire()
            mutex.acquire()
            running += 1
            if support.verbose:
                print(running, 'tasks are running')
            mutex.release()
            time.sleep(delay)
            if support.verbose:
                print('task', self.name, 'done')
            mutex.acquire()
            running -= 1
            if support.verbose:
                print(self.name, 'is finished.', running, 'tasks are running')
            mutex.release()
            sema.release()

    def setUp(self):
        self.numtasks = 10
        global sema
        sema = _threading.BoundedSemaphore(value=3)
        global mutex
        mutex = _threading.RLock()
        global running
        running = 0
        self.threads = []

    def test_tasks(self):
        # Start all tasks; dummy_threading runs each to completion serially,
        # so join() returns immediately.
        for i in range(self.numtasks):
            t = self.TestThread(name="<thread %d>" % i)
            self.threads.append(t)
            t.start()

        if support.verbose:
            print('waiting for all tasks to complete')
        for t in self.threads:
            t.join()
        if support.verbose:
            print('all tasks done')


def test_main():
    support.run_unittest(DummyThreadingTestCase)


if __name__ == '__main__':
    test_main()
{ "content_hash": "5316368ee58196143bcdf28d45c6dc01", "timestamp": "", "source": "github", "line_count": 198, "max_line_length": 79, "avg_line_length": 28.085858585858585, "alnum_prop": 0.5358748426541989, "repo_name": "ArcherSys/ArcherSys", "id": "eab66baeb316528ada221a7b62bd24b8560cdc2e", "size": "5561", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Lib/test/test_dummy_threading.py", "mode": "33188", "license": "mit", "language": [], "symlink_target": "" }
import tempfile
import unittest

#from .log import *
#from .parsed_trace_events import *
from log import *
from parsed_trace_events import *


class TraceTest(unittest.TestCase):

    def __init__(self, *args):
        """
        Infrastructure for running tests of the tracing system.

        Does not actually run any tests. Look at subclasses for those.
        """
        unittest.TestCase.__init__(self, *args)
        # Temporary file the trace is written to while a test runs;
        # None when no trace is in flight.
        self._file = None

    def go(self, cb):
        """
        Enables tracing, runs the provided callback, and if successful,
        returns a TraceEvents object with the results.
        """
        self._file = tempfile.NamedTemporaryFile()
        # Append mode so the trace writer adds to the (empty) temp file.
        # NOTE(review): the file object handed to trace_enable is presumably
        # closed by trace_disable -- confirm; it is not closed here.
        trace_enable(open(self._file.name, 'a+'))
        try:
            cb()
        finally:
            # Always stop tracing, even if the callback raised; on the
            # exception path self._file stays set and tearDown closes it.
            trace_disable()
        e = ParsedTraceEvents(trace_filename = self._file.name)
        self._file.close()
        self._file = None
        return e

    @property
    def trace_filename(self):
        """Path of the temp file the current trace is being written to."""
        return self._file.name

    def tearDown(self):
        # Safety net: disable tracing and release the temp file if a test
        # failed partway through go().
        if trace_is_enabled():
            trace_disable()
        if self._file:
            self._file.close()
{ "content_hash": "50acc8f991fb7ba5b559196a0b689771", "timestamp": "", "source": "github", "line_count": 45, "max_line_length": 77, "avg_line_length": 22.77777777777778, "alnum_prop": 0.6487804878048781, "repo_name": "SummerLW/Perf-Insight-Report", "id": "7047e0eae862b2e9ab008321b0a1ec793694cc00", "size": "1187", "binary": false, "copies": "10", "ref": "refs/heads/test", "path": "tools/py_trace_event/py_trace_event/trace_event_impl/trace_test.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "C", "bytes": "3598" }, { "name": "C++", "bytes": "6411" }, { "name": "CSS", "bytes": "14952" }, { "name": "HTML", "bytes": "27508823" }, { "name": "JavaScript", "bytes": "75587" }, { "name": "Python", "bytes": "4638631" }, { "name": "Shell", "bytes": "2124" } ], "symlink_target": "" }
""" @authors: Sergei Garbuzov @status: Development @version: 1.1.0 """ import time import json from pysdn.controller.controller import Controller from pysdn.openflowdev.ofswitch import OFSwitch from pysdn.common.status import STATUS from pysdn.common.utils import load_dict_from_file def of_demo_2(): f = "cfg.yml" d = {} if(load_dict_from_file(f, d) is False): print("Config file '%s' read error: " % f) exit() try: ctrlIpAddr = d['ctrlIpAddr'] ctrlPortNum = d['ctrlPortNum'] ctrlUname = d['ctrlUname'] ctrlPswd = d['ctrlPswd'] nodeName = d['nodeName'] rundelay = d['rundelay'] except: print ("Failed to get Controller device attributes") exit(0) print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<") print ("<<< Demo 2 Start") print ("<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<") print ("\n") print ("<<< Creating Controller instance") time.sleep(rundelay) ctrl = Controller(ctrlIpAddr, ctrlPortNum, ctrlUname, ctrlPswd, None) print ("'Controller':") print ctrl.brief_json() print ("\n") print ("<<< Get information about OpenFlow node '%s'" % nodeName) time.sleep(rundelay) ofswitch = OFSwitch(ctrl, nodeName) result = ofswitch.get_switch_info() status = result.get_status() if(status.eq(STATUS.OK)): print ("Node '%s' generic info:" % nodeName) info = result.get_data() print json.dumps(info, indent=4) else: print ("\n") print ("!!!Demo terminated, reason: %s" % status.brief().lower()) exit(0) print ("\n") result = ofswitch.get_features_info() status = result.get_status() if(status.eq(STATUS.OK)): print ("Node '%s' features:" % nodeName) features = result.get_data() print json.dumps(features, indent=4) else: print ("\n") print ("!!!Demo terminated, reason: %s" % status.brief().lower()) exit(0) print ("\n") result = ofswitch.get_ports_list() status = result.get_status() if(status.eq(STATUS.OK)): ports = result.get_data() print ("Node '%s' ports list:" % nodeName) print json.dumps(ports, indent=4, sort_keys=True) else: print ("\n") print ("!!!Demo 
terminated, reason: %s" % status.brief().lower()) exit(0) print ("\n") result = ofswitch.get_ports_brief_info() status = result.get_status() if(status.eq(STATUS.OK)): print ("Node '%s' ports brief information:" % nodeName) info = result.get_data() print json.dumps(info, indent=4, sort_keys=True) else: print ("\n") print ("!!!Demo terminated, reason: %s" % status.brief().lower()) exit(0) print ("\n") print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>") print (">>> Demo End") print (">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>") if __name__ == "__main__": of_demo_2()
{ "content_hash": "d4a52805714a31fe86a3ee7b48198f4a", "timestamp": "", "source": "github", "line_count": 105, "max_line_length": 73, "avg_line_length": 29.076190476190476, "alnum_prop": 0.5312807075008189, "repo_name": "brocade/pysdn", "id": "251d17835d3ad06fc0e75b086cd507c45f3cfe9f", "size": "4627", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "samples/sampleopenflow/demos/demo2.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "Perl", "bytes": "2452" }, { "name": "Python", "bytes": "529708" } ], "symlink_target": "" }
""" The MIT License (MIT) Copyright (c) 2015-2016 Rapptz Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ class Colour: """Represents a Discord role colour. This class is similar to an (red, green, blue) tuple. There is an alias for this called Color. Supported operations: +-----------+----------------------------------------+ | Operation | Description | +===========+========================================+ | x == y | Checks if two colours are equal. | +-----------+----------------------------------------+ | x != y | Checks if two colours are not equal. | +-----------+----------------------------------------+ | hash(x) | Return the colour's hash. | +-----------+----------------------------------------+ | str(x) | Returns the hex format for the colour. | +-----------+----------------------------------------+ Attributes ------------ value : int The raw integer colour value. 
""" __slots__ = [ 'value' ] def __init__(self, value): self.value = value def _get_byte(self, byte): return (self.value >> (8 * byte)) & 0xff def __eq__(self, other): return isinstance(other, Colour) and self.value == other.value def __ne__(self, other): return not self.__eq__(other) def __str__(self): return '#{:0>6x}'.format(self.value) def __hash__(self): return hash(self.value) @property def r(self): """Returns the red component of the colour.""" return self._get_byte(2) @property def g(self): """Returns the green component of the colour.""" return self._get_byte(1) @property def b(self): """Returns the blue component of the colour.""" return self._get_byte(0) def to_tuple(self): """Returns an (r, g, b) tuple representing the colour.""" return (self.r, self.g, self.b) @classmethod def default(cls): """A factory method that returns a :class:`Colour` with a value of 0.""" return cls(0) @classmethod def teal(cls): """A factory method that returns a :class:`Colour` with a value of ``0x1abc9c``.""" return cls(0x1abc9c) @classmethod def dark_teal(cls): """A factory method that returns a :class:`Colour` with a value of ``0x11806a``.""" return cls(0x11806a) @classmethod def green(cls): """A factory method that returns a :class:`Colour` with a value of ``0x2ecc71``.""" return cls(0x2ecc71) @classmethod def dark_green(cls): """A factory method that returns a :class:`Colour` with a value of ``0x1f8b4c``.""" return cls(0x1f8b4c) @classmethod def blue(cls): """A factory method that returns a :class:`Colour` with a value of ``0x3498db``.""" return cls(0x3498db) @classmethod def dark_blue(cls): """A factory method that returns a :class:`Colour` with a value of ``0x206694``.""" return cls(0x206694) @classmethod def purple(cls): """A factory method that returns a :class:`Colour` with a value of ``0x9b59b6``.""" return cls(0x9b59b6) @classmethod def dark_purple(cls): """A factory method that returns a :class:`Colour` with a value of ``0x71368a``.""" return cls(0x71368a) 
@classmethod def magenta(cls): """A factory method that returns a :class:`Colour` with a value of ``0xe91e63``.""" return cls(0xe91e63) @classmethod def dark_magenta(cls): """A factory method that returns a :class:`Colour` with a value of ``0xad1457``.""" return cls(0xad1457) @classmethod def gold(cls): """A factory method that returns a :class:`Colour` with a value of ``0xf1c40f``.""" return cls(0xf1c40f) @classmethod def dark_gold(cls): """A factory method that returns a :class:`Colour` with a value of ``0xc27c0e``.""" return cls(0xc27c0e) @classmethod def orange(cls): """A factory method that returns a :class:`Colour` with a value of ``0xe67e22``.""" return cls(0xe67e22) @classmethod def dark_orange(cls): """A factory method that returns a :class:`Colour` with a value of ``0xa84300``.""" return cls(0xa84300) @classmethod def red(cls): """A factory method that returns a :class:`Colour` with a value of ``0xe74c3c``.""" return cls(0xe74c3c) @classmethod def dark_red(cls): """A factory method that returns a :class:`Colour` with a value of ``0x992d22``.""" return cls(0x992d22) @classmethod def lighter_grey(cls): """A factory method that returns a :class:`Colour` with a value of ``0x95a5a6``.""" return cls(0x95a5a6) @classmethod def dark_grey(cls): """A factory method that returns a :class:`Colour` with a value of ``0x607d8b``.""" return cls(0x607d8b) @classmethod def light_grey(cls): """A factory method that returns a :class:`Colour` with a value of ``0x979c9f``.""" return cls(0x979c9f) @classmethod def darker_grey(cls): """A factory method that returns a :class:`Colour` with a value of ``0x546e7a``.""" return cls(0x546e7a) Color = Colour
{ "content_hash": "a34386b3699e3e016a0e30d9aea60c68", "timestamp": "", "source": "github", "line_count": 196, "max_line_length": 91, "avg_line_length": 32.53061224489796, "alnum_prop": 0.5895545796737767, "repo_name": "4rchenix/paradis", "id": "76b8bf06d964c9e5591330752a9dab1ca0594da4", "size": "6401", "binary": false, "copies": "18", "ref": "refs/heads/master", "path": "discord/colour.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "478968" } ], "symlink_target": "" }
"""Test library of Hidden Linear Function circuits.""" import unittest import numpy as np from qiskit.test.base import QiskitTestCase from qiskit.circuit import QuantumCircuit from qiskit.circuit.exceptions import CircuitError from qiskit.circuit.library import HiddenLinearFunction from qiskit.quantum_info import Operator class TestHiddenLinearFunctionLibrary(QiskitTestCase): """Test library of Hidden Linear Function circuits.""" def assertHLFIsCorrect(self, hidden_function, hlf): """Assert that the HLF circuit produces the correct matrix. Number of qubits is equal to the number of rows (or number of columns) of hidden_function. """ num_qubits = len(hidden_function) hidden_function = np.asarray(hidden_function) simulated = Operator(hlf) expected = np.zeros((2**num_qubits, 2**num_qubits), dtype=complex) for i in range(2**num_qubits): i_qiskit = int(bin(i)[2:].zfill(num_qubits)[::-1], 2) x_vec = np.asarray(list(map(int, bin(i)[2:].zfill(num_qubits)[::-1]))) expected[i_qiskit, i_qiskit] = 1j ** ( np.dot(x_vec.transpose(), np.dot(hidden_function, x_vec)) ) qc = QuantumCircuit(num_qubits) qc.h(range(num_qubits)) qc = Operator(qc) expected = qc.compose(Operator(expected)).compose(qc) self.assertTrue(expected.equiv(simulated)) def test_hlf(self): """Test if the HLF matrix produces the right matrix.""" hidden_function = [[1, 1, 0], [1, 0, 1], [0, 1, 1]] hlf = HiddenLinearFunction(hidden_function) self.assertHLFIsCorrect(hidden_function, hlf) def test_non_symmetric_raises(self): """Test that adjacency matrix is required to be symmetric.""" with self.assertRaises(CircuitError): HiddenLinearFunction([[1, 1, 0], [1, 0, 1], [1, 1, 1]]) if __name__ == "__main__": unittest.main()
{ "content_hash": "cd2865b65708604639dc3205ccfb36e7", "timestamp": "", "source": "github", "line_count": 53, "max_line_length": 82, "avg_line_length": 37.0188679245283, "alnum_prop": 0.6416921508664628, "repo_name": "QISKit/qiskit-sdk-py", "id": "c971c560827e310ffc5594096c9a967ce3061bf3", "size": "2446", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "test/python/circuit/library/test_hidden_linear_function.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "2582" }, { "name": "C++", "bytes": "327518" }, { "name": "CMake", "bytes": "19294" }, { "name": "Makefile", "bytes": "5608" }, { "name": "Pascal", "bytes": "2444" }, { "name": "Python", "bytes": "1312801" }, { "name": "Shell", "bytes": "8385" } ], "symlink_target": "" }
"""Tests that walk through Course Builder pages.""" __author__ = 'Mike Gainer (mgainer@google.com)' import cgi import re from common import crypto from common import utils as common_utils from controllers import sites from models import courses from models import models from models import transforms from modules.dashboard import course_settings from modules.dashboard import filer from tests.functional import actions from tests.functional.actions import assert_contains from tests.functional.actions import assert_does_not_contain COURSE_NAME = 'admin_settings' COURSE_TITLE = 'Admin Settings' ADMIN_EMAIL = 'admin@foo.com' NAMESPACE = 'ns_%s' % COURSE_NAME BASE_URL = '/' + COURSE_NAME ADMIN_SETTINGS_URL = '/%s%s' % ( COURSE_NAME, course_settings.HtmlHookRESTHandler.URI) TEXT_ASSET_URL = '/%s%s' % ( COURSE_NAME, filer.TextAssetRESTHandler.URI) STUDENT_EMAIL = 'student@foo.com' SETTINGS_URL = '/%s/dashboard?action=settings&tab=admin_prefs' % COURSE_NAME class AdminSettingsTests(actions.TestBase): def setUp(self): super(AdminSettingsTests, self).setUp() actions.simple_add_course(COURSE_NAME, ADMIN_EMAIL, COURSE_TITLE) actions.login(ADMIN_EMAIL) def test_defaults(self): prefs = models.StudentPreferencesDAO.load_or_create() self.assertEquals(False, prefs.show_hooks) def test_settings_page(self): response = self.get(SETTINGS_URL) self.assertIn('Show hook edit buttons: False', response.body) with common_utils.Namespace(NAMESPACE): prefs = models.StudentPreferencesDAO.load_or_create() prefs.show_hooks = True models.StudentPreferencesDAO.save(prefs) response = self.get(SETTINGS_URL) self.assertIn('Show hook edit buttons: True', response.body) class WelcomePageTests(actions.TestBase): def setUp(self): super(WelcomePageTests, self).setUp() self.auto_deploy = sites.ApplicationContext.AUTO_DEPLOY_DEFAULT_COURSE sites.ApplicationContext.AUTO_DEPLOY_DEFAULT_COURSE = False def tearDown(self): sites.ApplicationContext.AUTO_DEPLOY_DEFAULT_COURSE = self.auto_deploy 
super(WelcomePageTests, self).tearDown() def test_welcome_page(self): actions.login(ADMIN_EMAIL, is_admin=True) response = self.get('/') self.assertEqual(response.status_int, 302) self.assertEqual( response.headers['location'], 'http://localhost/admin/welcome') response = self.get('/admin/welcome?action=welcome') assert_contains('Welcome to Course Builder', response.body) assert_contains('/admin/welcome?action=add_first_course', response.body) assert_contains('/admin/welcome?action=explore_sample', response.body) def test_explore_sample_course(self): actions.login(ADMIN_EMAIL, is_admin=True) response = self.post( '/admin/welcome?action=explore_sample', params={'xsrf_token': crypto.XsrfTokenManager.create_xsrf_token( 'explore_sample')}) self.assertEqual(response.status_int, 302) self.assertEqual( response.headers['location'], 'http://localhost/sample/dashboard') response = self.get('/sample/dashboard') assert_contains('Power Searching with Google', response.body) assert_does_not_contain('explore_sample', response.body) def test_create_new_course(self): actions.login(ADMIN_EMAIL, is_admin=True) response = self.post( '/admin/welcome?action=add_first_course', params={'xsrf_token': crypto.XsrfTokenManager.create_xsrf_token( 'add_first_course')}) self.assertEqual(response.status_int, 302) self.assertEqual( response.headers['location'], 'http://localhost/first/dashboard') response = self.get('/first/dashboard') assert_contains('My First Course', response.body) response = self.get('/admin/welcome?action=welcome') assert_does_not_contain('add_first_course', response.body) def test_explore_sample_course_idempotent(self): self.test_explore_sample_course() self.test_explore_sample_course() self.test_create_new_course() response = self.get('/') self.assertEqual(response.status_int, 302) self.assertEqual( response.headers['location'], 'http://localhost/sample/course?use_last_location=true') def test_create_new_course_idempotent(self): self.test_create_new_course() 
self.test_create_new_course() self.test_explore_sample_course() response = self.get('/') self.assertEqual(response.status_int, 302) self.assertEqual( response.headers['location'], 'http://localhost/first/course?use_last_location=true') class HtmlHookTest(actions.TestBase): def setUp(self): super(HtmlHookTest, self).setUp() context = actions.simple_add_course(COURSE_NAME, ADMIN_EMAIL, COURSE_TITLE) self.course = courses.Course(None, context) actions.login(ADMIN_EMAIL, is_admin=True) self.xsrf_token = crypto.XsrfTokenManager.create_xsrf_token( course_settings.HtmlHookRESTHandler.XSRF_ACTION) def test_hook_edit_button_presence(self): # Turn preference on; expect to see hook editor button with common_utils.Namespace(NAMESPACE): prefs = models.StudentPreferencesDAO.load_or_create() prefs.show_hooks = True models.StudentPreferencesDAO.save(prefs) response = self.get(BASE_URL) self.assertIn('gcb-html-hook-edit', response.body) # Turn preference off; expect editor button not present. with common_utils.Namespace(NAMESPACE): prefs = models.StudentPreferencesDAO.load_or_create() prefs.show_hooks = False models.StudentPreferencesDAO.save(prefs) response = self.get(BASE_URL) self.assertNotIn('gcb-html-hook-edit', response.body) def test_non_admin_permissions_failures(self): actions.login(STUDENT_EMAIL) student_xsrf_token = crypto.XsrfTokenManager.create_xsrf_token( course_settings.HtmlHookRESTHandler.XSRF_ACTION) response = self.get(ADMIN_SETTINGS_URL) self.assertEquals(200, response.status_int) payload = transforms.loads(response.body) self.assertEquals(401, payload['status']) self.assertEquals('Access denied.', payload['message']) response = self.put(ADMIN_SETTINGS_URL, {'request': transforms.dumps({ 'key': 'base:after_body_tag_begins', 'xsrf_token': cgi.escape(student_xsrf_token), 'payload': '{}'})}) payload = transforms.loads(response.body) self.assertEquals(401, payload['status']) self.assertEquals('Access denied.', payload['message']) response = 
self.delete(ADMIN_SETTINGS_URL + '?xsrf_token=' + cgi.escape(student_xsrf_token)) self.assertEquals(200, response.status_int) payload = transforms.loads(response.body) self.assertEquals(401, payload['status']) self.assertEquals('Access denied.', payload['message']) def test_malformed_requests(self): response = self.put(ADMIN_SETTINGS_URL, {}) payload = transforms.loads(response.body) self.assertEquals(400, payload['status']) self.assertEquals('Missing "request" parameter.', payload['message']) response = self.put(ADMIN_SETTINGS_URL, {'request': 'asdfasdf'}) payload = transforms.loads(response.body) self.assertEquals(400, payload['status']) self.assertEquals('Malformed "request" parameter.', payload['message']) response = self.put(ADMIN_SETTINGS_URL, {'request': transforms.dumps({ 'xsrf_token': cgi.escape(self.xsrf_token)})}) payload = transforms.loads(response.body) self.assertEquals(400, payload['status']) self.assertEquals('Request missing "key" parameter.', payload['message']) response = self.put(ADMIN_SETTINGS_URL, {'request': transforms.dumps({ 'xsrf_token': cgi.escape(self.xsrf_token), 'key': 'base:after_body_tag_begins'})}) payload = transforms.loads(response.body) self.assertEquals(400, payload['status']) self.assertEquals('Request missing "payload" parameter.', payload['message']) response = self.put(ADMIN_SETTINGS_URL, {'request': transforms.dumps({ 'xsrf_token': cgi.escape(self.xsrf_token), 'key': 'base:after_body_tag_begins', 'payload': 'asdfsdfasdf'})}) payload = transforms.loads(response.body) self.assertEquals(400, payload['status']) self.assertEquals('Malformed "payload" parameter.', payload['message']) response = self.put(ADMIN_SETTINGS_URL, {'request': transforms.dumps({ 'xsrf_token': cgi.escape(self.xsrf_token), 'key': 'base:after_body_tag_begins', 'payload': '{}'})}) payload = transforms.loads(response.body) self.assertEquals(400, payload['status']) self.assertEquals('Payload missing "hook_content" parameter.', payload['message']) def 
test_get_unknown_hook_content(self): # Should be safe (but unhelpful) to ask for no hook. response = transforms.loads(self.get(ADMIN_SETTINGS_URL).body) payload = transforms.loads(response['payload']) self.assertIsNone(payload['hook_content']) def test_get_defaulted_hook_content(self): url = '%s?key=%s' % ( ADMIN_SETTINGS_URL, cgi.escape('base:after_body_tag_begins')) response = transforms.loads(self.get(url).body) self.assertEquals(200, response['status']) self.assertEquals('Success.', response['message']) payload = transforms.loads(response['payload']) self.assertEquals('<!-- base.after_body_tag_begins -->', payload['hook_content']) def test_page_has_defaulted_hook_content(self): response = self.get(BASE_URL) self.assertIn('<!-- base.after_body_tag_begins -->', response.body) def test_set_hook_content(self): html_text = '<table><tbody><tr><th>;&lt;&gt;</th></tr></tbody></table>' response = self.put(ADMIN_SETTINGS_URL, {'request': transforms.dumps({ 'xsrf_token': cgi.escape(self.xsrf_token), 'key': 'base:after_body_tag_begins', 'payload': transforms.dumps( {'hook_content': html_text})})}) self.assertEquals(200, response.status_int) response = transforms.loads(response.body) self.assertEquals(200, response['status']) self.assertEquals('Saved.', response['message']) # And verify that the changed text appears on course pages. # NOTE that text is as-is; no escaping of special HTML # characters should have been done. 
response = self.get(BASE_URL) self.assertIn(html_text, response.body) def test_delete_default_content_ineffective(self): response = self.get(BASE_URL) self.assertIn('<!-- base.after_body_tag_begins -->', response.body) url = '%s?key=%s&xsrf_token=%s' % ( ADMIN_SETTINGS_URL, cgi.escape('base:after_body_tag_begins'), cgi.escape(self.xsrf_token)) response = transforms.loads(self.delete(url).body) self.assertEquals(200, response['status']) self.assertEquals('Deleted.', response['message']) response = self.get(BASE_URL) self.assertIn('<!-- base.after_body_tag_begins -->', response.body) def test_manipulate_non_default_item(self): html_text = '<table><tbody><tr><th>;&lt;&gt;</th></tr></tbody></table>' new_hook_name = 'html:some_new_hook' # Verify that content prior to setting is blank. url = '%s?key=%s&xsrf_token=%s' % ( ADMIN_SETTINGS_URL, cgi.escape(new_hook_name), cgi.escape(self.xsrf_token)) response = transforms.loads(self.get(url).body) payload = transforms.loads(response['payload']) self.assertIsNone(payload['hook_content']) # Set the content. response = self.put(ADMIN_SETTINGS_URL, {'request': transforms.dumps({ 'xsrf_token': cgi.escape(self.xsrf_token), 'key': new_hook_name, 'payload': transforms.dumps( {'hook_content': html_text})})}) self.assertEquals(200, response.status_int) response = transforms.loads(response.body) self.assertEquals(200, response['status']) self.assertEquals('Saved.', response['message']) # Verify that content after setting is as expected url = '%s?key=%s&xsrf_token=%s' % ( ADMIN_SETTINGS_URL, cgi.escape(new_hook_name), cgi.escape(self.xsrf_token)) response = transforms.loads(self.get(url).body) payload = transforms.loads(response['payload']) self.assertEquals(html_text, payload['hook_content']) # Delete the content. response = transforms.loads(self.delete(url).body) self.assertEquals(200, response['status']) self.assertEquals('Deleted.', response['message']) # Verify that content after setting is None. 
url = '%s?key=%s&xsrf_token=%s' % ( ADMIN_SETTINGS_URL, cgi.escape(new_hook_name), cgi.escape(self.xsrf_token)) response = transforms.loads(self.get(url).body) payload = transforms.loads(response['payload']) self.assertIsNone(payload['hook_content']) def test_add_new_hook_to_page(self): hook_name = 'html:my_new_hook' html_text = '<table><tbody><tr><th>;&lt;&gt;</th></tr></tbody></table>' key = 'views/base.html' url = '%s?key=%s' % ( TEXT_ASSET_URL, cgi.escape(key)) # Get base page template. response = transforms.loads(self.get(url).body) xsrf_token = response['xsrf_token'] payload = transforms.loads(response['payload']) contents = payload['contents'] # Add hook specification to page content. contents = contents.replace( '<body data-gcb-page-locale="{{ page_locale }}">', '<body data-gcb-page-locale="{{ page_locale }}">\n' + '{{ html_hooks.insert(\'%s\') | safe }}' % hook_name) self.put(TEXT_ASSET_URL, {'request': transforms.dumps({ 'xsrf_token': cgi.escape(xsrf_token), 'key': key, 'payload': transforms.dumps({'contents': contents})})}) # Verify that new hook appears on page. 
response = self.get(BASE_URL) self.assertIn('id="%s"' % re.sub('[^a-zA-Z-]', '-', hook_name), response.body) # Verify that modified hook content appears on page response = self.put(ADMIN_SETTINGS_URL, {'request': transforms.dumps({ 'xsrf_token': cgi.escape(self.xsrf_token), 'key': hook_name, 'payload': transforms.dumps( {'hook_content': html_text})})}) response = self.get(BASE_URL) self.assertIn(html_text, response.body) def test_student_admin_hook_visibility(self): actions.login(STUDENT_EMAIL, is_admin=False) with common_utils.Namespace(NAMESPACE): prefs = models.StudentPreferencesDAO.load_or_create() prefs.show_hooks = True models.StudentPreferencesDAO.save(prefs) response = self.get(BASE_URL) self.assertNotIn('gcb-html-hook-edit', response.body) actions.login(ADMIN_EMAIL, is_admin=True) with common_utils.Namespace(NAMESPACE): prefs = models.StudentPreferencesDAO.load_or_create() prefs.show_hooks = True models.StudentPreferencesDAO.save(prefs) response = self.get(BASE_URL) self.assertIn('gcb-html-hook-edit', response.body) class JinjaContextTest(actions.TestBase): def setUp(self): super(JinjaContextTest, self).setUp() actions.simple_add_course(COURSE_NAME, ADMIN_EMAIL, COURSE_TITLE) actions.login(ADMIN_EMAIL, is_admin=True) def _get_jinja_context_text(self, response): root = self.parse_html_string(response.text) div = root.find('body/div[last()]') return ''.join(div.itertext()) def test_show_jina_context_presence(self): # Turn preference on; expect to see context dump. with common_utils.Namespace(NAMESPACE): prefs = models.StudentPreferencesDAO.load_or_create() prefs.show_jinja_context = True models.StudentPreferencesDAO.save(prefs) self.assertIn('is_read_write_course:', self._get_jinja_context_text(self.get(BASE_URL))) # Turn preference off; expect context dump not present. 
with common_utils.Namespace(NAMESPACE): prefs = models.StudentPreferencesDAO.load_or_create() prefs.show_jinja_context = False models.StudentPreferencesDAO.save(prefs) self.assertNotIn('is_read_write_course:', self._get_jinja_context_text(self.get(BASE_URL))) def test_student_jinja_context_visibility(self): actions.login(STUDENT_EMAIL, is_admin=False) with common_utils.Namespace(NAMESPACE): prefs = models.StudentPreferencesDAO.load_or_create() prefs.show_jinja_context = True models.StudentPreferencesDAO.save(prefs) self.assertNotIn('is_read_write_course:', self._get_jinja_context_text(self.get(BASE_URL)))
{ "content_hash": "28dd143748478391b7922aefeb17a9d7", "timestamp": "", "source": "github", "line_count": 424, "max_line_length": 80, "avg_line_length": 42.58254716981132, "alnum_prop": 0.6239822763777347, "repo_name": "wavemind/gcb17ml", "id": "efc12927a0226f44a7c51a34790b195d25c73802", "size": "18653", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/functional/admin_settings.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "62209" }, { "name": "JavaScript", "bytes": "425162" }, { "name": "Python", "bytes": "3344249" }, { "name": "Shell", "bytes": "23773" } ], "symlink_target": "" }
from unittest import mock

from neutron_lib.api.definitions import port_security
from neutron_lib.plugins import constants
from neutron_lib.plugins import directory

from neutron.db import portsecurity_db as pd
from neutron.db import portsecurity_db_common as pdc
from neutron.tests import base

common = pdc.PortSecurityDbCommon


class FakePlugin(pd.PortSecurityDbMixin):
    """Minimal core-plugin stand-in that advertises the port-security extension."""

    supported_extension_aliases = [port_security.ALIAS]


class PortSecurityDbMixinTestCase(base.BaseTestCase):
    """Unit tests for PortSecurityDbMixin._extend_port_security_dict."""

    def setUp(self):
        super().setUp()
        # Register the fake plugin as the core plugin for the duration
        # of each test.
        self.plugin = FakePlugin()
        directory.add_plugin(constants.CORE, self.plugin)

    @mock.patch.object(common, '_extend_port_security_dict')
    def test__extend_port_security_dict_relies_on_common(self, mocked_extend):
        """With the extension enabled, the call is delegated to the common mixin."""
        fake_response = mock.Mock()
        fake_db_row = mock.Mock()

        self.plugin._extend_port_security_dict(fake_response, fake_db_row)

        mocked_extend.assert_called_once_with(fake_response, fake_db_row)

    @mock.patch.object(common, '_extend_port_security_dict')
    def test__extend_port_security_dict_ignored_if_extension_disabled(
            self, mocked_extend):
        """With the extension alias removed, the common helper is never invoked."""
        fake_response = mock.Mock()
        fake_db_row = mock.Mock()
        # Instance attribute shadows the class-level alias list.
        self.plugin.supported_extension_aliases = []

        self.plugin._extend_port_security_dict(fake_response, fake_db_row)

        self.assertFalse(mocked_extend.called)
{ "content_hash": "0ef19eaeb2c4453f01069dde89cd19f9", "timestamp": "", "source": "github", "line_count": 40, "max_line_length": 78, "avg_line_length": 35.25, "alnum_prop": 0.6907801418439716, "repo_name": "mahak/neutron", "id": "a89d856518881c0c5a820c50a747c4c58ce30454", "size": "1983", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "neutron/tests/unit/db/test_portsecurity_db.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Jinja", "bytes": "2773" }, { "name": "Mako", "bytes": "1047" }, { "name": "Python", "bytes": "15942116" }, { "name": "Ruby", "bytes": "1257" }, { "name": "Shell", "bytes": "83270" } ], "symlink_target": "" }
__author__ = "Sven Kreiss, Kyle Cranmer" __version__ = "0.1" __description__ = """ Creates a counting model using HistFactory for the H2ph, HZZ4l and HWWlvlv channels in ATLAS. The numbers are taken from these papers and conf notes: - http://arxiv.org/pdf/1307.1427v1.pdf (combination paper) - https://cds.cern.ch/record/1523698/files/ATLAS-CONF-2013-012.pdf (2ph conf note) - http://cds.cern.ch/record/1523699/files/ATLAS-CONF-2013-013.pdf (4l conf note) """ import optparse parser = optparse.OptionParser(version=__version__, description=__description__) parser.add_option("-q", "--quiet", dest="verbose", action="store_false", default=True, help="Quiet output.") options,args = parser.parse_args() import os import ROOT ROOT.gROOT.SetBatch( True ) container = [] def factory_2phCombPaper_category( name, nData, nBackground, nGGFTTH, nVBFVH, ggFTheory = 0.08, nJets = 0 ): """ Creates a category with the structure used for 2ph in combination paper table 5. """ channel = ROOT.RooStats.HistFactory.Channel( name ) container.append(channel) channel.SetData(nData) background = ROOT.RooStats.HistFactory.Sample("background") background.SetValue(nBackground) channel.AddSample(background) container.append(background) signalGGFttH = ROOT.RooStats.HistFactory.Sample("signalGGFttH") signalGGFttH.SetValue(nGGFTTH) signalGGFttH.AddNormFactor("mu", 1, 0, 6) signalGGFttH.AddNormFactor("mu_XS8_ggF", 1, -5, 10) signalGGFttH.AddNormFactor("muT_2ph", 1, -5, 10) signalGGFttH.AddOverallSys("QCDscale_Higgs_ggH", 0.87, 1.13) if nJets == 2: signalGGFttH.AddOverallSys("QCDscale_Higgs_ggH2in", 1.0-ggFTheory, 1.0+ggFTheory) signalGGFttH.AddOverallSys("H2ph_deltaPhiModeling", 0.9, 1.1) signalGGFttH.AddOverallSys("H2ph_etaModeling", 0.93, 1.07) if nJets == 2: signalGGFttH.AddOverallSys("UE_2jet", 0.92, 1.08) channel.AddSample(signalGGFttH) container.append(signalGGFttH) signalVBFVH = ROOT.RooStats.HistFactory.Sample("signalVBFVH") signalVBFVH.SetValue(nVBFVH) signalVBFVH.AddNormFactor("mu", 1, 0, 6) 
signalVBFVH.AddNormFactor("mu_XS8_VBF", 1, -5, 10) signalVBFVH.AddNormFactor("muW_2ph", 1, -5, 10) channel.AddSample(signalVBFVH) container.append(signalVBFVH) return channel # From the combination paper. Problem is that the constraint on ggF is actually very # loose with these numbers (S/B for untagged category is bad). So these categories should # not be used here. Better use the set of functions below for numbers from the 2ph note. def channel_2phCombPaper_looseHighMass2Jets(): return factory_2phCombPaper_category( "H2phLooseHighMass2Jets", 41, 28, 2.3, 2.7, ggFTheory=0.28, nJets=2 ) def channel_2phCombPaper_tightHighMass2Jets(): return factory_2phCombPaper_category( "H2phTightHighMass2Jets", 23, 13, 1.8, 5.9, ggFTheory=0.48, nJets=2 ) def channel_2phCombPaper_lowMass2Jets(): return factory_2phCombPaper_category( "H2phTightLowMass2Jets", 19, 21, 1.5, 1.46, ggFTheory=0.30, nJets=2 ) def channel_2phCombPaper_EtMissSignificance(): return factory_2phCombPaper_category( "H2phEtMissSignificance", 8, 4, 0.14, 1.0 ) def channel_2phCombPaper_lepton(): return factory_2phCombPaper_category( "H2phLepton", 20, 12, 0.50, 2.11 ) def channel_2phCombPaper_untagged(): return factory_2phCombPaper_category( "H2phUntagged", 14248, 13582, 321, 30 ) def factory_2phConfNote_category( name, nData, nBackground, nGGFTTH, nVBFVH, ggFTheory = 0.08, nJets = 0 ): """ Creates a category with the structure used in 2ph conf note in table 1. """ # shrink uncertainties (1.0 for "do nothing") su = 1.0 # adjust for better agreement with published ATLAS result nGGFTTH *= 1.12 nVBFVH *= 1.05 # make sure that the systematics have the same for signal and sideband region. 
nameSys = name.replace("Side","") channel = ROOT.RooStats.HistFactory.Channel( name ) container.append(channel) channel.SetData(nData) background = ROOT.RooStats.HistFactory.Sample("background") background.SetValue(nBackground) #background.AddOverallSys("H2ph_"+nameSys+"_backgroundModeling", 0.90, 1.10) channel.AddSample(background) container.append(background) signalGGFttH = ROOT.RooStats.HistFactory.Sample("signalGGFttH") signalGGFttH.SetValue(nGGFTTH) signalGGFttH.AddNormFactor("mu", 1, 0, 6) signalGGFttH.AddNormFactor("mu_XS8_ggF", 1, -5, 10) signalGGFttH.AddNormFactor("muT_2ph", 1, -5, 10) signalGGFttH.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036) signalGGFttH.AddOverallSys("QCDscale_Higgs_ggH", 0.87, 1.13) if nJets == 2: signalGGFttH.AddOverallSys("QCDscale_Higgs_ggH2in", 1.0 - su*ggFTheory, 1.0 + su*ggFTheory) signalGGFttH.AddOverallSys("H2ph_deltaPhiModeling", 1.0 - su*0.1, 1.0 + su*0.1) signalGGFttH.AddOverallSys("H2ph_etaModeling", 1.0 - su*0.07, 1.0 + su*0.07) if nJets == 2: signalGGFttH.AddOverallSys("UE_2jet", 1.0 - su*0.08, 1.0 + su*0.08) channel.AddSample(signalGGFttH) container.append(signalGGFttH) signalVBFVH = ROOT.RooStats.HistFactory.Sample("signalVBFVH") signalVBFVH.SetValue(nVBFVH) signalVBFVH.AddNormFactor("mu", 1, 0, 6) signalVBFVH.AddNormFactor("mu_XS8_VBF", 1, -5, 10) signalVBFVH.AddNormFactor("muW_2ph", 1, -5, 10) signalVBFVH.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036) channel.AddSample(signalVBFVH) container.append(signalVBFVH) return channel # Numbers from the 2ph conf note for the untagged categories. Taking the absolute numbers from table 2 # and it's 90%-of-expected-signal-range, but using the fractional breakdown in production modes form table 1. # # For sideband, get expected background from narrow window using the observed events ratio in the # Conv. 
transition category (because it has the lowest S/B): 1 - 2554/14864 = 0.828 # Or using the unconv central low ptt: 1 - 881/(10900-51.8) = 0.919 def channel_2phConfNote_looseHighMass2Jets(): return factory_2phConfNote_category( "H2phLooseHighMass2Jets", 40, 28, 4.8*0.451, 4.8*(1-0.451), ggFTheory=0.28, nJets=2 ) def channel_2phConfNote_tightHighMass2Jets(): return factory_2phConfNote_category( "H2phTightHighMass2Jets", 24, 13, 7.3*0.238, 7.3*(1-0.238), ggFTheory=0.48, nJets=2 ) def channel_2phConfNote_lowMass2Jets(): return factory_2phConfNote_category( "H2phTightLowMass2Jets", 21, 21, 3.0*0.500, 3.0*(1-0.500), ggFTheory=0.30, nJets=2 ) def channel_2phConfNote_EtMissSignificance(): return factory_2phConfNote_category( "H2phEtMissSignificance", 8, 4, 1.1*0.162, 1.1*(1-0.162) ) def channel_2phConfNote_lepton(): return factory_2phConfNote_category( "H2phLepton", 19, 12, 2.6*0.208, 2.6*(1-0.208) ) def channel_2phConfNote_unconvCentralLowPTt(): return factory_2phConfNote_category( "H2phUnconvCentralLowPTt", 911, 881, 46.6*0.935, 46.6*(1-0.935) ) def channel_2phConfNote_unconvCentralHighPTt(): return factory_2phConfNote_category( "H2phUnconvCentralHighPTt", 49, 44, 7.1*0.807, 7.1*(1-0.807) ) def channel_2phConfNote_unconvRestLowPTt(): return factory_2phConfNote_category( "H2phUnconvRestLowPTt", 4611, 4347, 97.1*0.933, 97.1*(1-0.933) ) def channel_2phConfNote_unconvRestHighPTt(): return factory_2phConfNote_category( "H2phUnconvRestHighPTt", 292, 247, 14.4*0.792, 14.4*(1-0.792) ) def channel_2phConfNote_convCentralLowPTt(): return factory_2phConfNote_category( "H2phConvCentralLowPTt", 722, 687, 29.8*0.938, 29.8*(1-0.938) ) def channel_2phConfNote_convCentralHighPTt(): return factory_2phConfNote_category( "H2phConvCentralHighPTt", 39, 31, 4.6*0.804, 4.6*(1-0.804) ) def channel_2phConfNote_convRestLowPTt(): return factory_2phConfNote_category( "H2phConvRestLowPTt", 4865, 4657, 88.0*0.933, 88.0*(1-0.933) ) def channel_2phConfNote_convRestHighPTt(): return 
factory_2phConfNote_category( "H2phConvRestHighPTt", 276, 266, 12.9*0.788, 12.9*(1-0.788) ) def channel_2phConfNote_convTransition(): return factory_2phConfNote_category( "H2phConvTransition", 2554, 2499, 36.1*0.909, 36.1*(1-0.909) ) def channel_4lVBFLike(): """ VBF-like category: - 1 observed - 0.71 +/- 0.10 expected signel (60% from VBF), cross check with table 2: 0.43 and 0.28 - S/B = 5 - sys unc from table 4: - theory xs: ggF (21%), VBF (4%), ZZ (35%) - ue: ggF (19%), VBF (4%) - jes: ggF (14%), VBF (10%), ZZ (10%) """ channel = ROOT.RooStats.HistFactory.Channel("HZZ4lVBFLike") container.append(channel) channel.SetData(1.0) signalVBF = ROOT.RooStats.HistFactory.Sample("signalVBF") signalVBF.SetValue(0.71 * 0.6) # 0.71 total signal expected with 60% from VBF signalVBF.AddNormFactor("mu", 1, 0, 6) signalVBF.AddNormFactor("mu_XS8_VBF", 1, -5, 10) signalVBF.AddNormFactor("muW_4l", 1, -5, 10) signalVBF.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036) signalVBF.AddOverallSys("thxs_VBF", 0.96, 1.04) signalVBF.AddOverallSys("UE", 0.96, 1.04) signalVBF.AddOverallSys("JES", 0.90, 1.10) channel.AddSample(signalVBF) container.append(signalVBF) signalGGF = ROOT.RooStats.HistFactory.Sample("signalGGF") signalGGF.SetValue(0.71 * 0.4) # 0.71 total signal expected with 40% from ggF signalGGF.AddNormFactor("mu", 1, 0, 6) signalGGF.AddNormFactor("mu_XS8_ggF", 1, -5, 10) signalGGF.AddNormFactor("muT_4l", 1, -5, 10) signalGGF.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036) signalGGF.AddOverallSys("QCDscale_Higgs_ggH", 0.87, 1.13) signalGGF.AddOverallSys("QCDscale_Higgs_ggH2in", 0.79, 1.21) signalGGF.AddOverallSys("UE", 0.81, 1.19) signalGGF.AddOverallSys("JES", 0.86, 1.14) channel.AddSample(signalGGF) container.append(signalGGF) # to optimize agreement with official ATLAS result, remove ZZ bkg completely # ZZ = ROOT.RooStats.HistFactory.Sample("ZZ") # ZZ.SetValue( (0.71-0.65) / 5.0 ) # S/B=5 # channel.AddSample(ZZ) # container.append(ZZ) return channel def 
channel_4lVHLike(): """ VH-like category: - 0 observed - ggF: 0.06 events, VBF-VH: 0.14 events, ZZ: 0.69 events """ channel = ROOT.RooStats.HistFactory.Channel("HZZ4lVHLike") container.append(channel) channel.SetData(0.0) signalVBF = ROOT.RooStats.HistFactory.Sample("signalVBF") signalVBF.SetValue(0.14) signalVBF.AddNormFactor("mu", 1, 0, 6) signalVBF.AddNormFactor("mu_XS8_VBF", 1, -5, 10) signalVBF.AddNormFactor("muW_4l", 1, -5, 10) signalVBF.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036) # signalVBF.AddOverallSys("thxs_VBF", 0.96, 1.04) # signalVBF.AddOverallSys("UE", 0.96, 1.04) # signalVBF.AddOverallSys("JES", 0.90, 1.10) channel.AddSample(signalVBF) container.append(signalVBF) signalGGF = ROOT.RooStats.HistFactory.Sample("signalGGF") signalGGF.SetValue(0.06) signalGGF.AddNormFactor("mu", 1, 0, 6) signalGGF.AddNormFactor("mu_XS8_ggF", 1, -5, 10) signalGGF.AddNormFactor("muT_4l", 1, -5, 10) signalGGF.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036) signalGGF.AddOverallSys("QCDscale_Higgs_ggH", 0.87, 1.13) # signalGGF.AddOverallSys("QCDscale_Higgs_ggH2in", 0.79, 1.21) # signalGGF.AddOverallSys("UE", 0.81, 1.19) # signalGGF.AddOverallSys("JES", 0.86, 1.14) channel.AddSample(signalGGF) container.append(signalGGF) ZZ = ROOT.RooStats.HistFactory.Sample("ZZ") ZZ.SetValue( 0.69 ) # ZZ.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036) #ZZ.AddOverallSys("JES", 0.90, 1.10) channel.AddSample(ZZ) container.append(ZZ) return channel def channel_4lggFLike_4mu(): """ ggF-like categories: - sys unc from table 1 in combination paper: - QCDscale: ggF (8%), VBF/VH (1%), ttH(+4 -9%) - PDF + alpha_s: ggF (8%), VBF/VH (4%) data from table 7 (combination paper): - 4mu: observed 13, signal 6.3, ZZ 2.8, Z+jets 0.55 Subtracted out the 0.85 expected signal events for VBF and VH categories. 
""" channel = ROOT.RooStats.HistFactory.Channel("HZZ4lggFLike_4mu") container.append(channel) channel.SetData(13.0) signalGGF = ROOT.RooStats.HistFactory.Sample("signalGGF") signalGGF.SetValue((6.3-0.85-1.0) * 0.92) signalGGF.AddNormFactor("mu", 1, 0, 6) signalGGF.AddNormFactor("mu_XS8_ggF", 1, -5, 10) signalGGF.AddNormFactor("muT_4l", 1, -5, 10) signalGGF.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036) signalGGF.AddOverallSys("QCDscale_Higgs_ggH", 0.87, 1.13) signalGGF.AddOverallSys("PDFalphas", 0.92, 1.08) #signalGGF.AddOverallSys("UE", 0.81, 1.19) channel.AddSample(signalGGF) container.append(signalGGF) signalVBF = ROOT.RooStats.HistFactory.Sample("signalVBF") signalVBF.SetValue((6.3-0.85) * 0.08) signalVBF.AddNormFactor("mu", 1, 0, 6) signalVBF.AddNormFactor("mu_XS8_VBF", 1, -5, 10) signalVBF.AddNormFactor("muW_4l", 1, -5, 10) signalVBF.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036) signalVBF.AddOverallSys("PDFalphas", 0.92, 1.08) #signalVBF.AddOverallSys("UE", 0.81, 1.19) channel.AddSample(signalVBF) container.append(signalVBF) ZZ = ROOT.RooStats.HistFactory.Sample("ZZ") ZZ.SetValue( 2.8 ) # ZZ.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036) channel.AddSample(ZZ) container.append(ZZ) Zjets = ROOT.RooStats.HistFactory.Sample("Zjets") Zjets.SetValue( 0.55 ) #Zjets.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036) # ---- data driven channel.AddSample(Zjets) container.append(Zjets) return channel def channel_4lggFLike_2e2mu(): """ ggF-like categories: - sys unc from table 1 in combination paper: - QCDscale: ggF (8%), VBF/VH (1%), ttH(+4 -9%) - PDF + alpha_s: ggF (8%), VBF/VH (4%) data from table 7 (combination paper): - 2e2mu: observed 13, signal 7.0, ZZ 3.5, Z+jets 2.11 Subtracted out the 0.85 expected signal events for VBF and VH categories. 
""" channel = ROOT.RooStats.HistFactory.Channel("HZZ4lggFLike_2e2mu") container.append(channel) channel.SetData(13.0) signalGGF = ROOT.RooStats.HistFactory.Sample("signalGGF") signalGGF.SetValue((7.0-0.85-1.0) * 0.92) signalGGF.AddNormFactor("mu", 1, 0, 6) signalGGF.AddNormFactor("mu_XS8_ggF", 1, -5, 10) signalGGF.AddNormFactor("muT_4l", 1, -5, 10) signalGGF.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036) signalGGF.AddOverallSys("QCDscale_Higgs_ggH", 0.87, 1.13) signalGGF.AddOverallSys("PDFalphas", 0.92, 1.08) #signalGGF.AddOverallSys("UE", 0.81, 1.19) channel.AddSample(signalGGF) container.append(signalGGF) signalVBF = ROOT.RooStats.HistFactory.Sample("signalVBF") signalVBF.SetValue((7.0-0.85) * 0.08) signalVBF.AddNormFactor("mu", 1, 0, 6) signalVBF.AddNormFactor("mu_XS8_VBF", 1, -5, 10) signalVBF.AddNormFactor("muW_4l", 1, -5, 10) signalVBF.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036) signalVBF.AddOverallSys("PDFalphas", 0.92, 1.08) #signalVBF.AddOverallSys("UE", 0.81, 1.19) channel.AddSample(signalVBF) container.append(signalVBF) ZZ = ROOT.RooStats.HistFactory.Sample("ZZ") ZZ.SetValue( 3.5 ) # ZZ.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036) channel.AddSample(ZZ) container.append(ZZ) Zjets = ROOT.RooStats.HistFactory.Sample("Zjets") Zjets.SetValue( 2.11 ) channel.AddSample(Zjets) container.append(Zjets) return channel def channel_4lggFLike_4e(): """ ggF-like categories: - sys unc from table 1 in combination paper: - QCDscale: ggF (8%), VBF/VH (1%), ttH(+4 -9%) - PDF + alpha_s: ggF (8%), VBF/VH (4%) data from table 7 (combination paper): - 4e: observed 6, signal 2.6, ZZ 1.2, Z+jets 1.11 Subtracted out the 0.85 expected signal events for VBF and VH categories. 
""" channel = ROOT.RooStats.HistFactory.Channel("HZZ4lggFLike_4e") container.append(channel) channel.SetData(6.0) signalGGF = ROOT.RooStats.HistFactory.Sample("signalGGF") signalGGF.SetValue((6.3-0.85-1.0) * 0.92) signalGGF.AddNormFactor("mu", 1, 0, 6) signalGGF.AddNormFactor("mu_XS8_ggF", 1, -5, 10) signalGGF.AddNormFactor("muT_4l", 1, -5, 10) signalGGF.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036) signalGGF.AddOverallSys("QCDscale_Higgs_ggH", 0.87, 1.13) signalGGF.AddOverallSys("PDFalphas", 0.92, 1.08) #signalGGF.AddOverallSys("UE", 0.81, 1.19) channel.AddSample(signalGGF) container.append(signalGGF) signalVBF = ROOT.RooStats.HistFactory.Sample("signalVBF") signalVBF.SetValue((6.3-0.85) * 0.08) signalVBF.AddNormFactor("mu", 1, 0, 6) signalVBF.AddNormFactor("mu_XS8_VBF", 1, -5, 10) signalVBF.AddNormFactor("muW_4l", 1, -5, 10) signalVBF.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036) signalVBF.AddOverallSys("PDFalphas", 0.92, 1.08) #signalVBF.AddOverallSys("UE", 0.81, 1.19) channel.AddSample(signalVBF) container.append(signalVBF) ZZ = ROOT.RooStats.HistFactory.Sample("ZZ") ZZ.SetValue( 1.2 ) # ZZ.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036) channel.AddSample(ZZ) container.append(ZZ) Zjets = ROOT.RooStats.HistFactory.Sample("Zjets") Zjets.SetValue( 1.11 ) channel.AddSample(Zjets) container.append(Zjets) return channel # From the combination paper: # "The VBF process contributes 2%, # 12% and 81% of the predicted signal in the Njet=0,=1, # and >= 2 final states, respectively" # # The cross sections are more like 87.8% (ggF) versus 12.2% (VBF) which is not used here. def channel_lvlv_0jet(): """ Mostly based on table 8 of the combination paper for the uncertainties and table 9 for the event counts. 
""" channel = ROOT.RooStats.HistFactory.Channel( "HWWlvlv0Jet" ) container.append(channel) channel.SetData(831) background = ROOT.RooStats.HistFactory.Sample("background") background.SetValue(739*1.02) # background.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036) # background.AddOverallSys("JES", 0.98, 1.02) channel.AddSample(background) container.append(background) signalGGFttH = ROOT.RooStats.HistFactory.Sample("signalGGFttH") signalGGFttH.SetValue(100*1.00*0.98) # increase by a factor for better agreement with ATLAS contour signalGGFttH.AddNormFactor("mu", 1, 0, 6) signalGGFttH.AddNormFactor("mu_XS8_ggF", 1, -5, 10) signalGGFttH.AddNormFactor("muT_lvlv", 1, -5, 10) signalGGFttH.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036) signalGGFttH.AddOverallSys("QCDscale_Higgs_ggH", 0.87, 1.13) signalGGFttH.AddOverallSys("QCDscale_Higgs_ggH1in", 0.90, 1.10) signalGGFttH.AddOverallSys("QCDscale_Higgs_acceptance", 0.96, 1.04) signalGGFttH.AddOverallSys("UE", 0.97, 1.03) signalGGFttH.AddOverallSys("JES", 0.95, 1.05) channel.AddSample(signalGGFttH) container.append(signalGGFttH) signalVBFVH = ROOT.RooStats.HistFactory.Sample("signalVBFVH") signalVBFVH.SetValue(100*1.000*0.02) # increase by a factor for better agreement with ATLAS contour signalVBFVH.AddNormFactor("mu", 1, 0, 6) signalVBFVH.AddNormFactor("mu_XS8_VBF", 1, -5, 10) signalVBFVH.AddNormFactor("muW_lvlv", 1, -5, 10) signalVBFVH.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036) signalVBFVH.AddOverallSys("UE", 0.97, 1.03) signalVBFVH.AddOverallSys("JES", 0.95, 1.05) channel.AddSample(signalVBFVH) container.append(signalVBFVH) return channel def channel_lvlv_1jet(): """ Mostly based on table 8 of the combination paper for the uncertainties and table 9 for the event counts. 
""" channel = ROOT.RooStats.HistFactory.Channel( "HWWlvlv1Jet" ) container.append(channel) channel.SetData(309) background = ROOT.RooStats.HistFactory.Sample("background") background.SetValue(261*1.02) # background.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036) # background.AddOverallSys("JES", 0.97, 1.03) channel.AddSample(background) container.append(background) signalGGFttH = ROOT.RooStats.HistFactory.Sample("signalGGFttH") signalGGFttH.SetValue(41*1.00*0.88) # increase by a factor for better agreement with ATLAS contour signalGGFttH.AddNormFactor("mu", 1, 0, 6) signalGGFttH.AddNormFactor("mu_XS8_ggF", 1, -5, 10) signalGGFttH.AddNormFactor("muT_lvlv", 1, -5, 10) signalGGFttH.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036) signalGGFttH.AddOverallSys("QCDscale_Higgs_ggH", 0.87, 1.13) signalGGFttH.AddOverallSys("QCDscale_Higgs_ggH1in", 1.27, 0.77) signalGGFttH.AddOverallSys("QCDscale_Higgs_ggH2in", 1.15, 0.85) signalGGFttH.AddOverallSys("QCDscale_Higgs_acceptance", 0.96, 1.04) signalGGFttH.AddOverallSys("UE", 1.10, 0.90) signalGGFttH.AddOverallSys("JES", 0.98, 1.02) channel.AddSample(signalGGFttH) container.append(signalGGFttH) signalVBFVH = ROOT.RooStats.HistFactory.Sample("signalVBFVH") signalVBFVH.SetValue(41*1.000*0.12) # increase by a factor for better agreement with ATLAS contour signalVBFVH.AddNormFactor("mu", 1, 0, 6) signalVBFVH.AddNormFactor("mu_XS8_VBF", 1, -5, 10) signalVBFVH.AddNormFactor("muW_lvlv", 1, -5, 10) signalVBFVH.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036) signalVBFVH.AddOverallSys("UE", 1.10, 0.90) signalVBFVH.AddOverallSys("JES", 0.98, 1.02) channel.AddSample(signalVBFVH) container.append(signalVBFVH) return channel def channel_lvlv_2jet(): """ Mostly based on table 8 of the combination paper for the uncertainties and table 9 for the event counts. 
""" channel = ROOT.RooStats.HistFactory.Channel( "HWWlvlv2Jet" ) container.append(channel) channel.SetData(55) background = ROOT.RooStats.HistFactory.Sample("background") background.SetValue(36*1.1) # background.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036) # background.AddOverallSys("JES", 0.93, 1.07) channel.AddSample(background) container.append(background) signalGGFttH = ROOT.RooStats.HistFactory.Sample("signalGGFttH") signalGGFttH.SetValue(10.9*1.00*0.19) # increase by a factor for better agreement with ATLAS contour signalGGFttH.AddNormFactor("mu", 1, 0, 6) signalGGFttH.AddNormFactor("mu_XS8_ggF", 1, -5, 10) signalGGFttH.AddNormFactor("muT_lvlv", 1, -5, 10) signalGGFttH.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036) signalGGFttH.AddOverallSys("QCDscale_Higgs_ggH", 0.87, 1.13) signalGGFttH.AddOverallSys("QCDscale_Higgs_ggH2in", 0.96, 1.04) signalGGFttH.AddOverallSys("QCDscale_Higgs_ggH3in", 0.96, 1.04) signalGGFttH.AddOverallSys("QCDscale_Higgs_acceptance_2jet", 0.97, 1.03) signalGGFttH.AddOverallSys("UE_2jet", 0.95, 1.05) signalGGFttH.AddOverallSys("JES", 0.94, 1.06) channel.AddSample(signalGGFttH) container.append(signalGGFttH) signalVBFVH = ROOT.RooStats.HistFactory.Sample("signalVBFVH") signalVBFVH.SetValue(10.9*1.000*0.81) # increase by a factor for better agreement with ATLAS contour signalVBFVH.AddNormFactor("mu", 1, 0, 6) signalVBFVH.AddNormFactor("mu_XS8_VBF", 1, -5, 10) signalVBFVH.AddNormFactor("muW_lvlv", 1, -5, 10) signalVBFVH.AddOverallSys("ATLAS_LUMI_2012", 1.0-0.036, 1.0+0.036) signalVBFVH.AddOverallSys("UE_2jet", 0.95, 1.05) signalVBFVH.AddOverallSys("JES", 0.94, 1.06) channel.AddSample(signalVBFVH) container.append(signalVBFVH) return channel def makeMeasurement( name="comb", H2ph=True, HZZ4l=True, HWWlvlv=True, outDir="../output/atlas_counting/", prefix="standard" ): meas = ROOT.RooStats.HistFactory.Measurement(name, name) container.append( meas ) meas.SetOutputFilePrefix(outDir+prefix) meas.SetPOI("mu") 
meas.AddConstantParam("Lumi") # 2ph does not have lumi uncertainty. Need to introduce separate systematics meas.AddConstantParam("mu_XS8_ggF") meas.AddConstantParam("mu_XS8_VBF") meas.AddConstantParam("muT_2ph") meas.AddConstantParam("muW_2ph") meas.AddConstantParam("muT_4l") meas.AddConstantParam("muW_4l") meas.AddConstantParam("muT_lvlv") meas.AddConstantParam("muW_lvlv") meas.SetLumi(1.0) meas.SetLumiRelErr(0.036) meas.SetExportOnly(True) if H2ph: # meas.AddChannel( channel_2phCombPaper_looseHighMass2Jets() ) # meas.AddChannel( channel_2phCombPaper_tightHighMass2Jets() ) # meas.AddChannel( channel_2phCombPaper_lowMass2Jets() ) # meas.AddChannel( channel_2phCombPaper_EtMissSignificance() ) # meas.AddChannel( channel_2phCombPaper_lepton() ) # meas.AddChannel( channel_2phCombPaper_untagged() ) meas.AddChannel( channel_2phConfNote_looseHighMass2Jets() ) meas.AddChannel( channel_2phConfNote_tightHighMass2Jets() ) meas.AddChannel( channel_2phConfNote_lowMass2Jets() ) meas.AddChannel( channel_2phConfNote_EtMissSignificance() ) meas.AddChannel( channel_2phConfNote_lepton() ) meas.AddChannel( channel_2phConfNote_unconvCentralLowPTt() ) meas.AddChannel( channel_2phConfNote_unconvCentralHighPTt() ) meas.AddChannel( channel_2phConfNote_unconvRestLowPTt() ) meas.AddChannel( channel_2phConfNote_unconvRestHighPTt() ) meas.AddChannel( channel_2phConfNote_convCentralLowPTt() ) meas.AddChannel( channel_2phConfNote_convCentralHighPTt() ) meas.AddChannel( channel_2phConfNote_convRestLowPTt() ) meas.AddChannel( channel_2phConfNote_convRestHighPTt() ) meas.AddChannel( channel_2phConfNote_convTransition() ) if HZZ4l: meas.AddChannel( channel_4lVBFLike() ) meas.AddChannel( channel_4lVHLike() ) meas.AddChannel( channel_4lggFLike_4mu() ) meas.AddChannel( channel_4lggFLike_2e2mu() ) meas.AddChannel( channel_4lggFLike_4e() ) if HWWlvlv: meas.AddChannel( channel_lvlv_0jet() ) meas.AddChannel( channel_lvlv_1jet() ) meas.AddChannel( channel_lvlv_2jet() ) #meas.CollectHistograms() 
meas.PrintTree() print( "Creating output directory." ) os.system( "mkdir -p "+outDir ) meas.PrintXML(outDir+name+"_xml", meas.GetOutputFilePrefix()); ROOT.RooStats.HistFactory.MakeModelAndMeasurementFast(meas); # change from std histfactory naming print("mv "+outDir+"/"+prefix+"_combined_"+name+"_model.root "+outDir+"/"+name+".root") os.system("mv "+outDir+"/"+prefix+"_combined_"+name+"_model.root "+outDir+"/"+name+".root") print( "Done "+name+"." ) if __name__ == "__main__": makeMeasurement( "comb" ) makeMeasurement( "2ph", H2ph=True, HZZ4l=False, HWWlvlv=False ) makeMeasurement( "4l", H2ph=False, HZZ4l=True, HWWlvlv=False ) makeMeasurement( "lvlv", H2ph=False, HZZ4l=False, HWWlvlv=True )
{ "content_hash": "f2ce78c50712134f2e9e12ffd47fe524", "timestamp": "", "source": "github", "line_count": 678, "max_line_length": 127, "avg_line_length": 37.727138643067846, "alnum_prop": 0.731029360021893, "repo_name": "svenkreiss/decouple", "id": "c17d46cf2285dd91fbedf1f651874ca16ba71625", "size": "25637", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "ModelGenerators/atlasCountingModel.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "254433" } ], "symlink_target": "" }
from simplekv._compat import BytesIO
from simplekv.memory import DictStore
from simplekv.decorator import PrefixDecorator

import pytest

from basic_store import BasicStore


class TestPrefixDecorator(BasicStore):
    """Run the BasicStore contract tests against PrefixDecorator.

    A PrefixDecorator wraps a base store and transparently prepends a
    fixed prefix to every key, so several logical namespaces can share
    one underlying store.  The fixtures below vary the prefix and
    optionally pre-populate the base store with unrelated keys to make
    sure the decorator never leaks them through.
    """

    @pytest.fixture(params=[
        'short_',
        'loooooooooooooooooooooooooooooooooooooooooooooooooooooooooong_',
        'nounderscore',
        '_129073ashd812g',
    ])
    def prefix(self, request):
        # Prefix prepended to every key stored through the decorator.
        return request.param

    @pytest.fixture(params=['prefix2_',
                            'zz',
                            ])
    def prefix2(self, request):
        # these are used when multiple prefixes in a single store
        # are requested
        return request.param

    @pytest.fixture(params=[True, False])
    def store(self, request, prefix):
        base_store = DictStore()

        # do we add extra keys to the underlying store?  When True, the
        # base store contains keys outside (and deceptively similar to)
        # the prefixed namespace.
        if request.param:
            base_store.put(u'some_other_value', b'data1')
            base_store.put(u'ends_with_short_', b'data2')
            base_store.put(u'xx', b'data3')
            base_store.put(u'test', b'data4')

        return PrefixDecorator(prefix, base_store)

    def test_put_returns_correct_key(self, store, prefix, key, value):
        # put() must echo back the *unprefixed* key.
        assert key == store.put(key, value)

    def test_put_sets_prefix(self, store, prefix, key, value):
        full_key = prefix + key
        # BUG FIX: this was a bare comparison `key == store.put(...)`,
        # which evaluated and discarded its result without asserting.
        assert key == store.put(key, value)
        # The underlying store must hold the value under the prefixed key.
        assert store._dstore.get(full_key) == value

    def test_put_file_returns_correct_key(self, store, prefix, key, value):
        assert key == store.put_file(key, BytesIO(value))

    def test_put_file_sets_prefix(self, store, prefix, key, value):
        full_key = prefix + key
        # BUG FIX: bare comparison upgraded to an assertion (see
        # test_put_sets_prefix).
        assert key == store.put_file(key, BytesIO(value))
        assert store._dstore.get(full_key) == value

    def test_multiple_prefixes_one_store(self, store, prefix, prefix2, key,
                                         value):
        # Two decorators with different prefixes over the same base store
        # must be fully isolated namespaces.
        base_store = store._dstore
        store2 = PrefixDecorator(prefix2, base_store)

        pv = value + prefix.encode('ascii')
        pv2 = value + prefix2.encode('ascii')

        # put in with each prefix
        store.put(key, pv)
        store2.put(key, pv2)

        assert key in store
        assert key in store2

        assert prefix + key in base_store
        assert prefix2 + key in base_store

        assert len(store.keys()) == 1
        assert len(store2.keys()) == 1

        assert store.get(key) == pv
        assert store2.get(key) == pv2
{ "content_hash": "3250277f7e513cd6b6395172e96033f1", "timestamp": "", "source": "github", "line_count": 78, "max_line_length": 75, "avg_line_length": 31.333333333333332, "alnum_prop": 0.6145662847790507, "repo_name": "fmarczin/simplekv", "id": "63a9a2cc8669f3b87492eebc468b2aa5b5ae6215", "size": "2444", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tests/test_prefix_decorator.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "118367" }, { "name": "Shell", "bytes": "247" } ], "symlink_target": "" }
class MidiOutStream:

    """
    MidiOutStream is basically an event handler. It is the most central
    class in the Midi library. You use it both for writing events to
    an output stream, and as an event handler for an input stream.

    This makes it extremely easy to take input from one stream and
    send it to another. Ie. if you want to read a Midi file, do some
    processing, and send it to a midiport.

    All time values are in absolute values from the opening of a
    stream. To calculate time values, please use the MidiTime and
    MidiDeltaTime classes.
    """

    def __init__(self):
        # The time is rather global, so it needs to be stored here.
        # Otherwise there would be no really simple way to calculate it:
        # the alternative would be to have every event handler do it.
        self._absolute_time = 0      # ticks since the stream was opened
        self._relative_time = 0      # ticks since the previous event
        self._current_track = 0      # index of the track being processed
        self._running_status = None  # last channel status byte, or None

    # time handling event handlers. They should be overwritten with care

    def update_time(self, new_time=0, relative=1):
        """
        Update the stream time.

        If relative is true, new_time is a delta added to the absolute
        time; otherwise new_time becomes the new absolute time and the
        relative time is recomputed as the distance from the previous
        absolute time.
        """
        if relative:
            self._relative_time = new_time
            self._absolute_time += new_time
        else:
            self._relative_time = new_time - self._absolute_time
            self._absolute_time = new_time

    def reset_time(self):
        """
        Reset both relative and absolute time to 0.
        """
        self._relative_time = 0
        self._absolute_time = 0

    def rel_time(self):
        "Returns the relative time"
        return self._relative_time

    def abs_time(self):
        "Returns the absolute time"
        return self._absolute_time

    # running status methods

    def reset_run_stat(self):
        "Invalidates the running status"
        self._running_status = None

    def set_run_stat(self, new_status):
        "Set the new running status"
        self._running_status = new_status

    def get_run_stat(self):
        # FIX: docstring previously said "Set the new running status"
        # (copy-paste from the setter); this is the getter.
        "Returns the current running status"
        return self._running_status

    # track handling event handlers

    def set_current_track(self, new_track):
        "Sets the current track number"
        self._current_track = new_track

    def get_current_track(self):
        "Returns the current track number"
        return self._current_track

    #####################
    ## Midi events

    def channel_message(self, message_type, channel, data):
        """The default event handler for channel messages"""
        pass

    def note_on(self, channel=0, note=0x40, velocity=0x40):
        """
        channel: 0-15
        note, velocity: 0-127
        """
        pass

    def note_off(self, channel=0, note=0x40, velocity=0x40):
        """
        channel: 0-15
        note, velocity: 0-127
        """
        pass

    def aftertouch(self, channel=0, note=0x40, velocity=0x40):
        """
        channel: 0-15
        note, velocity: 0-127
        """
        pass

    def continuous_controller(self, channel, controller, value):
        """
        channel: 0-15
        controller, value: 0-127
        """
        pass

    def patch_change(self, channel, patch):
        """
        channel: 0-15
        patch: 0-127
        """
        pass

    def channel_pressure(self, channel, pressure):
        """
        channel: 0-15
        pressure: 0-127
        """
        pass

    def pitch_bend(self, channel, value):
        """
        channel: 0-15
        value: 0-16383
        """
        pass

    #####################
    ## System Exclusive

    def system_exclusive(self, data):
        """
        data: list of values in range(128)
        """
        pass

    #####################
    ## Common events

    def song_position_pointer(self, value):
        """
        value: 0-16383
        """
        pass

    def song_select(self, songNumber):
        """
        songNumber: 0-127
        """
        pass

    def tuning_request(self):
        """
        No values passed
        """
        pass

    def midi_time_code(self, msg_type, values):
        """
        msg_type: 0-7
        values: 0-15
        """
        pass

    #########################
    # header does not really belong here. But anyhoo!!!

    def header(self, format=0, nTracks=1, division=96):
        """
        format: type of midi file in [0, 1, 2]
                (FIX: previously documented as [1, 2], contradicting the
                default of 0; SMF defines formats 0, 1 and 2)
        nTracks: number of tracks
        division: timing division
        """
        pass

    def eof(self):
        """
        End of file. No more events to be processed.
        """
        pass

    #####################
    ## meta events

    def meta_event(self, meta_type, data):
        """
        Handles any undefined meta events
        """
        pass

    def start_of_track(self, n_track=0):
        """
        n_track: number of track
        """
        pass

    def end_of_track(self):
        """
        End of the current track. No values passed.
        (FIX: previously documented a n_track parameter this method
        does not take.)
        """
        pass

    def sequence_number(self, value):
        """
        value: 0-16383
        """
        pass

    def text(self, text):
        """
        Text event
        text: string
        """
        pass

    def copyright(self, text):
        """
        Copyright notice
        text: string
        """
        pass

    def sequence_name(self, text):
        """
        Sequence/track name
        text: string
        """
        pass

    def instrument_name(self, text):
        """
        text: string
        """
        pass

    def lyric(self, text):
        """
        text: string
        """
        pass

    def marker(self, text):
        """
        text: string
        """
        pass

    def cuepoint(self, text):
        """
        text: string
        """
        pass

    def midi_ch_prefix(self, channel):
        """
        channel: midi channel for subsequent data
        (deprecated in the spec)
        """
        pass

    def midi_port(self, value):
        """
        value: Midi port
        (deprecated in the spec)
        """
        pass

    def tempo(self, value):
        """
        value: 0-2097151
        tempo in us/quarternote
        (to calculate value from bpm: int(60,000,000.00 / BPM))
        """
        pass

    def smtp_offset(self, hour, minute, second, frame, framePart):
        # NOTE: name kept as-is for backward compatibility; "SMPTE" is
        # the standard spelling.
        """
        hour, minute, second: 3 bytes specifying the hour (0-23),
        minutes (0-59) and seconds (0-59), respectively. The hour
        should be encoded with the SMPTE format, just as it is in MIDI
        Time Code.
        frame: A byte specifying the number of frames per second (one
        of : 24, 25, 29, 30).
        framePart: A byte specifying the number of fractional frames,
        in 100ths of a frame (even in SMPTE-based tracks using a
        different frame subdivision, defined in the MThd chunk).
        """
        pass

    def time_signature(self, nn, dd, cc, bb):
        """
        nn: Numerator of the signature as notated on sheet music
        dd: Denominator of the signature as notated on sheet music
            The denominator is a negative power of 2:
            2 = quarter note, 3 = eighth, etc.
        cc: The number of MIDI clocks in a metronome click
        bb: The number of notated 32nd notes in a MIDI quarter note
            (24 MIDI clocks)
        """
        pass

    def key_signature(self, sf, mi):
        """
        sf: is a byte specifying the number of flats (-ve) or sharps
            (+ve) that identifies the key signature (-7 = 7 flats,
            -1 = 1 flat, 0 = key of C, 1 = 1 sharp, etc).
        mi: is a byte specifying a major (0) or minor (1) key.
        """
        pass

    def sequencer_specific(self, data):
        """
        data: The data as byte values
        """
        pass

    #####################
    ## realtime events

    def timing_clock(self):
        """
        No values passed
        """
        pass

    def song_start(self):
        """
        No values passed
        """
        pass

    def song_stop(self):
        """
        No values passed
        """
        pass

    def song_continue(self):
        """
        No values passed
        """
        pass

    def active_sensing(self):
        """
        No values passed
        """
        pass

    def system_reset(self):
        """
        No values passed
        """
        pass


if __name__ == '__main__':
    midiOut = MidiOutStream()
    midiOut.update_time(0, 0)
    midiOut.note_on(0, 63, 127)
    midiOut.note_off(0, 63, 127)
{ "content_hash": "4039d2767524f78e22adb5830e7fbbfc", "timestamp": "", "source": "github", "line_count": 470, "max_line_length": 74, "avg_line_length": 19.848936170212767, "alnum_prop": 0.47850787865794836, "repo_name": "feynmanliang/bachbot", "id": "c128fa6cbbdd8281a7c90ac00d1c2cdd4f858340", "size": "9359", "binary": false, "copies": "18", "ref": "refs/heads/master", "path": "scripts/rnnrbm/midi/MidiOutStream.py", "mode": "33188", "license": "mit", "language": [ { "name": "Dockerfile", "bytes": "5825" }, { "name": "Lua", "bytes": "47425" }, { "name": "Python", "bytes": "137827" }, { "name": "Shell", "bytes": "7071" } ], "symlink_target": "" }
import sys
# Python 2 module layout (urlparse); the module object is needed so
# _register_scheme can patch its uses_* lists.
import urlparse as urlparse_module
from urlparse import parse_qs, urlparse, urlunparse

import zmq
from botocore.session import get_session

from cs.eyrie import Vassal, ZMQChannel, script_main
from cs.eyrie.interfaces import IKafka
from cs.eyrie.transistor import (
    CLOSED,
    TRANSIENT_ERRORS,
    AsyncSQSClient,
    PailfileSource,
    RDKafkaSource,
    SQSSource,
    StreamSource,
    ZMQSource,
    Gate,
    Transistor,
    RDKafkaDrain,
    SQSDrain,
    StreamDrain,
    ZMQDrain,
)
try:
    from hadoop.io import SequenceFile
except ImportError:
    # Pailfile (Hadoop SequenceFile) input is optional.
    SequenceFile = None
from pyramid.path import DottedNameResolver
from pyramid.settings import asbool, aslist
from tornado import gen
from tornado.httpclient import AsyncHTTPClient


# Transports understood by ZMQ endpoints.
# http://api.zeromq.org/4-2:zmq-connect
ZMQ_TRANSPORTS = {
    'epgm',
    'inproc',
    'ipc',
    'pgm',
    'tcp',
    'vmci',
}


def _register_scheme(scheme):
    """Teach the stdlib urlparse module to treat *scheme* like http.

    Appends the scheme to every uses_* registry list so that netloc,
    query, etc. are parsed for URLs using it.
    """
    for method in filter(lambda s: s.startswith('uses_'),
                         dir(urlparse_module)):
        getattr(urlparse_module, method).append(scheme)


class Actuator(Vassal):
    """Pipe messages from one source (stdin, pailfile, ZMQ, Kafka, SQS)
    through a transducer into one drain (stdout, ZMQ, Kafka, SQS).

    The source/drain pair is selected at runtime from the schemes of the
    --input/--output URLs; the Gate applies the transducer and feeds the
    drain, and the Transistor ties source, gate and drain together.
    """

    channels = dict(
        Vassal.channels,
        input=ZMQChannel(
            # This is configured dynamically at runtime
            endpoint=None,
            socket_type=zmq.PULL,
        ),
        output=ZMQChannel(
            # This is configured dynamically at runtime
            endpoint=None,
            socket_type=zmq.PUSH,
        ),
    )
    title = "(rf:actuator)"
    app_name = 'actuator'
    args = [
        (
            ('--input',),
            dict(
                help="Source to be used as input",
                required=True,
                nargs='+',
                type=urlparse,
            )
        ),
        (
            ('--output',),
            dict(
                help="Destination of input data",
                required=True,
                type=urlparse,
            )
        ),
        (
            ('--inflight',),
            dict(
                help="Maximum number of messages to keep in flight",
                required=False,
                default=500,
                type=int,
            )
        ),
        (
            ('--transducer',),
            dict(
                help="Dotted-path to function to transform input messages to output",
                default='cs.eyrie.transistor.get_last_element',
            )
        ),
        (
            ('--transducer-config',),
            dict(
                help="Arguments passed to transducer at startup",
                default=[],
                required=False,
                nargs='*',
            )
        ),
    ]

    def __init__(self, **kwargs):
        # The Vassal base is told not to set up its own DB/streams;
        # this class wires its own transistor instead.
        kwargs['init_db'] = False
        kwargs['init_streams'] = False
        self.streams = {}
        super(Actuator, self).__init__(**kwargs)
        self.transistor = self.init_transistor(**kwargs)

    @staticmethod
    def _parse_bootstrap_servers(params):
        """Return the Kafka bootstrap-server list from parsed query params.

        Supports both repeated bootstrap_servers parameters and a single
        comma-separated value.  Extracted because this logic was
        duplicated verbatim in init_kafka_drain and init_kafka_source.
        """
        bootstrap_servers = params['bootstrap_servers']
        list_bootstrap_servers = aslist(bootstrap_servers[0].replace(',', ' '))
        if len(list_bootstrap_servers) > 1:
            bootstrap_servers = list_bootstrap_servers
        return bootstrap_servers

    def init_kafka_drain(self, **kwargs):
        """Build a RDKafkaDrain producing to the topic in the output URL."""
        from confluent_kafka import Producer
        params = parse_qs(kwargs['output'].query)
        bootstrap_servers = self._parse_bootstrap_servers(params)
        return RDKafkaDrain(
            self.logger,
            self.loop,
            Producer({
                'api.version.request': True,
                'bootstrap.servers': ','.join(bootstrap_servers),
                'default.topic.config': {'produce.offset.report': True},
                # The lambda is necessary to return control to the main
                # Tornado thread
                'error_cb': lambda err: self.loop.add_callback(self.onKafkaError,
                                                               err),
                'group.id': params['group_name'][-1],
                # See: https://github.com/edenhill/librdkafka/issues/437
                'log.connection.close': False,
                'queue.buffering.max.ms': 1000,
                'queue.buffering.max.messages': kwargs['inflight'],
            }),
            kwargs['output'].netloc,
        )

    def init_kafka_source(self, **kwargs):
        """Build a RDKafkaSource consuming from every input URL's topic.

        Query parameters from all input URLs are merged; offset_reset
        defaults to 'largest' and partition_strategy to 'roundrobin'.
        """
        from confluent_kafka import Consumer
        # Merge query parameters across all input URLs.
        params = {}
        for parsed_url in kwargs['input']:
            url_params = parse_qs(parsed_url.query)
            for key, val in url_params.items():
                params.setdefault(key, []).extend(val)
        bootstrap_servers = self._parse_bootstrap_servers(params)
        offset_reset = params.get('offset_reset')
        if offset_reset:
            offset_reset = offset_reset[-1]
        else:
            offset_reset = 'largest'
        strategy = params.get('partition_strategy')
        if strategy:
            strategy = strategy[-1]
        else:
            strategy = 'roundrobin'
        return RDKafkaSource(
            self.logger,
            self.loop,
            kwargs['gate'],
            Consumer({
                'api.version.request': True,
                'bootstrap.servers': ','.join(bootstrap_servers),
                #'debug': 'all',
                'default.topic.config': {
                    'auto.offset.reset': offset_reset,
                    'enable.auto.commit': True,
                    'offset.store.method': 'broker',
                    'produce.offset.report': True,
                },
                'enable.partition.eof': False,
                # The lambda is necessary to return control to the main
                # Tornado thread
                'error_cb': lambda err: self.loop.add_callback(self.onKafkaError,
                                                               err),
                'group.id': params['group_name'][0],
                # See: https://github.com/edenhill/librdkafka/issues/437
                'log.connection.close': False,
                'max.in.flight': kwargs['inflight'],
                'partition.assignment.strategy': strategy,
                'queue.buffering.max.ms': 1000,
            }),
            *[url.netloc for url in kwargs['input']]
        )

    def init_pailfile_source(self, **kwargs):
        """Build a PailfileSource reading a Hadoop SequenceFile from disk."""
        return PailfileSource(
            self.logger,
            self.loop,
            kwargs['gate'],
            SequenceFile.Reader(kwargs['input'][0].path),
        )

    def _init_sqs_client(self, parsed_url, **kwargs):
        """Build an AsyncSQSClient for the queue named in *parsed_url*.

        Optional query parameters: queue_url, region (last value wins).
        """
        params = parse_qs(parsed_url.query)
        session = get_session()
        queue_url = params.get('queue_url')
        if queue_url:
            queue_url = queue_url[-1]
        else:
            queue_url = None
        region = params.get('region')
        if region:
            region = region[-1]
        else:
            region = None
        return AsyncSQSClient(
            session,
            self.logger,
            queue_name=parsed_url.netloc,
            queue_url=queue_url,
            region=region,
            http_client=AsyncHTTPClient(
                self.loop,
                force_instance=True,
                defaults=dict(
                    # Allow the long poll to complete before timing out.
                    request_timeout=AsyncSQSClient.long_poll_timeout+5,
                )
            )
        )

    def init_sqs_drain(self, **kwargs):
        sqs_client = self._init_sqs_client(kwargs['output'], **kwargs)
        return SQSDrain(
            self.logger,
            self.loop,
            sqs_client,
        )

    def init_sqs_source(self, **kwargs):
        sqs_client = self._init_sqs_client(kwargs['input'][0], **kwargs)
        return SQSSource(
            self.logger,
            self.loop,
            kwargs['gate'],
            sqs_client,
        )

    def init_stream_drain(self, **kwargs):
        return StreamDrain(
            self.logger,
            self.loop,
            sys.stdout,
        )

    def init_stream_source(self, **kwargs):
        return StreamSource(
            self.logger,
            self.loop,
            kwargs['gate'],
            sys.stdin,
        )

    def init_transistor(self, **kwargs):
        """Select drain and source from the URL schemes and wire them
        together through a Gate into a Transistor.

        Non-ZMQ endpoints delete the corresponding ZMQ channel from the
        class-level channel map so the Vassal base does not open it.
        """
        if kwargs['output'].scheme == 'file' and \
           kwargs['output'].netloc == '-':
            del self.channels['output']
            drain = self.init_stream_drain(**kwargs)
        elif kwargs['output'].scheme.lower() in ZMQ_TRANSPORTS:
            drain = self.init_zmq_drain(**kwargs)
        elif kwargs['output'].scheme == 'kafka':
            del self.channels['output']
            drain = self.init_kafka_drain(**kwargs)
        elif kwargs['output'].scheme == 'sqs':
            del self.channels['output']
            drain = self.init_sqs_drain(**kwargs)
        else:
            raise ValueError(
                'Unsupported drain scheme: {}'.format(kwargs['output'].scheme)
            )

        # The gate "has" a drain;
        # a source "has" a gate
        resolver = DottedNameResolver()
        transducer = resolver.maybe_resolve(kwargs['transducer'])
        if kwargs['transducer_config']:
            # A configured transducer is a factory: call it once with the
            # CLI-provided arguments to obtain the actual transducer.
            transducer = transducer(*kwargs['transducer_config'])
        kwargs['gate'] = Gate(
            self.logger,
            self.loop,
            drain,
            transducer,
        )

        if not kwargs['input'][0].scheme and kwargs['input'][0].path == '-':
            del self.channels['input']
            source = self.init_stream_source(**kwargs)
        elif kwargs['input'][0].scheme == 'file':
            del self.channels['input']
            source = self.init_pailfile_source(**kwargs)
        elif kwargs['input'][0].scheme.lower() in ZMQ_TRANSPORTS:
            source = self.init_zmq_source(**kwargs)
        elif kwargs['input'][0].scheme == 'kafka':
            del self.channels['input']
            source = self.init_kafka_source(**kwargs)
        elif kwargs['input'][0].scheme == 'sqs':
            del self.channels['input']
            source = self.init_sqs_source(**kwargs)
        else:
            # BUG FIX: kwargs['input'] is a *list* of parsed URLs; the
            # previous kwargs['input'].scheme raised AttributeError
            # instead of the intended ValueError.
            raise ValueError(
                'Unsupported source scheme: {}'.format(
                    kwargs['input'][0].scheme)
            )
        return Transistor(
            self.logger,
            self.loop,
            kwargs['gate'],
            source,
            drain,
        )

    def _init_zmq_socket(self, parsed_url, channel, **kwargs):
        """Create, configure and connect/bind a ZMQ socket for *parsed_url*.

        Query parameters: bind (boolean, default False) and socket_type
        (default from kwargs['default_socket_type']).
        """
        # Reconstruct ZMQ endpoint, sans query parameters
        endpoint = urlunparse((parsed_url.scheme,
                               parsed_url.netloc,
                               parsed_url.path,
                               None, None, None))
        params = parse_qs(parsed_url.query)
        bind = params.get('bind')
        if bind:
            bind = asbool(bind[0])
        else:
            bind = False
        socket_type = params.get('socket_type')
        if socket_type:
            socket_type = socket_type[0]
        else:
            socket_type = kwargs['default_socket_type']
        channel = ZMQChannel(**dict(
            vars(channel),
            bind=bind,
            endpoint=endpoint,
            socket_type=getattr(zmq, socket_type.upper()),
        ))
        socket = self.context.socket(channel.socket_type)
        if channel.socket_type == zmq.SUB:
            # Subscribe to everything; filtering happens downstream.
            socket.setsockopt(zmq.SUBSCRIBE, '')
        socket.set_hwm(kwargs['inflight'])
        if bind:
            socket.bind(endpoint)
        else:
            socket.connect(endpoint)
        return socket

    def init_zmq_drain(self, **kwargs):
        kwargs['default_socket_type'] = 'push'
        socket = self._init_zmq_socket(kwargs['output'],
                                       self.channels['output'],
                                       **kwargs)
        return ZMQDrain(
            self.logger,
            self.loop,
            socket,
        )

    def init_zmq_source(self, **kwargs):
        kwargs['default_socket_type'] = 'pull'
        socket = self._init_zmq_socket(kwargs['input'][0],
                                       self.channels['input'],
                                       **kwargs)
        return ZMQSource(
            self.logger,
            self.loop,
            kwargs['gate'],
            socket,
        )

    @gen.coroutine
    def join(self, timeout=None):
        """Wait for the transistor to finish, then shut the vassal down."""
        yield self.transistor.join(timeout)
        yield self.terminate()

    @gen.coroutine
    def onKafkaError(self, err):
        """librdkafka error callback (marshalled onto the IOLoop).

        Transient errors are logged and ignored; anything else flags the
        Kafka-backed endpoint(s) as errored.
        """
        if err.code() in TRANSIENT_ERRORS:
            self.logger.warning('Ignoring: %s', err)
        else:
            self.logger.error(err)
            if IKafka.providedBy(self.transistor.drain):
                self.transistor.drain.output_error.set()
            if IKafka.providedBy(self.transistor.source):
                self.transistor.source.input_error.set()

    @gen.coroutine
    def terminate(self):
        if self.transistor.state != CLOSED:
            self.transistor.close('Actuator terminating')
        super(Actuator, self).terminate()


def main():
    # Execute this before script_main, to avoid polluting on simple module
    # import but also to be present before argparse does its thing
    _register_scheme('kafka')
    _register_scheme('sqs')
    for scheme in ZMQ_TRANSPORTS:
        _register_scheme(scheme)
    actuator = script_main(Actuator, None, start_loop=False)
    actuator.join()
    actuator.loop.start()


if __name__ == "__main__":
    main()
{ "content_hash": "95daa0577d167b55e7570b378dd424e4", "timestamp": "", "source": "github", "line_count": 418, "max_line_length": 85, "avg_line_length": 32.46650717703349, "alnum_prop": 0.517500552649031, "repo_name": "CrowdStrike/cs.eyrie", "id": "507d84d21d064e5b790e5a7e01c3a4f77af266af", "size": "13571", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "cs/eyrie/scripts/actuator.py", "mode": "33188", "license": "bsd-2-clause", "language": [ { "name": "Makefile", "bytes": "1273" }, { "name": "Python", "bytes": "223264" } ], "symlink_target": "" }
import os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # sys.path.insert(0, os.path.abspath('.')) # -- General configuration ------------------------------------------------ # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.coverage', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. source_encoding = 'utf-8' # The master toctree document. master_doc = 'index' # General information about the project. project = 'ThriftPy' copyright = '2014, Lx Yu' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '0.1.15' # The full version, including alpha/beta/rc tags. release = '0.1.15' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. # today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all # documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. 
# add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output ---------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'default' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # import and set the theme if we're building docs locally if os.environ.get('READTHEDOCS', None) != 'True': html_theme = 'sphinx_rtd_theme' import sphinx_rtd_theme html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". # html_title = None # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = None # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. 
They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Add any extra paths that contain custom files (such as robots.txt or # .htaccess) here, relative to this directory. These files are copied # directly to the root of the documentation. # html_extra_path = [] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'ThriftPydoc' # -- Options for LaTeX output --------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). 
# 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ ('index', 'ThriftPy.tex', 'ThriftPy Documentation', 'Lx Yu', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- Options for manual page output --------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'thriftpy', 'ThriftPy Documentation', ['Lx Yu'], 1) ] # If true, show URL addresses after external links. # man_show_urls = False # -- Options for Texinfo output ------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description, category) texinfo_documents = [ ('index', 'ThriftPy', 'ThriftPy Documentation', 'Lx Yu', 'ThriftPy', 'Pure python implementation of Thrift python lib', 'Miscellaneous'), ] # Documents to append as an appendix to all manuals. # texinfo_appendices = [] # If false, no module index is generated. # texinfo_domain_indices = True # How to display URL addresses: 'footnote', 'no', or 'inline'. 
# texinfo_show_urls = 'footnote' # If true, do not generate a @detailmenu in the "Top" node's menu. # texinfo_no_detailmenu = False
{ "content_hash": "634085306f43f19594c06869268da67d", "timestamp": "", "source": "github", "line_count": 252, "max_line_length": 79, "avg_line_length": 31.91269841269841, "alnum_prop": 0.6965928873414573, "repo_name": "itnihao/thriftpy", "id": "35c08a9bd624a0370eb66caab0b5d728dd61cf9d", "size": "8476", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "docs/conf.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "974" }, { "name": "Makefile", "bytes": "7053" }, { "name": "Python", "bytes": "178233" }, { "name": "Shell", "bytes": "374" }, { "name": "Thrift", "bytes": "20845" } ], "symlink_target": "" }
import unittest

from gotorrent import tracker
from pyactor.context import set_context, create_host, sleep, shutdown


class TestTracker(unittest.TestCase):
    """Integration tests for the Tracker actor (peer announce / expiry)."""

    def setUp(self):
        # Gets executed before every test: spin up a fresh actor host and a
        # tracker actor with a 2-second peer TTL.
        set_context('green_thread')
        self.h = create_host()
        self.t = self.h.spawn('tracker', tracker.Tracker, [2, 'push'])
        self.t.init_start()

    def tearDown(self):
        # Gets executed after every test: tear the actor host down.
        shutdown()

    def test_empty_swarm(self):
        # An unknown torrent hash must report no peers.
        self.assertEqual(self.t.get_peers('tetris'), [])

    def test_peers(self):
        # Test that 2 peers announce themselves correctly.
        self.t.announce('tetris', 'christian')
        self.t.announce('tetris', 'arnau')
        # See if there are 2 peers.
        self.assertEqual(len(self.t.get_peers('tetris')), 2)
        sleep(10)
        # See if the idle peers are deleted correctly.
        self.assertEqual(self.t.get_peers('tetris'), [])

    def test_peer_announcing(self):
        # Tests that re-announcing an existing peer refreshes its TTL and
        # that new peers are added to an existing torrent_hash correctly.
        self.t.announce('candy', 'arnau')
        sleep(5)
        self.t.announce('candy', 'arnau')
        self.t.announce('candy', 'christian')
        self.assertTrue(
            set(self.t.get_peers('candy')) == set(['arnau', 'christian']))


if __name__ == '__main__':
    # BUG FIX: the original referenced the undefined name ``TestStringMethods``
    # (a leftover from a template), raising NameError when this file was run
    # directly. Load the test case actually defined in this module.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestTracker)
    unittest.TextTestRunner(verbosity=2).run(suite)
{ "content_hash": "72cf86755d76e772986b654f430739a3", "timestamp": "", "source": "github", "line_count": 46, "max_line_length": 87, "avg_line_length": 33.391304347826086, "alnum_prop": 0.6373697916666666, "repo_name": "christianzanger/sd-gotorrent", "id": "44725e86bdb227d33885ca46a0771b7fbb2e45a5", "size": "1536", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "test/tracker-test.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "13782" }, { "name": "Shell", "bytes": "49" } ], "symlink_target": "" }
from django.db import models
from django.contrib.localflavor.mk.models import (
    MKIdentityCardNumberField, MKMunicipalityField, UMCNField)


class MKPerson(models.Model):
    # Model exercising the Macedonian (MK) localflavor fields.
    first_name = models.CharField(max_length = 20)
    last_name = models.CharField(max_length = 20)
    # Unique master citizen number field (UMCN).
    umcn = UMCNField()
    # National identity card number.
    id_number = MKIdentityCardNumberField()
    # The same municipality field declared both optional and required,
    # to cover blank handling in both configurations.
    municipality = MKMunicipalityField(blank = True)
    municipality_req = MKMunicipalityField(blank = False)

    class Meta:
        app_label = 'localflavor'
{ "content_hash": "4603cd819adc5c82e15a527d4f7cb7b7", "timestamp": "", "source": "github", "line_count": 14, "max_line_length": 62, "avg_line_length": 36.142857142857146, "alnum_prop": 0.7312252964426877, "repo_name": "mitsuhiko/django", "id": "b79239a9bfadc180be1f8d7bdf7cff89528839bf", "size": "506", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tests/regressiontests/localflavor/mk/models.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "JavaScript", "bytes": "85678" }, { "name": "Python", "bytes": "7282847" }, { "name": "Shell", "bytes": "4559" } ], "symlink_target": "" }
"""A library with tools that processes simulation data."""

from typing import Sequence, Tuple

import numpy as np


def coordinates_to_indices(
    locations: np.ndarray,
    domain_size: Sequence[float],
    mesh_size_local: Sequence[int],
    partition: Sequence[int],
    halo_width: int,
) -> Tuple[Sequence[int], np.ndarray]:
  """Finds the indices of the partition and physical locations in each core.

  This function assumes that all probes are in the same core of partition.

  Args:
    locations: The physical coordinates of the probe locations. Stored in a 2D
      array of 3 columns, with the columns being x, y, and z coordinates,
      respectively.
    domain_size: A three-element sequence that stores the physical size of the
      full domain.
    mesh_size_local: A length 3 tuple with elements being the number of mesh
      points in the x, y, and z directions in each core, respectively.
      Including halos.
    partition: A length 3 tuple with elements being the number of cores in the
      x, y, and directions, respectively.
    halo_width: The number of points contained in the halo layer on each side
      of the simulation mesh.

  Returns:
    A tuple of 2 elements. The first element is a length three sequence that
    stores the indices of the core in the partition. The second element is a
    2D np.array. Each row of the array stores the index of the corresponding
    point in `locations` that's local to the core.
  """
  # Effective size of the mesh in each core, i.e. excluding the halo layers.
  n = [core_n - 2 * halo_width for core_n in mesh_size_local]

  # Length of the domain in each core.
  core_l = [l_i / nc_i for l_i, nc_i in zip(domain_size, partition)]

  # Grid spacing.
  h = [
      l_i / (n_i * c_i - 1.0)
      for l_i, c_i, n_i in zip(domain_size, partition, n)
  ]

  # Find the indices of the core. Assumes that all probes are in the same
  # partition.
  # BUG FIX: `np.int` was deprecated in NumPy 1.20 and removed in 1.24; the
  # builtin `int` is the documented drop-in replacement (same applies to the
  # `dtype=int` usages below).
  c_indices = [int(locations[0][i] // core_l[i]) for i in range(3)]

  # Finds the indices of the physical coordinates inside the core, shifted by
  # `halo_width` to account for the halo layer.
  indices = np.zeros_like(locations, dtype=int)
  for i in range(3):
    indices[:, i] = np.array(
        (locations[:, i] - c_indices[i] * core_l[i]) // h[i] + halo_width,
        dtype=int)

  return c_indices, indices
{ "content_hash": "5f70e92422705e68580bf85be3009198", "timestamp": "", "source": "github", "line_count": 61, "max_line_length": 80, "avg_line_length": 36.622950819672134, "alnum_prop": 0.6799462846911369, "repo_name": "google-research/swirl-lm", "id": "f298c0703684dfdafe499ebadb7033131e26101b", "size": "2820", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "swirl_lm/utility/post_processing/data_processing.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Jupyter Notebook", "bytes": "19379" }, { "name": "Python", "bytes": "1046433" }, { "name": "Starlark", "bytes": "29" } ], "symlink_target": "" }
from __future__ import unicode_literals

from django.db import migrations


def set_null_to_blank(queryset, fields):
    """Replace ``None`` values with empty strings on the given fields.

    Every object in the queryset is saved once, after all listed fields
    have been normalized.
    """
    for obj in queryset:
        for name in fields:
            if getattr(obj, name) is None:
                setattr(obj, name, '')
        obj.save()


def run_data_migration(apps, schema_editor):
    # Use the historical model so the migration works regardless of the
    # current state of the Condition class.
    condition_model = apps.get_model('conditions', 'Condition')
    set_null_to_blank(condition_model.objects.all(), (
        'uri',
        'uri_prefix',
        'key',
        'comment',
        'target_text',
    ))


class Migration(migrations.Migration):

    dependencies = [
        ('conditions', '0016_meta'),
    ]

    operations = [
        migrations.RunPython(run_data_migration),
    ]
{ "content_hash": "b5845d6c1cdd4db3f594fc23c42fbcc7", "timestamp": "", "source": "github", "line_count": 35, "max_line_length": 57, "avg_line_length": 21.542857142857144, "alnum_prop": 0.5848806366047745, "repo_name": "rdmorganiser/rdmo", "id": "fcb3e7d16a172e3de031a5459ce7436aec1fef8e", "size": "828", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "rdmo/conditions/migrations/0017_data_migration.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "426256" }, { "name": "JavaScript", "bytes": "110821" }, { "name": "Python", "bytes": "1265092" }, { "name": "SCSS", "bytes": "20373" } ], "symlink_target": "" }
"""Blocking and non-blocking HTTP client interfaces. This module defines a common interface shared by two implementations, ``simple_httpclient`` and ``curl_httpclient``. Applications may either instantiate their chosen implementation class directly or use the `AsyncHTTPClient` class from this module, which selects an implementation that can be overridden with the `AsyncHTTPClient.configure` method. The default implementation is ``simple_httpclient``, and this is expected to be suitable for most users' needs. However, some applications may wish to switch to ``curl_httpclient`` for reasons such as the following: * ``curl_httpclient`` has some features not found in ``simple_httpclient``, including support for HTTP proxies and the ability to use a specified network interface. * ``curl_httpclient`` is more likely to be compatible with sites that are not-quite-compliant with the HTTP spec, or sites that use little-exercised features of HTTP. * ``curl_httpclient`` is faster. * ``curl_httpclient`` was the default prior to Tornado 2.0. Note that if you are using ``curl_httpclient``, it is highly recommended that you use a recent version of ``libcurl`` and ``pycurl``. Currently the minimum supported version is 7.18.2, and the recommended version is 7.21.1 or newer. """ from __future__ import absolute_import, division, print_function, with_statement import functools import time import weakref from tornado.concurrent import TracebackFuture from tornado.escape import utf8 from tornado import httputil, stack_context from tornado.ioloop import IOLoop from tornado.util import Configurable class HTTPClient(object): """A blocking HTTP client. This interface is provided for convenience and testing; most applications that are running an IOLoop will want to use `AsyncHTTPClient` instead. 
Typical usage looks like this:: http_client = httpclient.HTTPClient() try: response = http_client.fetch("http://www.google.com/") print response.body except httpclient.HTTPError as e: print "Error:", e http_client.close() """ def __init__(self, async_client_class=None, **kwargs): self._io_loop = IOLoop() if async_client_class is None: async_client_class = AsyncHTTPClient self._async_client = async_client_class(self._io_loop, **kwargs) self._closed = False def __del__(self): self.close() def close(self): """Closes the HTTPClient, freeing any resources used.""" if not self._closed: self._async_client.close() self._io_loop.close() self._closed = True def fetch(self, request, **kwargs): """Executes a request, returning an `HTTPResponse`. The request may be either a string URL or an `HTTPRequest` object. If it is a string, we construct an `HTTPRequest` using any additional kwargs: ``HTTPRequest(request, **kwargs)`` If an error occurs during the fetch, we raise an `HTTPError`. """ response = self._io_loop.run_sync(functools.partial( self._async_client.fetch, request, **kwargs)) response.rethrow() return response class AsyncHTTPClient(Configurable): """An non-blocking HTTP client. Example usage:: def handle_request(response): if response.error: print "Error:", response.error else: print response.body http_client = AsyncHTTPClient() http_client.fetch("http://www.google.com/", handle_request) The constructor for this class is magic in several respects: It actually creates an instance of an implementation-specific subclass, and instances are reused as a kind of pseudo-singleton (one per `.IOLoop`). The keyword argument ``force_instance=True`` can be used to suppress this singleton behavior. Constructor arguments other than ``io_loop`` and ``force_instance`` are deprecated. 
The implementation subclass as well as arguments to its constructor can be set with the static method `configure()` """ @classmethod def configurable_base(cls): return AsyncHTTPClient @classmethod def configurable_default(cls): from tornado.simple_httpclient import SimpleAsyncHTTPClient return SimpleAsyncHTTPClient @classmethod def _async_clients(cls): attr_name = '_async_client_dict_' + cls.__name__ if not hasattr(cls, attr_name): setattr(cls, attr_name, weakref.WeakKeyDictionary()) return getattr(cls, attr_name) def __new__(cls, io_loop=None, force_instance=False, **kwargs): io_loop = io_loop or IOLoop.current() if io_loop in cls._async_clients() and not force_instance: return cls._async_clients()[io_loop] instance = super(AsyncHTTPClient, cls).__new__(cls, io_loop=io_loop, **kwargs) if not force_instance: cls._async_clients()[io_loop] = instance return instance def initialize(self, io_loop, defaults=None): self.io_loop = io_loop self.defaults = dict(HTTPRequest._DEFAULTS) if defaults is not None: self.defaults.update(defaults) def close(self): """Destroys this HTTP client, freeing any file descriptors used. This method is **not needed in normal use** due to the way that `AsyncHTTPClient` objects are transparently reused. ``close()`` is generally only necessary when either the `.IOLoop` is also being closed, or the ``force_instance=True`` argument was used when creating the `AsyncHTTPClient`. No other methods may be called on the `AsyncHTTPClient` after ``close()``. """ if self._async_clients().get(self.io_loop) is self: del self._async_clients()[self.io_loop] def fetch(self, request, callback=None, **kwargs): """Executes a request, asynchronously returning an `HTTPResponse`. The request may be either a string URL or an `HTTPRequest` object. If it is a string, we construct an `HTTPRequest` using any additional kwargs: ``HTTPRequest(request, **kwargs)`` This method returns a `.Future` whose result is an `HTTPResponse`. 
The ``Future`` wil raise an `HTTPError` if the request returned a non-200 response code. If a ``callback`` is given, it will be invoked with the `HTTPResponse`. In the callback interface, `HTTPError` is not automatically raised. Instead, you must check the response's ``error`` attribute or call its `~HTTPResponse.rethrow` method. """ if not isinstance(request, HTTPRequest): request = HTTPRequest(url=request, **kwargs) # We may modify this (to add Host, Accept-Encoding, etc), # so make sure we don't modify the caller's object. This is also # where normal dicts get converted to HTTPHeaders objects. request.headers = httputil.HTTPHeaders(request.headers) request = _RequestProxy(request, self.defaults) future = TracebackFuture() if callback is not None: callback = stack_context.wrap(callback) def handle_future(future): exc = future.exception() if isinstance(exc, HTTPError) and exc.response is not None: response = exc.response elif exc is not None: response = HTTPResponse( request, 599, error=exc, request_time=time.time() - request.start_time) else: response = future.result() self.io_loop.add_callback(callback, response) future.add_done_callback(handle_future) def handle_response(response): if response.error: future.set_exception(response.error) else: future.set_result(response) self.fetch_impl(request, handle_response) return future def fetch_impl(self, request, callback): raise NotImplementedError() @classmethod def configure(cls, impl, **kwargs): """Configures the `AsyncHTTPClient` subclass to use. ``AsyncHTTPClient()`` actually creates an instance of a subclass. This method may be called with either a class object or the fully-qualified name of such a class (or ``None`` to use the default, ``SimpleAsyncHTTPClient``) If additional keyword arguments are given, they will be passed to the constructor of each subclass instance created. 
The keyword argument ``max_clients`` determines the maximum number of simultaneous `~AsyncHTTPClient.fetch()` operations that can execute in parallel on each `.IOLoop`. Additional arguments may be supported depending on the implementation class in use. Example:: AsyncHTTPClient.configure("tornado.curl_httpclient.CurlAsyncHTTPClient") """ super(AsyncHTTPClient, cls).configure(impl, **kwargs) class HTTPRequest(object): """HTTP client request object.""" # Default values for HTTPRequest parameters. # Merged with the values on the request object by AsyncHTTPClient # implementations. _DEFAULTS = dict( connect_timeout=20.0, request_timeout=20.0, follow_redirects=True, max_redirects=5, use_gzip=True, proxy_password='', allow_nonstandard_methods=False, validate_cert=True) def __init__(self, url, method="GET", headers=None, body=None, auth_username=None, auth_password=None, auth_mode=None, connect_timeout=None, request_timeout=None, if_modified_since=None, follow_redirects=None, max_redirects=None, user_agent=None, use_gzip=None, network_interface=None, streaming_callback=None, header_callback=None, prepare_curl_callback=None, proxy_host=None, proxy_port=None, proxy_username=None, proxy_password=None, allow_nonstandard_methods=None, validate_cert=None, ca_certs=None, allow_ipv6=None, client_key=None, client_cert=None): r"""All parameters except ``url`` are optional. :arg string url: URL to fetch :arg string method: HTTP method, e.g. "GET" or "POST" :arg headers: Additional HTTP headers to pass on the request :arg body: HTTP body to pass on the request :type headers: `~tornado.httputil.HTTPHeaders` or `dict` :arg string auth_username: Username for HTTP authentication :arg string auth_password: Password for HTTP authentication :arg string auth_mode: Authentication mode; default is "basic". 
Allowed values are implementation-defined; ``curl_httpclient`` supports "basic" and "digest"; ``simple_httpclient`` only supports "basic" :arg float connect_timeout: Timeout for initial connection in seconds :arg float request_timeout: Timeout for entire request in seconds :arg if_modified_since: Timestamp for ``If-Modified-Since`` header :type if_modified_since: `datetime` or `float` :arg bool follow_redirects: Should redirects be followed automatically or return the 3xx response? :arg int max_redirects: Limit for ``follow_redirects`` :arg string user_agent: String to send as ``User-Agent`` header :arg bool use_gzip: Request gzip encoding from the server :arg string network_interface: Network interface to use for request. ``curl_httpclient`` only; see note below. :arg callable streaming_callback: If set, ``streaming_callback`` will be run with each chunk of data as it is received, and ``HTTPResponse.body`` and ``HTTPResponse.buffer`` will be empty in the final response. :arg callable header_callback: If set, ``header_callback`` will be run with each header line as it is received (including the first line, e.g. ``HTTP/1.0 200 OK\r\n``, and a final line containing only ``\r\n``. All lines include the trailing newline characters). ``HTTPResponse.headers`` will be empty in the final response. This is most useful in conjunction with ``streaming_callback``, because it's the only way to get access to header data while the request is in progress. :arg callable prepare_curl_callback: If set, will be called with a ``pycurl.Curl`` object to allow the application to make additional ``setopt`` calls. :arg string proxy_host: HTTP proxy hostname. To use proxies, ``proxy_host`` and ``proxy_port`` must be set; ``proxy_username`` and ``proxy_pass`` are optional. Proxies are currently only supported with ``curl_httpclient``. 
:arg int proxy_port: HTTP proxy port :arg string proxy_username: HTTP proxy username :arg string proxy_password: HTTP proxy password :arg bool allow_nonstandard_methods: Allow unknown values for ``method`` argument? :arg bool validate_cert: For HTTPS requests, validate the server's certificate? :arg string ca_certs: filename of CA certificates in PEM format, or None to use defaults. See note below when used with ``curl_httpclient``. :arg bool allow_ipv6: Use IPv6 when available? Default is false in ``simple_httpclient`` and true in ``curl_httpclient`` :arg string client_key: Filename for client SSL key, if any. See note below when used with ``curl_httpclient``. :arg string client_cert: Filename for client SSL certificate, if any. See note below when used with ``curl_httpclient``. .. note:: When using ``curl_httpclient`` certain options may be inherited by subsequent fetches because ``pycurl`` does not allow them to be cleanly reset. This applies to the ``ca_certs``, ``client_key``, ``client_cert``, and ``network_interface`` arguments. If you use these options, you should pass them on every request (you don't have to always use the same values, but it's not possible to mix requests that specify these options with ones that use the defaults). .. versionadded:: 3.1 The ``auth_mode`` argument. 
""" if headers is None: headers = httputil.HTTPHeaders() if if_modified_since: headers["If-Modified-Since"] = httputil.format_timestamp( if_modified_since) self.proxy_host = proxy_host self.proxy_port = proxy_port self.proxy_username = proxy_username self.proxy_password = proxy_password self.url = url self.method = method self.headers = headers self.body = utf8(body) self.auth_username = auth_username self.auth_password = auth_password self.auth_mode = auth_mode self.connect_timeout = connect_timeout self.request_timeout = request_timeout self.follow_redirects = follow_redirects self.max_redirects = max_redirects self.user_agent = user_agent self.use_gzip = use_gzip self.network_interface = network_interface self.streaming_callback = stack_context.wrap(streaming_callback) self.header_callback = stack_context.wrap(header_callback) self.prepare_curl_callback = stack_context.wrap(prepare_curl_callback) self.allow_nonstandard_methods = allow_nonstandard_methods self.validate_cert = validate_cert self.ca_certs = ca_certs self.allow_ipv6 = allow_ipv6 self.client_key = client_key self.client_cert = client_cert self.start_time = time.time() class HTTPResponse(object): """HTTP Response object. Attributes: * request: HTTPRequest object * code: numeric HTTP status code, e.g. 200 or 404 * reason: human-readable reason phrase describing the status code (with curl_httpclient, this is a default value rather than the server's actual response) * headers: `tornado.httputil.HTTPHeaders` object * effective_url: final location of the resource after following any redirects * buffer: ``cStringIO`` object for response body * body: response body as string (created on demand from ``self.buffer``) * error: Exception object, if any * request_time: seconds from request start to finish * time_info: dictionary of diagnostic timing information from the request. 
Available data are subject to change, but currently uses timings available from http://curl.haxx.se/libcurl/c/curl_easy_getinfo.html, plus ``queue``, which is the delay (if any) introduced by waiting for a slot under `AsyncHTTPClient`'s ``max_clients`` setting. """ def __init__(self, request, code, headers=None, buffer=None, effective_url=None, error=None, request_time=None, time_info=None, reason=None): if isinstance(request, _RequestProxy): self.request = request.request else: self.request = request self.code = code self.reason = reason or httputil.responses.get(code, "Unknown") if headers is not None: self.headers = headers else: self.headers = httputil.HTTPHeaders() self.buffer = buffer self._body = None if effective_url is None: self.effective_url = request.url else: self.effective_url = effective_url if error is None: if self.code < 200 or self.code >= 300: self.error = HTTPError(self.code, response=self) else: self.error = None else: self.error = error self.request_time = request_time self.time_info = time_info or {} def _get_body(self): if self.buffer is None: return None elif self._body is None: self._body = self.buffer.getvalue() return self._body body = property(_get_body) def rethrow(self): """If there was an error on the request, raise an `HTTPError`.""" if self.error: raise self.error def __repr__(self): args = ",".join("%s=%r" % i for i in sorted(self.__dict__.items())) return "%s(%s)" % (self.__class__.__name__, args) class HTTPError(Exception): """Exception thrown for an unsuccessful HTTP request. Attributes: * ``code`` - HTTP error integer error code, e.g. 404. Error code 599 is used when no HTTP response was received, e.g. for a timeout. * ``response`` - `HTTPResponse` object, if any. Note that if ``follow_redirects`` is False, redirects become HTTPErrors, and you can look at ``error.response.headers['Location']`` to see the destination of the redirect. 
    """
    def __init__(self, code, message=None, response=None):
        # ``code`` is the numeric HTTP status; 599 means no response was
        # received (e.g. timeout).
        self.code = code
        # Fall back to the standard reason phrase for the status code.
        message = message or httputil.responses.get(code, "Unknown")
        self.response = response
        Exception.__init__(self, "HTTP %d: %s" % (self.code, message))


class _RequestProxy(object):
    """Combines an object with a dictionary of defaults.

    Used internally by AsyncHTTPClient implementations.
    """
    def __init__(self, request, defaults):
        self.request = request
        self.defaults = defaults

    def __getattr__(self, name):
        # Only invoked for names not found on the proxy itself: prefer the
        # wrapped request's value, then fall back to the defaults dict.
        request_attr = getattr(self.request, name)
        if request_attr is not None:
            return request_attr
        elif self.defaults is not None:
            return self.defaults.get(name, None)
        else:
            return None


def main():
    """Command-line entry point: fetch each URL argument and print it."""
    from tornado.options import define, options, parse_command_line
    define("print_headers", type=bool, default=False)
    define("print_body", type=bool, default=True)
    define("follow_redirects", type=bool, default=True)
    define("validate_cert", type=bool, default=True)
    args = parse_command_line()
    client = HTTPClient()
    for arg in args:
        try:
            response = client.fetch(arg,
                                    follow_redirects=options.follow_redirects,
                                    validate_cert=options.validate_cert,
                                    )
        except HTTPError as e:
            # With follow_redirects=False a redirect surfaces as an HTTPError
            # that still carries the response; print that instead of failing.
            if e.response is not None:
                response = e.response
            else:
                raise
        if options.print_headers:
            print(response.headers)
        if options.print_body:
            print(response.body)
    client.close()

if __name__ == "__main__":
    main()
{ "content_hash": "8a3cb2ff790e4f7776e9deb4dfd8340c", "timestamp": "", "source": "github", "line_count": 520, "max_line_length": 83, "avg_line_length": 40.68269230769231, "alnum_prop": 0.6328527534861735, "repo_name": "bufferx/tornado", "id": "b58a834854936547dbdba904f7697486b113bc52", "size": "21155", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "tornado/httpclient.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "1003" }, { "name": "CSS", "bytes": "9014" }, { "name": "JavaScript", "bytes": "6045" }, { "name": "Python", "bytes": "1080288" }, { "name": "Ruby", "bytes": "1889" }, { "name": "Shell", "bytes": "5377" } ], "symlink_target": "" }
from __future__ import annotations

import warnings
from typing import Any, Iterable, Mapping, Sequence, SupportsAbs

from airflow.providers.common.sql.operators.sql import (
    SQLCheckOperator,
    SQLExecuteQueryOperator,
    SQLIntervalCheckOperator,
    SQLValueCheckOperator,
)


class SnowflakeOperator(SQLExecuteQueryOperator):
    """
    Executes SQL code in a Snowflake database

    .. seealso::
        For more information on how to use this operator, take a look at the guide:
        :ref:`howto/operator:SnowflakeOperator`

    :param snowflake_conn_id: Reference to
        :ref:`Snowflake connection id<howto/connection:snowflake>`
    :param sql: the SQL code to be executed as a single string, or
        a list of str (sql statements), or a reference to a template file.
        Template references are recognized by str ending in '.sql'
    :param autocommit: if True, each command is automatically committed.
        (default value: True)
    :param parameters: (optional) the parameters to render the SQL query with.
    :param warehouse: name of warehouse (will overwrite any warehouse
        defined in the connection's extra JSON)
    :param database: name of database (will overwrite database defined
        in connection)
    :param schema: name of schema (will overwrite schema defined in
        connection)
    :param role: name of role (will overwrite any role defined in
        connection's extra JSON)
    :param authenticator: authenticator for Snowflake.
        'snowflake' (default) to use the internal Snowflake authenticator
        'externalbrowser' to authenticate using your web browser and
        Okta, ADFS or any other SAML 2.0-compliant identify provider
        (IdP) that has been defined for your account
        'https://<your_okta_account_name>.okta.com' to authenticate
        through native Okta.
    :param session_parameters: You can set session-level parameters at
        the time you connect to Snowflake
    """

    template_fields: Sequence[str] = ("sql",)
    template_ext: Sequence[str] = (".sql",)
    template_fields_renderers = {"sql": "sql"}
    ui_color = "#ededed"

    def __init__(
        self,
        *,
        snowflake_conn_id: str = "snowflake_default",
        warehouse: str | None = None,
        database: str | None = None,
        role: str | None = None,
        schema: str | None = None,
        authenticator: str | None = None,
        session_parameters: dict | None = None,
        **kwargs,
    ) -> None:
        # This class is a deprecated thin wrapper: any Snowflake-specific
        # connection overrides are repackaged into ``hook_params`` so the
        # generic SQLExecuteQueryOperator can forward them to the hook.
        if any([warehouse, database, role, schema, authenticator, session_parameters]):
            hook_params = kwargs.pop("hook_params", {})
            # Explicit keyword arguments win only where the caller did not
            # already supply the same key inside ``hook_params``.
            kwargs["hook_params"] = {
                "warehouse": warehouse,
                "database": database,
                "role": role,
                "schema": schema,
                "authenticator": authenticator,
                "session_parameters": session_parameters,
                **hook_params,
            }
        super().__init__(conn_id=snowflake_conn_id, **kwargs)
        warnings.warn(
            """This class is deprecated.
            Please use `airflow.providers.common.sql.operators.sql.SQLExecuteQueryOperator`.
            Also, you can provide `hook_params={'warehouse': <warehouse>, 'database': <database>,
            'role': <role>, 'schema': <schema>, 'authenticator': <authenticator>,
            'session_parameters': <session_parameters>}`.""",
            DeprecationWarning,
            stacklevel=2,
        )


class SnowflakeCheckOperator(SQLCheckOperator):
    """
    Performs a check against Snowflake. The ``SnowflakeCheckOperator`` expects
    a sql query that will return a single row. Each value on that
    first row is evaluated using python ``bool`` casting. If any of the
    values return ``False`` the check is failed and errors out.

    Note that Python bool casting evals the following as ``False``:

    * ``False``
    * ``0``
    * Empty string (``""``)
    * Empty list (``[]``)
    * Empty dictionary or set (``{}``)

    Given a query like ``SELECT COUNT(*) FROM foo``, it will fail only if
    the count ``== 0``. You can craft much more complex query that could,
    for instance, check that the table has the same number of rows as
    the source table upstream, or that the count of today's partition is
    greater than yesterday's partition, or that a set of metrics are less
    than 3 standard deviation for the 7 day average.

    This operator can be used as a data quality check in your pipeline, and
    depending on where you put it in your DAG, you have the choice to
    stop the critical path, preventing from
    publishing dubious data, or on the side and receive email alerts
    without stopping the progress of the DAG.

    :param sql: the SQL code to be executed as a single string, or
        a list of str (sql statements), or a reference to a template file.
        Template references are recognized by str ending in '.sql'
    :param snowflake_conn_id: Reference to
        :ref:`Snowflake connection id<howto/connection:snowflake>`
    :param autocommit: if True, each command is automatically committed.
        (default value: True)
    :param parameters: (optional) the parameters to render the SQL query with.
    :param warehouse: name of warehouse (will overwrite any warehouse
        defined in the connection's extra JSON)
    :param database: name of database (will overwrite database defined
        in connection)
    :param schema: name of schema (will overwrite schema defined in
        connection)
    :param role: name of role (will overwrite any role defined in
        connection's extra JSON)
    :param authenticator: authenticator for Snowflake.
        'snowflake' (default) to use the internal Snowflake authenticator
        'externalbrowser' to authenticate using your web browser and
        Okta, ADFS or any other SAML 2.0-compliant identify provider
        (IdP) that has been defined for your account
        'https://<your_okta_account_name>.okta.com' to authenticate
        through native Okta.
    :param session_parameters: You can set session-level parameters at
        the time you connect to Snowflake
    """

    template_fields: Sequence[str] = ("sql",)
    template_ext: Sequence[str] = (".sql",)
    ui_color = "#ededed"

    def __init__(
        self,
        *,
        sql: str,
        snowflake_conn_id: str = "snowflake_default",
        parameters: Iterable | Mapping | None = None,
        autocommit: bool = True,
        do_xcom_push: bool = True,
        warehouse: str | None = None,
        database: str | None = None,
        role: str | None = None,
        schema: str | None = None,
        authenticator: str | None = None,
        session_parameters: dict | None = None,
        **kwargs,
    ) -> None:
        super().__init__(sql=sql, **kwargs)
        # NOTE(review): the connection-override attributes below are stored
        # but not referenced anywhere in this module — presumably consumed
        # when the Snowflake hook is built; confirm against the rest of the
        # provider package.
        self.snowflake_conn_id = snowflake_conn_id
        self.sql = sql
        self.autocommit = autocommit
        self.do_xcom_push = do_xcom_push
        self.parameters = parameters
        self.warehouse = warehouse
        self.database = database
        self.role = role
        self.schema = schema
        self.authenticator = authenticator
        self.session_parameters = session_parameters
        # Populated with Snowflake query ids as statements execute.
        self.query_ids: list[str] = []


class SnowflakeValueCheckOperator(SQLValueCheckOperator):
    """
    Performs a simple check using sql code against a specified value, within
    a certain level of tolerance.

    :param sql: the sql to be executed
    :param pass_value: the value to check against
    :param tolerance: (optional) the tolerance allowed to
        accept the query as passing
    :param snowflake_conn_id: Reference to
        :ref:`Snowflake connection id<howto/connection:snowflake>`
    :param autocommit: if True, each command is automatically committed.
        (default value: True)
    :param parameters: (optional) the parameters to render the SQL query with.
    :param warehouse: name of warehouse (will overwrite any warehouse
        defined in the connection's extra JSON)
    :param database: name of database (will overwrite database defined
        in connection)
    :param schema: name of schema (will overwrite schema defined in
        connection)
    :param role: name of role (will overwrite any role defined in
        connection's extra JSON)
    :param authenticator: authenticator for Snowflake.
        'snowflake' (default) to use the internal Snowflake authenticator
        'externalbrowser' to authenticate using your web browser and
        Okta, ADFS or any other SAML 2.0-compliant identify provider
        (IdP) that has been defined for your account
        'https://<your_okta_account_name>.okta.com' to authenticate
        through native Okta.
    :param session_parameters: You can set session-level parameters at
        the time you connect to Snowflake
    """

    def __init__(
        self,
        *,
        sql: str,
        pass_value: Any,
        tolerance: Any = None,
        snowflake_conn_id: str = "snowflake_default",
        parameters: Iterable | Mapping | None = None,
        autocommit: bool = True,
        do_xcom_push: bool = True,
        warehouse: str | None = None,
        database: str | None = None,
        role: str | None = None,
        schema: str | None = None,
        authenticator: str | None = None,
        session_parameters: dict | None = None,
        **kwargs,
    ) -> None:
        super().__init__(sql=sql, pass_value=pass_value, tolerance=tolerance, **kwargs)
        # NOTE(review): attributes stored but unused within this module —
        # see the equivalent note on SnowflakeCheckOperator.
        self.snowflake_conn_id = snowflake_conn_id
        self.sql = sql
        self.autocommit = autocommit
        self.do_xcom_push = do_xcom_push
        self.parameters = parameters
        self.warehouse = warehouse
        self.database = database
        self.role = role
        self.schema = schema
        self.authenticator = authenticator
        self.session_parameters = session_parameters
        # Populated with Snowflake query ids as statements execute.
        self.query_ids: list[str] = []


class SnowflakeIntervalCheckOperator(SQLIntervalCheckOperator):
    """
    Checks that the values of metrics given as SQL expressions are within
    a certain tolerance of the ones from days_back before.

    This method constructs a query like so ::

        SELECT {metrics_threshold_dict_key} FROM {table}
        WHERE {date_filter_column}=<date>

    :param table: the table name
    :param days_back: number of days between ds and the ds we want to check
        against. Defaults to 7 days
    :param metrics_thresholds: a dictionary of ratios indexed by metrics, for
        example 'COUNT(*)': 1.5 would require a 50 percent or less difference
        between the current day, and the prior days_back.
    :param snowflake_conn_id: Reference to
        :ref:`Snowflake connection id<howto/connection:snowflake>`
    :param autocommit: if True, each command is automatically committed.
        (default value: True)
    :param parameters: (optional) the parameters to render the SQL query with.
    :param warehouse: name of warehouse (will overwrite any warehouse
        defined in the connection's extra JSON)
    :param database: name of database (will overwrite database defined
        in connection)
    :param schema: name of schema (will overwrite schema defined in
        connection)
    :param role: name of role (will overwrite any role defined in
        connection's extra JSON)
    :param authenticator: authenticator for Snowflake.
        'snowflake' (default) to use the internal Snowflake authenticator
        'externalbrowser' to authenticate using your web browser and
        Okta, ADFS or any other SAML 2.0-compliant identify provider
        (IdP) that has been defined for your account
        'https://<your_okta_account_name>.okta.com' to authenticate
        through native Okta.
    :param session_parameters: You can set session-level parameters at
        the time you connect to Snowflake
    """

    def __init__(
        self,
        *,
        table: str,
        metrics_thresholds: dict,
        date_filter_column: str = "ds",
        days_back: SupportsAbs[int] = -7,
        snowflake_conn_id: str = "snowflake_default",
        parameters: Iterable | Mapping | None = None,
        autocommit: bool = True,
        do_xcom_push: bool = True,
        warehouse: str | None = None,
        database: str | None = None,
        role: str | None = None,
        schema: str | None = None,
        authenticator: str | None = None,
        session_parameters: dict | None = None,
        **kwargs,
    ) -> None:
        super().__init__(
            table=table,
            metrics_thresholds=metrics_thresholds,
            date_filter_column=date_filter_column,
            days_back=days_back,
            **kwargs,
        )
        # NOTE(review): attributes stored but unused within this module —
        # see the equivalent note on SnowflakeCheckOperator.
        self.snowflake_conn_id = snowflake_conn_id
        self.autocommit = autocommit
        self.do_xcom_push = do_xcom_push
        self.parameters = parameters
        self.warehouse = warehouse
        self.database = database
        self.role = role
        self.schema = schema
        self.authenticator = authenticator
        self.session_parameters = session_parameters
        # Populated with Snowflake query ids as statements execute.
        self.query_ids: list[str] = []
{ "content_hash": "ff2a857a25ab36418bfb31e5741e7d7e", "timestamp": "", "source": "github", "line_count": 322, "max_line_length": 97, "avg_line_length": 41.01863354037267, "alnum_prop": 0.6466535433070866, "repo_name": "nathanielvarona/airflow", "id": "2546ddfb5eec570c284e8abee35c1c7925c67bd5", "size": "13995", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "airflow/providers/snowflake/operators/snowflake.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "25980" }, { "name": "Dockerfile", "bytes": "70681" }, { "name": "HCL", "bytes": "3786" }, { "name": "HTML", "bytes": "173025" }, { "name": "JavaScript", "bytes": "142848" }, { "name": "Jinja", "bytes": "38895" }, { "name": "Jupyter Notebook", "bytes": "5482" }, { "name": "Mako", "bytes": "1339" }, { "name": "Python", "bytes": "23169682" }, { "name": "R", "bytes": "313" }, { "name": "Shell", "bytes": "211967" }, { "name": "TypeScript", "bytes": "484556" } ], "symlink_target": "" }
import uuid

from msrest.pipeline import ClientRawResponse
from msrestazure.azure_exceptions import CloudError
from msrest.polling import LROPoller, NoPolling
from msrestazure.polling.arm_polling import ARMPolling

from .. import models


# NOTE(review): structure and ``*.metadata`` attributes suggest this is
# AutoRest-generated Azure SDK client code — confirm before hand-editing,
# since regeneration would overwrite changes.
class StorageAccountCredentialsOperations(object):
    """StorageAccountCredentialsOperations operations.

    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    :ivar api_version: The API version. Constant value: "2019-03-01".
    """

    models = models

    def __init__(self, client, config, serializer, deserializer):
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        # Pinned REST API version sent as the ``api-version`` query parameter
        # on every request issued by this operations group.
        self.api_version = "2019-03-01"

        self.config = config

    def list_by_data_box_edge_device(
            self, device_name, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Gets all the storage account credentials in a data box edge/gateway
        device.

        :param device_name: The device name.
        :type device_name: str
        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: An iterator like instance of StorageAccountCredential
        :rtype:
         ~azure.mgmt.edgegateway.models.StorageAccountCredentialPaged[~azure.mgmt.edgegateway.models.StorageAccountCredential]
        :raises:
         :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Closure handed to the paged collection: fetches one page per call.
        # On the first call (``next_link is None``) it builds the list URL;
        # afterwards it simply follows the server-provided continuation link.
        def internal_paging(next_link=None, raw=False):

            if not next_link:
                # Construct URL
                url = self.list_by_data_box_edge_device.metadata['url']
                path_format_arguments = {
                    'deviceName': self._serialize.url("device_name", device_name, 'str'),
                    'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
                }
                url = self._client.format_url(url, **path_format_arguments)

                # Construct parameters
                query_parameters = {}
                query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

            else:
                url = next_link
                query_parameters = {}

            # Construct headers
            header_parameters = {}
            header_parameters['Accept'] = 'application/json'
            if self.config.generate_client_request_id:
                header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
            if custom_headers:
                header_parameters.update(custom_headers)
            if self.config.accept_language is not None:
                header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

            # Construct and send request
            request = self._client.get(url, query_parameters, header_parameters)
            response = self._client.send(request, stream=False, **operation_config)

            if response.status_code not in [200]:
                exp = CloudError(response)
                exp.request_id = response.headers.get('x-ms-request-id')
                raise exp

            return response

        # Deserialize response
        deserialized = models.StorageAccountCredentialPaged(internal_paging, self._deserialize.dependencies)

        if raw:
            header_dict = {}
            client_raw_response = models.StorageAccountCredentialPaged(internal_paging, self._deserialize.dependencies, header_dict)
            return client_raw_response

        return deserialized
    list_by_data_box_edge_device.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccountCredentials'}

    def get(
            self, device_name, name, resource_group_name, custom_headers=None, raw=False, **operation_config):
        """Gets the properties of the specified storage account credential.

        :param device_name: The device name.
        :type device_name: str
        :param name: The storage account credential name.
        :type name: str
        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: returns the direct response alongside the
         deserialized response
        :param operation_config: :ref:`Operation configuration
         overrides<msrest:optionsforoperations>`.
        :return: StorageAccountCredential or ClientRawResponse if raw=true
        :rtype: ~azure.mgmt.edgegateway.models.StorageAccountCredential or
         ~msrest.pipeline.ClientRawResponse
        :raises:
         :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Construct URL
        url = self.get.metadata['url']
        path_format_arguments = {
            'deviceName': self._serialize.url("device_name", device_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.get(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('StorageAccountCredential', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccountCredentials/{name}'}


    def _create_or_update_initial(
            self, device_name, name, storage_account_credential, resource_group_name, custom_headers=None, raw=False, **operation_config):
        # Issues the initial PUT of the long-running create/update operation;
        # 200 carries the final resource, 202 means the operation is still
        # running and will be tracked by the poller in ``create_or_update``.
        # Construct URL
        url = self.create_or_update.metadata['url']
        path_format_arguments = {
            'deviceName': self._serialize.url("device_name", device_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        header_parameters['Accept'] = 'application/json'
        header_parameters['Content-Type'] = 'application/json; charset=utf-8'
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct body
        body_content = self._serialize.body(storage_account_credential, 'StorageAccountCredential')

        # Construct and send request
        request = self._client.put(url, query_parameters, header_parameters, body_content)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200, 202]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        deserialized = None

        if response.status_code == 200:
            deserialized = self._deserialize('StorageAccountCredential', response)

        if raw:
            client_raw_response = ClientRawResponse(deserialized, response)
            return client_raw_response

        return deserialized

    def create_or_update(
            self, device_name, name, storage_account_credential, resource_group_name, custom_headers=None, raw=False, polling=True, **operation_config):
        """Creates or updates the storage account credential.

        :param device_name: The device name.
        :type device_name: str
        :param name: The storage account credential name.
        :type name: str
        :param storage_account_credential: The storage account credential.
        :type storage_account_credential:
         ~azure.mgmt.edgegateway.models.StorageAccountCredential
        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns
         StorageAccountCredential or
         ClientRawResponse<StorageAccountCredential> if raw==True
        :rtype:
         ~msrestazure.azure_operation.AzureOperationPoller[~azure.mgmt.edgegateway.models.StorageAccountCredential]
         or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[~azure.mgmt.edgegateway.models.StorageAccountCredential]]
        :raises:
         :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Kick off the operation, then wrap the in-flight response in an
        # LROPoller that polls until the service reports completion.
        raw_result = self._create_or_update_initial(
            device_name=device_name,
            name=name,
            storage_account_credential=storage_account_credential,
            resource_group_name=resource_group_name,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )

        def get_long_running_output(response):
            deserialized = self._deserialize('StorageAccountCredential', response)

            if raw:
                client_raw_response = ClientRawResponse(deserialized, response)
                return client_raw_response

            return deserialized

        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccountCredentials/{name}'}


    def _delete_initial(
            self, device_name, name, resource_group_name, custom_headers=None, raw=False, **operation_config):
        # Issues the initial DELETE of the long-running delete operation;
        # accepts 200/202/204 and returns no body.
        # Construct URL
        url = self.delete.metadata['url']
        path_format_arguments = {
            'deviceName': self._serialize.url("device_name", device_name, 'str'),
            'name': self._serialize.url("name", name, 'str'),
            'subscriptionId': self._serialize.url("self.config.subscription_id", self.config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str')
        }
        url = self._client.format_url(url, **path_format_arguments)

        # Construct parameters
        query_parameters = {}
        query_parameters['api-version'] = self._serialize.query("self.api_version", self.api_version, 'str')

        # Construct headers
        header_parameters = {}
        if self.config.generate_client_request_id:
            header_parameters['x-ms-client-request-id'] = str(uuid.uuid1())
        if custom_headers:
            header_parameters.update(custom_headers)
        if self.config.accept_language is not None:
            header_parameters['accept-language'] = self._serialize.header("self.config.accept_language", self.config.accept_language, 'str')

        # Construct and send request
        request = self._client.delete(url, query_parameters, header_parameters)
        response = self._client.send(request, stream=False, **operation_config)

        if response.status_code not in [200, 202, 204]:
            exp = CloudError(response)
            exp.request_id = response.headers.get('x-ms-request-id')
            raise exp

        if raw:
            client_raw_response = ClientRawResponse(None, response)
            return client_raw_response

    def delete(
            self, device_name, name, resource_group_name, custom_headers=None, raw=False, polling=True, **operation_config):
        """Deletes the storage account credential.

        :param device_name: The device name.
        :type device_name: str
        :param name: The storage account credential name.
        :type name: str
        :param resource_group_name: The resource group name.
        :type resource_group_name: str
        :param dict custom_headers: headers that will be added to the request
        :param bool raw: The poller return type is ClientRawResponse, the
         direct response alongside the deserialized response
        :param polling: True for ARMPolling, False for no polling, or a
         polling object for personal polling strategy
        :return: An instance of LROPoller that returns None or
         ClientRawResponse<None> if raw==True
        :rtype: ~msrestazure.azure_operation.AzureOperationPoller[None] or
         ~msrestazure.azure_operation.AzureOperationPoller[~msrest.pipeline.ClientRawResponse[None]]
        :raises:
         :class:`CloudError<msrestazure.azure_exceptions.CloudError>`
        """
        # Kick off the delete, then poll until the service confirms it.
        raw_result = self._delete_initial(
            device_name=device_name,
            name=name,
            resource_group_name=resource_group_name,
            custom_headers=custom_headers,
            raw=True,
            **operation_config
        )

        def get_long_running_output(response):
            if raw:
                client_raw_response = ClientRawResponse(None, response)
                return client_raw_response

        lro_delay = operation_config.get(
            'long_running_operation_timeout',
            self.config.long_running_operation_timeout)
        if polling is True: polling_method = ARMPolling(lro_delay, **operation_config)
        elif polling is False: polling_method = NoPolling()
        else: polling_method = polling
        return LROPoller(self._client, raw_result, get_long_running_output, polling_method)
    delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.DataBoxEdge/dataBoxEdgeDevices/{deviceName}/storageAccountCredentials/{name}'}
{ "content_hash": "3f1183954814e1b2f9705ec9b14196a0", "timestamp": "", "source": "github", "line_count": 354, "max_line_length": 211, "avg_line_length": 47.432203389830505, "alnum_prop": 0.6546959680781371, "repo_name": "Azure/azure-sdk-for-python", "id": "60a5e1ab2367689738c48c05d631e6e3b6e74cff", "size": "17265", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "sdk/edgegateway/azure-mgmt-edgegateway/azure/mgmt/edgegateway/operations/storage_account_credentials_operations.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "1224" }, { "name": "Bicep", "bytes": "24196" }, { "name": "CSS", "bytes": "6089" }, { "name": "Dockerfile", "bytes": "4892" }, { "name": "HTML", "bytes": "12058" }, { "name": "JavaScript", "bytes": "8137" }, { "name": "Jinja", "bytes": "10377" }, { "name": "Jupyter Notebook", "bytes": "272022" }, { "name": "PowerShell", "bytes": "518535" }, { "name": "Python", "bytes": "715484989" }, { "name": "Shell", "bytes": "3631" } ], "symlink_target": "" }
import http.client
import urllib.parse


def http_request(url, data=None, method="GET", headers=None):
    """Perform an HTTP or HTTPS request and return the decoded response.

    :param url: Full URL including the scheme (``http`` or ``https``).
    :param data: Optional dict of parameters. For GET requests it is
        appended to the path as a query string; for other methods it is
        sent as a form-encoded request body.
    :param method: HTTP verb; falls back to ``GET`` when falsy.
    :param headers: Optional dict of extra request headers. The caller's
        dict is copied, never mutated. (The previous version used a
        mutable default ``{}`` and wrote ``Content-Length`` into the
        caller-supplied dict in place.)
    :return: Tuple ``(body, status, reason, headers)`` where *body* is the
        UTF-8 decoded response payload and *headers* is a list of
        ``(name, value)`` pairs as returned by
        :meth:`http.client.HTTPResponse.getheaders`.
    """
    parsed = urllib.parse.urlparse(url)
    scheme, netloc, path = parsed.scheme, parsed.netloc, parsed.path

    method = (method or "GET").upper()
    # Copy so a caller-provided mapping is never modified as a side effect.
    headers = dict(headers) if headers else {}

    if data:
        data = urllib.parse.urlencode(data)

    if method == "GET":
        if data:
            # GET carries parameters in the query string, not the body.
            path += "?" + data
            data = None

    if data:
        headers["Content-Length"] = len(data)
        headers["Content-Type"] = "application/x-www-form-urlencoded"

    if scheme == "https":
        conn = http.client.HTTPSConnection(netloc)
    else:
        conn = http.client.HTTPConnection(netloc)
    try:
        conn.request(method, path, data, headers)
        res = conn.getresponse()
        res_status, res_reason = res.status, res.reason
        res_body = res.read()
        res_headers = res.getheaders()
    finally:
        # Always release the socket, even if the request/read raised.
        conn.close()

    res_body = res_body.decode("utf-8")
    return res_body, res_status, res_reason, res_headers


def http_head(url, data=None, headers=None):
    """Issue a HEAD request; see :func:`http_request`."""
    return http_request(url, data, "HEAD", headers)


def http_get(url, data=None, headers=None):
    """Issue a GET request; see :func:`http_request`."""
    return http_request(url, data, "GET", headers)


def http_post(url, data=None, headers=None):
    """Issue a POST request; see :func:`http_request`."""
    return http_request(url, data, "POST", headers)


def http_delete(url, data=None, headers=None):
    """Issue a DELETE request; see :func:`http_request`."""
    return http_request(url, data, "DELETE", headers)


def http_put(url, data=None, headers=None):
    """Issue a PUT request; see :func:`http_request`."""
    return http_request(url, data, "PUT", headers)


def http_patch(url, data=None, headers=None):
    """Issue a PATCH request; see :func:`http_request`."""
    return http_request(url, data, "PATCH", headers)
{ "content_hash": "49c32cea3504850d17a311fa5667962f", "timestamp": "", "source": "github", "line_count": 69, "max_line_length": 65, "avg_line_length": 23.681159420289855, "alnum_prop": 0.6707466340269278, "repo_name": "MrVallentin/http_request", "id": "08f98d32f073c8a759a51d5a1b5fc9a27ec1c07c", "size": "1927", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "python/http_request.py", "mode": "33188", "license": "mit", "language": [ { "name": "Java", "bytes": "9531" }, { "name": "JavaScript", "bytes": "2702" }, { "name": "Lua", "bytes": "3209" }, { "name": "PHP", "bytes": "2083" }, { "name": "Python", "bytes": "1927" } ], "symlink_target": "" }
''' Text Input ========== .. versionadded:: 1.0.4 .. image:: images/textinput-mono.jpg .. image:: images/textinput-multi.jpg The :class:`TextInput` widget provides a box for editable plain text. Unicode, multiline, cursor navigation, selection and clipboard features are supported. The :class:`TextInput` uses two different coordinate systems: * (x, y) - coordinates in pixels, mostly used for rendering on screen. * (row, col) - cursor index in characters / lines, used for selection and cursor movement. Usage example ------------- To create a multiline :class:`TextInput` (the 'enter' key adds a new line):: from kivy.uix.textinput import TextInput textinput = TextInput(text='Hello world') To create a singleline :class:`TextInput`, set the :class:`TextInput.multiline` property to False (the 'enter' key will defocus the TextInput and emit an :meth:`TextInput.on_text_validate` event):: def on_enter(instance, value): print('User pressed enter in', instance) textinput = TextInput(text='Hello world', multiline=False) textinput.bind(on_text_validate=on_enter) The textinput's text is stored in its :attr:`TextInput.text` property. To run a callback when the text changes:: def on_text(instance, value): print('The widget', instance, 'have:', value) textinput = TextInput() textinput.bind(text=on_text) You can set the :class:`focus <kivy.uix.behaviors.FocusBehavior>` to a Textinput, meaning that the input box will be highlighted and keyboard focus will be requested:: textinput = TextInput(focus=True) The textinput is defocused if the 'escape' key is pressed, or if another widget requests the keyboard. You can bind a callback to the focus property to get notified of focus changes:: def on_focus(instance, value): if value: print('User focused', instance) else: print('User defocused', instance) textinput = TextInput() textinput.bind(focus=on_focus) See :class:`~kivy.uix.behaviors.FocusBehavior`, from which the :class:`TextInput` inherits, for more details. 
Selection --------- The selection is automatically updated when the cursor position changes. You can get the currently selected text from the :attr:`TextInput.selection_text` property. Filtering --------- You can control which text can be added to the :class:`TextInput` by overwriting :meth:`TextInput.insert_text`. Every string that is typed, pasted or inserted by any other means into the :class:`TextInput` is passed through this function. By overwriting it you can reject or change unwanted characters. For example, to write only in capitalized characters:: class CapitalInput(TextInput): def insert_text(self, substring, from_undo=False): s = substring.upper() return super(CapitalInput, self).insert_text(s,\ from_undo=from_undo) Or to only allow floats (0 - 9 and a single period):: class FloatInput(TextInput): pat = re.compile('[^0-9]') def insert_text(self, substring, from_undo=False): pat = self.pat if '.' in self.text: s = re.sub(pat, '', substring) else: s = '.'.join([re.sub(pat, '', s) for s in\ substring.split('.', 1)]) return super(FloatInput, self).insert_text(s, from_undo=from_undo) Default shortcuts ----------------- =============== ======================================================== Shortcuts Description --------------- -------------------------------------------------------- Left Move cursor to left Right Move cursor to right Up Move cursor to up Down Move cursor to down Home Move cursor at the beginning of the line End Move cursor at the end of the line PageUp Move cursor to 3 lines before PageDown Move cursor to 3 lines after Backspace Delete the selection or character before the cursor Del Delete the selection of character after the cursor Shift + <dir> Start a text selection. Dir can be Up, Down, Left or Right Control + c Copy selection Control + x Cut selection Control + v Paste clipboard content Control + a Select all the content Control + z undo Control + r redo =============== ======================================================== .. 
note:: To enable Emacs-style keyboard shortcuts, you can use :class:`~kivy.uix.behaviors.emacs.EmacsBehavior`. ''' __all__ = ('TextInput', ) import re import sys from os import environ from weakref import ref from kivy.animation import Animation from kivy.base import EventLoop from kivy.cache import Cache from kivy.clock import Clock from kivy.config import Config from kivy.metrics import inch from kivy.utils import boundary, platform from kivy.uix.behaviors import FocusBehavior from kivy.core.text import Label, DEFAULT_FONT from kivy.graphics import Color, Rectangle, PushMatrix, PopMatrix, Callback from kivy.graphics.context_instructions import Transform from kivy.graphics.texture import Texture from kivy.uix.widget import Widget from kivy.uix.bubble import Bubble from kivy.uix.behaviors import ButtonBehavior from kivy.uix.image import Image from kivy.properties import StringProperty, NumericProperty, \ BooleanProperty, AliasProperty, \ ListProperty, ObjectProperty, VariableListProperty Cache_register = Cache.register Cache_append = Cache.append Cache_get = Cache.get Cache_remove = Cache.remove Cache_register('textinput.label', timeout=60.) Cache_register('textinput.width', timeout=60.) 
FL_IS_LINEBREAK = 0x01 FL_IS_WORDBREAK = 0x02 FL_IS_NEWLINE = FL_IS_LINEBREAK | FL_IS_WORDBREAK # late binding Clipboard = None CutBuffer = None MarkupLabel = None _platform = platform # for reloading, we need to keep a list of textinput to retrigger the rendering _textinput_list = [] # cache the result _is_osx = sys.platform == 'darwin' # When we are generating documentation, Config doesn't exist _is_desktop = False if Config: _is_desktop = Config.getboolean('kivy', 'desktop') # register an observer to clear the textinput cache when OpenGL will reload if 'KIVY_DOC' not in environ: def _textinput_clear_cache(*l): Cache_remove('textinput.label') Cache_remove('textinput.width') for wr in _textinput_list[:]: textinput = wr() if textinput is None: _textinput_list.remove(wr) else: textinput._trigger_refresh_text() textinput._refresh_hint_text() from kivy.graphics.context import get_context get_context().add_reload_observer(_textinput_clear_cache, True) class Selector(ButtonBehavior, Image): # Internal class for managing the selection Handles. 
window = ObjectProperty() target = ObjectProperty() matrix = ObjectProperty() def __init__(self, **kwargs): super(Selector, self).__init__(**kwargs) self.matrix = self.target.get_window_matrix() with self.canvas.before: Callback(self.update_transform) PushMatrix() self.transform = Transform() with self.canvas.after: PopMatrix() def update_transform(self, cb): m = self.target.get_window_matrix() if self.matrix != m: self.matrix = m self.transform.identity() self.transform.transform(self.matrix) def transform_touch(self, touch): matrix = self.matrix.inverse() touch.apply_transform_2d( lambda x, y: matrix.transform_point(x, y, 0)[:2]) def on_touch_down(self, touch): if self.parent is not EventLoop.window: return try: touch.push() self.transform_touch(touch) self._touch_diff = self.top - touch.y if self.collide_point(*touch.pos): FocusBehavior.ignored_touch.append(touch) return super(Selector, self).on_touch_down(touch) finally: touch.pop() class TextInputCutCopyPaste(Bubble): # Internal class used for showing the little bubble popup when # copy/cut/paste happen. textinput = ObjectProperty(None) ''' Holds a reference to the TextInput this Bubble belongs to. 
''' but_cut = ObjectProperty(None) but_copy = ObjectProperty(None) but_paste = ObjectProperty(None) but_selectall = ObjectProperty(None) matrix = ObjectProperty(None) _check_parent_ev = None def __init__(self, **kwargs): self.mode = 'normal' super(TextInputCutCopyPaste, self).__init__(**kwargs) self._check_parent_ev = Clock.schedule_interval(self._check_parent, .5) self.matrix = self.textinput.get_window_matrix() with self.canvas.before: Callback(self.update_transform) PushMatrix() self.transform = Transform() with self.canvas.after: PopMatrix() def update_transform(self, cb): m = self.textinput.get_window_matrix() if self.matrix != m: self.matrix = m self.transform.identity() self.transform.transform(self.matrix) def transform_touch(self, touch): matrix = self.matrix.inverse() touch.apply_transform_2d( lambda x, y: matrix.transform_point(x, y, 0)[:2]) def on_touch_down(self, touch): try: touch.push() self.transform_touch(touch) if self.collide_point(*touch.pos): FocusBehavior.ignored_touch.append(touch) return super(TextInputCutCopyPaste, self).on_touch_down(touch) finally: touch.pop() def on_touch_up(self, touch): try: touch.push() self.transform_touch(touch) for child in self.content.children: if ref(child) in touch.grab_list: touch.grab_current = child break return super(TextInputCutCopyPaste, self).on_touch_up(touch) finally: touch.pop() def on_textinput(self, instance, value): global Clipboard if value and not Clipboard and not _is_desktop: value._ensure_clipboard() def _check_parent(self, dt): # this is a prevention to get the Bubble staying on the screen, if the # attached textinput is not on the screen anymore. 
parent = self.textinput while parent is not None: if parent == parent.parent: break parent = parent.parent if parent is None: self._check_parent_ev.cancel() if self.textinput: self.textinput._hide_cut_copy_paste() def on_parent(self, instance, value): parent = self.textinput mode = self.mode if parent: self.clear_widgets() if mode == 'paste': # show only paste on long touch self.but_selectall.opacity = 1 widget_list = [self.but_selectall, ] if not parent.readonly: widget_list.append(self.but_paste) elif parent.readonly: # show only copy for read only text input widget_list = (self.but_copy, ) else: # normal mode widget_list = (self.but_cut, self.but_copy, self.but_paste) for widget in widget_list: self.add_widget(widget) def do(self, action): textinput = self.textinput if action == 'cut': textinput._cut(textinput.selection_text) elif action == 'copy': textinput.copy() elif action == 'paste': textinput.paste() elif action == 'selectall': textinput.select_all() self.mode = '' anim = Animation(opacity=0, d=.333) anim.bind(on_complete=lambda *args: self.on_parent(self, self.parent)) anim.start(self.but_selectall) return self.hide() def hide(self): parent = self.parent if not parent: return anim = Animation(opacity=0, d=.225) anim.bind(on_complete=lambda *args: parent.remove_widget(self)) anim.start(self) class TextInput(FocusBehavior, Widget): '''TextInput class. See module documentation for more information. :Events: `on_text_validate` Fired only in multiline=False mode when the user hits 'enter'. This will also unfocus the textinput. `on_double_tap` Fired when a double tap happens in the text input. The default behavior selects the text around the cursor position. More info at :meth:`on_double_tap`. `on_triple_tap` Fired when a triple tap happens in the text input. The default behavior selects the line around the cursor position. More info at :meth:`on_triple_tap`. `on_quad_touch` Fired when four fingers are touching the text input. 
The default behavior selects the whole text. More info at :meth:`on_quad_touch`. .. warning:: When changing a :class:`TextInput` property that requires re-drawing, e.g. modifying the :attr:`text`, the updates occur on the next clock cycle and not instantly. This might cause any changes to the :class:`TextInput` that occur between the modification and the next cycle to be ignored, or to use previous values. For example, after a update to the :attr:`text`, changing the cursor in the same clock frame will move it using the previous text and will likely end up in an incorrect position. The solution is to schedule any updates to occur on the next clock cycle using :meth:`~kivy.clock.ClockBase.schedule_once`. .. Note:: Selection is cancelled when TextInput is focused. If you need to show selection when TextInput is focused, you should delay (use Clock.schedule) the call to the functions for selecting text (select_all, select_text). .. versionchanged:: 1.10.0 `background_disabled_active` has been removed. .. versionchanged:: 1.9.0 :class:`TextInput` now inherits from :class:`~kivy.uix.behaviors.FocusBehavior`. :attr:`~kivy.uix.behaviors.FocusBehavior.keyboard_mode`, :meth:`~kivy.uix.behaviors.FocusBehavior.show_keyboard`, :meth:`~kivy.uix.behaviors.FocusBehavior.hide_keyboard`, :meth:`~kivy.uix.behaviors.FocusBehavior.focus`, and :attr:`~kivy.uix.behaviors.FocusBehavior.input_type` have been removed since they are now inherited from :class:`~kivy.uix.behaviors.FocusBehavior`. .. versionchanged:: 1.7.0 `on_double_tap`, `on_triple_tap` and `on_quad_touch` events added. 
''' __events__ = ('on_text_validate', 'on_double_tap', 'on_triple_tap', 'on_quad_touch') def __init__(self, **kwargs): self._update_graphics_ev = Clock.create_trigger( self._update_graphics, -1) self.is_focusable = kwargs.get('is_focusable', True) self._cursor = [0, 0] self._selection = False self._selection_finished = True self._selection_touch = None self.selection_text = u'' self._selection_from = None self._selection_to = None self._selection_callback = None self._handle_left = None self._handle_right = None self._handle_middle = None self._bubble = None self._lines_flags = [] self._lines_labels = [] self._lines_rects = [] self._hint_text_flags = [] self._hint_text_labels = [] self._hint_text_rects = [] self._label_cached = None self._line_options = None self._keyboard_mode = Config.get('kivy', 'keyboard_mode') self._command_mode = False self._command = '' self.reset_undo() self._touch_count = 0 self._ctrl_l = False self._ctrl_r = False self._alt_l = False self._alt_r = False self._refresh_text_from_property_ev = None self._long_touch_ev = None self._do_blink_cursor_ev = Clock.create_trigger( self._do_blink_cursor, .5, interval=True) self._refresh_line_options_ev = None self.interesting_keys = { 8: 'backspace', 13: 'enter', 127: 'del', 271: 'enter', 273: 'cursor_up', 274: 'cursor_down', 275: 'cursor_right', 276: 'cursor_left', 278: 'cursor_home', 279: 'cursor_end', 280: 'cursor_pgup', 281: 'cursor_pgdown', 303: 'shift_L', 304: 'shift_R', 305: 'ctrl_L', 306: 'ctrl_R', 308: 'alt_L', 307: 'alt_R'} super(TextInput, self).__init__(**kwargs) fbind = self.fbind refresh_line_options = self._trigger_refresh_line_options update_text_options = self._update_text_options fbind('font_size', refresh_line_options) fbind('font_name', refresh_line_options) def handle_readonly(instance, value): if value and (not _is_desktop or not self.allow_copy): self.is_focusable = False if (not (value or self.disabled) or _is_desktop and self._keyboard_mode == 'system'): self._editable = True 
else: self._editable = False fbind('padding', update_text_options) fbind('tab_width', update_text_options) fbind('font_size', update_text_options) fbind('font_name', update_text_options) fbind('size', update_text_options) fbind('password', update_text_options) fbind('password_mask', update_text_options) fbind('pos', self._trigger_update_graphics) fbind('readonly', handle_readonly) fbind('focus', self._on_textinput_focused) handle_readonly(self, self.readonly) handles = self._trigger_position_handles = Clock.create_trigger( self._position_handles) self._trigger_show_handles = Clock.create_trigger( self._show_handles, .05) self._trigger_cursor_reset = Clock.create_trigger( self._reset_cursor_blink) self._trigger_update_cutbuffer = Clock.create_trigger( self._update_cutbuffer) refresh_line_options() self._trigger_refresh_text() fbind('pos', handles) fbind('size', handles) # when the gl context is reloaded, trigger the text rendering again. _textinput_list.append(ref(self, TextInput._reload_remove_observer)) if platform == 'linux': self._ensure_clipboard() def on_text_validate(self): pass def cursor_index(self, cursor=None): '''Return the cursor index in the text/value. ''' if not cursor: cursor = self.cursor try: l = self._lines if len(l) == 0: return 0 lf = self._lines_flags index, cr = cursor for row in range(cr): if row >= len(l): continue index += len(l[row]) if lf[row] & FL_IS_LINEBREAK: index += 1 if lf[cr] & FL_IS_LINEBREAK: index += 1 return index except IndexError: return 0 def cursor_offset(self): '''Get the cursor x offset on the current line. ''' offset = 0 row = self.cursor_row col = self.cursor_col _lines = self._lines if col and row < len(_lines): offset = self._get_text_width( _lines[row][:col], self.tab_width, self._label_cached) return offset def get_cursor_from_index(self, index): '''Return the (row, col) of the cursor from text index. 
''' index = boundary(index, 0, len(self.text)) if index <= 0: return 0, 0 lf = self._lines_flags l = self._lines i = 0 for row in range(len(l)): ni = i + len(l[row]) if lf[row] & FL_IS_LINEBREAK: ni += 1 i += 1 if ni >= index: return index - i, row i = ni return index, row def select_text(self, start, end): ''' Select a portion of text displayed in this TextInput. .. versionadded:: 1.4.0 :Parameters: `start` Index of textinput.text from where to start selection `end` Index of textinput.text till which the selection should be displayed ''' if end < start: raise Exception('end must be superior to start') m = len(self.text) self._selection_from = boundary(start, 0, m) self._selection_to = boundary(end, 0, m) self._selection_finished = True self._update_selection(True) self._update_graphics_selection() def select_all(self): ''' Select all of the text displayed in this TextInput. .. versionadded:: 1.4.0 ''' self.select_text(0, len(self.text)) re_indent = re.compile('^(\s*|)') def _auto_indent(self, substring): index = self.cursor_index() _text = self._get_text(encode=False) if index > 0: line_start = _text.rfind('\n', 0, index) if line_start > -1: line = _text[line_start + 1:index] indent = self.re_indent.match(line).group() substring += indent return substring def insert_text(self, substring, from_undo=False): '''Insert new text at the current cursor position. Override this function in order to pre-process text for input validation. 
''' if self.readonly or not substring or not self._lines: return if isinstance(substring, bytes): substring = substring.decode('utf8') if self.replace_crlf: substring = substring.replace(u'\r\n', u'\n') self._hide_handles(EventLoop.window) if not from_undo and self.multiline and self.auto_indent \ and substring == u'\n': substring = self._auto_indent(substring) mode = self.input_filter if mode not in (None, 'int', 'float'): substring = mode(substring, from_undo) if not substring: return cc, cr = self.cursor sci = self.cursor_index ci = sci() text = self._lines[cr] len_str = len(substring) new_text = text[:cc] + substring + text[cc:] if mode is not None: if mode == 'int': if not re.match(self._insert_int_pat, new_text): return elif mode == 'float': if not re.match(self._insert_float_pat, new_text): return self._set_line_text(cr, new_text) wrap = (self._get_text_width( new_text, self.tab_width, self._label_cached) > (self.width - self.padding[0] - self.padding[2])) if len_str > 1 or substring == u'\n' or wrap: # Avoid refreshing text on every keystroke. # Allows for faster typing of text when the amount of text in # TextInput gets large. start, finish, lines,\ lineflags, len_lines = self._get_line_from_cursor(cr, new_text) # calling trigger here could lead to wrong cursor positioning # and repeating of text when keys are added rapidly in a automated # fashion. From Android Keyboard for example. 
self._refresh_text_from_property('insert', start, finish, lines, lineflags, len_lines) self.cursor = self.get_cursor_from_index(ci + len_str) # handle undo and redo self._set_unredo_insert(ci, ci + len_str, substring, from_undo) def _get_line_from_cursor(self, start, new_text): # get current paragraph from cursor position finish = start lines = self._lines linesflags = self._lines_flags if start and not linesflags[start]: start -= 1 new_text = u''.join((lines[start], new_text)) try: while not linesflags[finish + 1]: new_text = u''.join((new_text, lines[finish + 1])) finish += 1 except IndexError: pass lines, lineflags = self._split_smart(new_text) len_lines = max(1, len(lines)) return start, finish, lines, lineflags, len_lines def _set_unredo_insert(self, ci, sci, substring, from_undo): # handle undo and redo if from_undo: return self._undo.append({'undo_command': ('insert', ci, sci), 'redo_command': (ci, substring)}) # reset redo when undo is appended to self._redo = [] def reset_undo(self): '''Reset undo and redo lists from memory. .. versionadded:: 1.3.0 ''' self._redo = self._undo = [] def do_redo(self): '''Do redo operation. .. versionadded:: 1.3.0 This action re-does any command that has been un-done by do_undo/ctrl+z. This function is automatically called when `ctrl+r` keys are pressed. 
''' try: x_item = self._redo.pop() undo_type = x_item['undo_command'][0] _get_cusror_from_index = self.get_cursor_from_index if undo_type == 'insert': ci, substring = x_item['redo_command'] self.cursor = _get_cusror_from_index(ci) self.insert_text(substring, True) elif undo_type == 'bkspc': self.cursor = _get_cusror_from_index(x_item['redo_command']) self.do_backspace(from_undo=True) elif undo_type == 'shiftln': direction, rows, cursor = x_item['redo_command'][1:] self._shift_lines(direction, rows, cursor, True) else: # delsel ci, sci = x_item['redo_command'] self._selection_from = ci self._selection_to = sci self._selection = True self.delete_selection(True) self.cursor = _get_cusror_from_index(ci) self._undo.append(x_item) except IndexError: # reached at top of undo list pass def do_undo(self): '''Do undo operation. .. versionadded:: 1.3.0 This action un-does any edits that have been made since the last call to reset_undo(). This function is automatically called when `ctrl+z` keys are pressed. ''' try: x_item = self._undo.pop() undo_type = x_item['undo_command'][0] self.cursor = self.get_cursor_from_index(x_item['undo_command'][1]) if undo_type == 'insert': ci, sci = x_item['undo_command'][1:] self._selection_from = ci self._selection_to = sci self._selection = True self.delete_selection(True) elif undo_type == 'bkspc': substring = x_item['undo_command'][2:][0] self.insert_text(substring, True) elif undo_type == 'shiftln': direction, rows, cursor = x_item['undo_command'][1:] self._shift_lines(direction, rows, cursor, True) else: # delsel substring = x_item['undo_command'][2:][0] self.insert_text(substring, True) self._redo.append(x_item) except IndexError: # reached at top of undo list pass def do_backspace(self, from_undo=False, mode='bkspc'): '''Do backspace operation from the current cursor position. This action might do several things: - removing the current selection if available. - removing the previous char and move the cursor back. 
- do nothing, if we are at the start. ''' if self.readonly: return cc, cr = self.cursor _lines = self._lines text = _lines[cr] cursor_index = self.cursor_index() text_last_line = _lines[cr - 1] if cc == 0 and cr == 0: return _lines_flags = self._lines_flags start = cr if cc == 0: substring = u'\n' if _lines_flags[cr] else u' ' new_text = text_last_line + text self._set_line_text(cr - 1, new_text) self._delete_line(cr) start = cr - 1 else: # ch = text[cc-1] substring = text[cc - 1] new_text = text[:cc - 1] + text[cc:] self._set_line_text(cr, new_text) # refresh just the current line instead of the whole text start, finish, lines, lineflags, len_lines =\ self._get_line_from_cursor(start, new_text) # avoid trigger refresh, leads to issue with # keys/text send rapidly through code. self._refresh_text_from_property('del', start, finish, lines, lineflags, len_lines) self.cursor = self.get_cursor_from_index(cursor_index - 1) # handle undo and redo self._set_undo_redo_bkspc( cursor_index, cursor_index - 1, substring, from_undo) def _set_undo_redo_bkspc(self, ol_index, new_index, substring, from_undo): # handle undo and redo for backspace if from_undo: return self._undo.append({ 'undo_command': ('bkspc', new_index, substring), 'redo_command': ol_index}) # reset redo when undo is appended to self._redo = [] _re_whitespace = re.compile(r'\s+') def _move_cursor_word_left(self, index=None): pos = index or self.cursor_index() if pos == 0: return self.cursor lines = self._lines col, row = self.get_cursor_from_index(pos) if col == 0: row -= 1 col = len(lines[row]) while True: matches = list(self._re_whitespace.finditer(lines[row], 0, col)) if not matches: if col == 0: if row == 0: return 0, 0 row -= 1 col = len(lines[row]) continue return 0, row match = matches[-1] mpos = match.end() if mpos == col: if len(matches) > 1: match = matches[-2] mpos = match.end() else: if match.start() == 0: if row == 0: return 0, 0 row -= 1 col = len(lines[row]) continue return 0, row col = mpos 
return col, row def _move_cursor_word_right(self, index=None): pos = index or self.cursor_index() col, row = self.get_cursor_from_index(pos) lines = self._lines mrow = len(lines) - 1 if row == mrow and col == len(lines[row]): return col, row if col == len(lines[row]): row += 1 col = 0 while True: matches = list(self._re_whitespace.finditer(lines[row], col)) if not matches: if col == len(lines[row]): if row == mrow: return col, row row += 1 col = 0 continue return len(lines[row]), row match = matches[0] mpos = match.start() if mpos == col: if len(matches) > 1: match = matches[1] mpos = match.start() else: if match.end() == len(lines[row]): if row == mrow: return col, row row += 1 col = 0 continue return len(lines[row]), row col = mpos return col, row def _expand_range(self, ifrom, ito=None): if ito is None: ito = ifrom rfrom = self.get_cursor_from_index(ifrom)[1] rtcol, rto = self.get_cursor_from_index(ito) rfrom, rto = self._expand_rows(rfrom, rto + 1 if rtcol else rto) return (self.cursor_index((0, rfrom)), self.cursor_index((0, rto))) def _expand_rows(self, rfrom, rto=None): if rto is None or rto == rfrom: rto = rfrom + 1 lines = self._lines flags = list(reversed(self._lines_flags)) while rfrom > 0 and not (flags[rfrom - 1] & FL_IS_NEWLINE): rfrom -= 1 rmax = len(lines) - 1 while 0 < rto < rmax and not (flags[rto - 1] & FL_IS_NEWLINE): rto += 1 return max(0, rfrom), min(rmax, rto) def _shift_lines(self, direction, rows=None, old_cursor=None, from_undo=False): if self._selection_callback: if from_undo: self._selection_callback.cancel() else: return lines = self._lines flags = list(reversed(self._lines_flags)) labels = self._lines_labels rects = self._lines_rects orig_cursor = self.cursor sel = None if old_cursor is not None: self.cursor = old_cursor if not rows: sindex = self.selection_from eindex = self.selection_to if (sindex or eindex) and sindex != eindex: sindex, eindex = tuple(sorted((sindex, eindex))) sindex, eindex = self._expand_range(sindex, eindex) 
else: sindex, eindex = self._expand_range(self.cursor_index()) srow = self.get_cursor_from_index(sindex)[1] erow = self.get_cursor_from_index(eindex)[1] sel = sindex, eindex if direction < 0 and srow > 0: psrow, perow = self._expand_rows(srow - 1) rows = ((srow, erow), (psrow, perow)) elif direction > 0 and erow < len(lines) - 1: psrow, perow = self._expand_rows(erow) rows = ((srow, erow), (psrow, perow)) if rows: (srow, erow), (psrow, perow) = rows if direction < 0: m1srow, m1erow = psrow, perow m2srow, m2erow = srow, erow cdiff = psrow - perow xdiff = srow - erow else: m1srow, m1erow = srow, erow m2srow, m2erow = psrow, perow cdiff = perow - psrow xdiff = erow - srow self._lines_flags = list(reversed( flags[:m1srow] + flags[m2srow:m2erow] + flags[m1srow:m1erow] + flags[m2erow:])) self._lines = (lines[:m1srow] + lines[m2srow:m2erow] + lines[m1srow:m1erow] + lines[m2erow:]) self._lines_labels = (labels[:m1srow] + labels[m2srow:m2erow] + labels[m1srow:m1erow] + labels[m2erow:]) self._lines_rects = (rects[:m1srow] + rects[m2srow:m2erow] + rects[m1srow:m1erow] + rects[m2erow:]) self._trigger_update_graphics() csrow = srow + cdiff cerow = erow + cdiff sel = (self.cursor_index((0, csrow)), self.cursor_index((0, cerow))) self.cursor = self.cursor_col, self.cursor_row + cdiff if not from_undo: undo_rows = ((srow + cdiff, erow + cdiff), (psrow - xdiff, perow - xdiff)) self._undo.append({ 'undo_command': ('shiftln', direction * -1, undo_rows, self.cursor), 'redo_command': ('shiftln', direction, rows, orig_cursor), }) self._redo = [] if sel: def cb(dt): self.select_text(*sel) self._selection_callback = None self._selection_callback = Clock.schedule_once(cb) def do_cursor_movement(self, action, control=False, alt=False): '''Move the cursor relative to it's current position. 
Action can be one of : - cursor_left: move the cursor to the left - cursor_right: move the cursor to the right - cursor_up: move the cursor on the previous line - cursor_down: move the cursor on the next line - cursor_home: move the cursor at the start of the current line - cursor_end: move the cursor at the end of current line - cursor_pgup: move one "page" before - cursor_pgdown: move one "page" after In addition, the behavior of certain actions can be modified: - control + cursor_left: move the cursor one word to the left - control + cursor_right: move the cursor one word to the right - control + cursor_up: scroll up one line - control + cursor_down: scroll down one line - control + cursor_home: go to beginning of text - control + cursor_end: go to end of text - alt + cursor_up: shift line(s) up - alt + cursor_down: shift line(s) down .. versionchanged:: 1.9.1 ''' if not self._lines: return pgmove_speed = int(self.height / (self.line_height + self.line_spacing) - 1) col, row = self.cursor if action == 'cursor_up': if self.multiline and control: self.scroll_y = max(0, self.scroll_y - self.line_height) elif not self.readonly and self.multiline and alt: self._shift_lines(-1) return else: row = max(row - 1, 0) col = min(len(self._lines[row]), col) elif action == 'cursor_down': if self.multiline and control: maxy = self.minimum_height - self.height self.scroll_y = max(0, min(maxy, self.scroll_y + self.line_height)) elif not self.readonly and self.multiline and alt: self._shift_lines(1) return else: row = min(row + 1, len(self._lines) - 1) col = min(len(self._lines[row]), col) elif action == 'cursor_left': if not self.password and control: col, row = self._move_cursor_word_left() else: if col == 0: if row: row -= 1 col = len(self._lines[row]) else: col, row = col - 1, row elif action == 'cursor_right': if not self.password and control: col, row = self._move_cursor_word_right() else: if col == len(self._lines[row]): if row < len(self._lines) - 1: col = 0 row += 1 else: 
col, row = col + 1, row elif action == 'cursor_home': col = 0 if control: row = 0 elif action == 'cursor_end': if control: row = len(self._lines) - 1 col = len(self._lines[row]) elif action == 'cursor_pgup': row = max(0, row - pgmove_speed) col = min(len(self._lines[row]), col) elif action == 'cursor_pgdown': row = min(row + pgmove_speed, len(self._lines) - 1) col = min(len(self._lines[row]), col) self.cursor = (col, row) def get_cursor_from_xy(self, x, y): '''Return the (row, col) of the cursor from an (x, y) position. ''' padding_left = self.padding[0] padding_top = self.padding[1] l = self._lines dy = self.line_height + self.line_spacing cx = x - self.x scrl_y = self.scroll_y scrl_x = self.scroll_x scrl_y = scrl_y / dy if scrl_y > 0 else 0 cy = (self.top - padding_top + scrl_y * dy) - y cy = int(boundary(round(cy / dy - 0.5), 0, len(l) - 1)) _get_text_width = self._get_text_width _tab_width = self.tab_width _label_cached = self._label_cached for i in range(0, len(l[cy])): if _get_text_width(l[cy][:i], _tab_width, _label_cached) + \ _get_text_width(l[cy][i], _tab_width, _label_cached) * 0.6 +\ padding_left > cx + scrl_x: cx = i break return cx, cy # # Selection control # def cancel_selection(self): '''Cancel current selection (if any). ''' self._selection_from = self._selection_to = self.cursor_index() self._selection = False self._selection_finished = True self._selection_touch = None self.selection_text = u'' self._trigger_update_graphics() def delete_selection(self, from_undo=False): '''Delete the current text selection (if any). 
''' if self.readonly: return self._hide_handles(EventLoop.window) scrl_x = self.scroll_x scrl_y = self.scroll_y cc, cr = self.cursor if not self._selection: return v = self._get_text(encode=False) a, b = self._selection_from, self._selection_to if a > b: a, b = b, a self.cursor = cursor = self.get_cursor_from_index(a) start = cursor finish = self.get_cursor_from_index(b) cur_line = self._lines[start[1]][:start[0]] +\ self._lines[finish[1]][finish[0]:] lines, lineflags = self._split_smart(cur_line) len_lines = len(lines) if start[1] == finish[1]: self._set_line_text(start[1], cur_line) else: self._refresh_text_from_property('del', start[1], finish[1], lines, lineflags, len_lines) self.scroll_x = scrl_x self.scroll_y = scrl_y # handle undo and redo for delete selection self._set_unredo_delsel(a, b, v[a:b], from_undo) self.cancel_selection() def _set_unredo_delsel(self, a, b, substring, from_undo): # handle undo and redo for backspace if from_undo: return self._undo.append({ 'undo_command': ('delsel', a, substring), 'redo_command': (a, b)}) # reset redo when undo is appended to self._redo = [] def _update_selection(self, finished=False): '''Update selection text and order of from/to if finished is True. Can be called multiple times until finished is True. ''' a, b = self._selection_from, self._selection_to if a > b: a, b = b, a self._selection_finished = finished _selection_text = self._get_text(encode=False)[a:b] self.selection_text = ("" if not self.allow_copy else ((self.password_mask * (b - a)) if self.password else _selection_text)) if not finished: self._selection = True else: self._selection = bool(len(_selection_text)) self._selection_touch = None if a == 0: # update graphics only on new line # allows smoother scrolling, noticeably # faster when dealing with large text. 
self._update_graphics_selection() # self._trigger_update_graphics() # # Touch control # def long_touch(self, dt): self._long_touch_ev = None if self._selection_to == self._selection_from: pos = self.to_local(*self._long_touch_pos, relative=False) self._show_cut_copy_paste( pos, EventLoop.window, mode='paste') def on_double_tap(self): '''This event is dispatched when a double tap happens inside TextInput. The default behavior is to select the word around the current cursor position. Override this to provide different behavior. Alternatively, you can bind to this event to provide additional functionality. ''' ci = self.cursor_index() cc = self.cursor_col line = self._lines[self.cursor_row] len_line = len(line) start = max(0, len(line[:cc]) - line[:cc].rfind(u' ') - 1) end = line[cc:].find(u' ') end = end if end > - 1 else (len_line - cc) Clock.schedule_once(lambda dt: self.select_text(ci - start, ci + end)) def on_triple_tap(self): '''This event is dispatched when a triple tap happens inside TextInput. The default behavior is to select the line around current cursor position. Override this to provide different behavior. Alternatively, you can bind to this event to provide additional functionality. ''' ci = self.cursor_index() sindex, eindex = self._expand_range(ci) Clock.schedule_once(lambda dt: self.select_text(sindex, eindex)) def on_quad_touch(self): '''This event is dispatched when four fingers are touching inside TextInput. The default behavior is to select all text. Override this to provide different behavior. Alternatively, you can bind to this event to provide additional functionality. 
    def on_touch_down(self, touch):
        '''Grab the touch for cursor placement / selection and handle
        scroll-wheel events; returns True when the touch is consumed.
        '''
        if self.disabled:
            return

        touch_pos = touch.pos
        if not self.collide_point(*touch_pos):
            return False
        if super(TextInput, self).on_touch_down(touch):
            return True

        if self.focus:
            self._trigger_cursor_reset()

        # Check for scroll wheel
        if 'button' in touch.profile and touch.button.startswith('scroll'):
            # touch.button is e.g. 'scrolldown' -> strip the 'scroll' prefix
            scroll_type = touch.button[6:]
            if scroll_type == 'down':
                if self.multiline:
                    if self.scroll_y <= 0:
                        return True
                    self.scroll_y -= self.line_height
                else:
                    if self.scroll_x <= 0:
                        return True
                    self.scroll_x -= self.line_height
            if scroll_type == 'up':
                if self.multiline:
                    # stop when the last line is already visible
                    if (self._lines_rects[-1].pos[1] > self.y +
                            self.line_height):
                        return True
                    self.scroll_y += self.line_height
                else:
                    if (self.scroll_x + self.width >=
                            self._lines_rects[-1].texture.size[0]):
                        return True
                    self.scroll_x += self.line_height

        touch.grab(self)
        self._touch_count += 1
        if touch.is_double_tap:
            self.dispatch('on_double_tap')
        if touch.is_triple_tap:
            self.dispatch('on_triple_tap')
        if self._touch_count == 4:
            self.dispatch('on_quad_touch')

        self._hide_cut_copy_paste(EventLoop.window)
        # schedule long touch for paste
        self._long_touch_pos = touch.pos
        self._long_touch_ev = Clock.schedule_once(self.long_touch, .5)

        self.cursor = self.get_cursor_from_xy(*touch_pos)
        if not self._selection_touch:
            # first touch starts a (possibly empty) selection at the cursor
            self.cancel_selection()
            self._selection_touch = touch
            self._selection_from = self._selection_to = self.cursor_index()
            self._update_selection()

        # X11-style middle-click paste from the cut buffer
        if CutBuffer and 'button' in touch.profile and \
                touch.button == 'middle':
            self.insert_text(CutBuffer.get_cutbuffer())
            return True

        return True
on_touch_up(self, touch): if touch.grab_current is not self: return touch.ungrab(self) self._touch_count -= 1 # schedule long touch for paste if self._long_touch_ev is not None: self._long_touch_ev.cancel() self._long_touch_ev = None if not self.focus: return False if self._selection_touch is touch: self._selection_to = self.cursor_index() self._update_selection(True) # show Bubble win = EventLoop.window if self._selection_to != self._selection_from: self._show_cut_copy_paste(touch.pos, win) elif self.use_handles: self._hide_handles() handle_middle = self._handle_middle if handle_middle is None: self._handle_middle = handle_middle = Selector( source=self.handle_image_middle, window=win, target=self, size_hint=(None, None), size=('45dp', '45dp')) handle_middle.bind(on_press=self._handle_pressed, on_touch_move=self._handle_move, on_release=self._handle_released) if not self._handle_middle.parent and self.text: EventLoop.window.add_widget(handle_middle, canvas='after') self._position_handles(mode='middle') return True def _handle_pressed(self, instance): self._hide_cut_copy_paste() sf, st = self._selection_from, self.selection_to if sf > st: self._selection_from, self._selection_to = st, sf def _handle_released(self, instance): sf, st = self._selection_from, self.selection_to if sf == st: return self._update_selection() self._show_cut_copy_paste( (instance.right if instance is self._handle_left else instance.x, instance.top + self.line_height), EventLoop.window) def _handle_move(self, instance, touch): if touch.grab_current != instance: return get_cursor = self.get_cursor_from_xy handle_right = self._handle_right handle_left = self._handle_left handle_middle = self._handle_middle try: touch.push() touch.apply_transform_2d(self.to_widget) x, y = touch.pos finally: touch.pop() cursor = get_cursor( x, y + instance._touch_diff + (self.line_height / 2)) if instance != touch.grab_current: return if instance == handle_middle: self.cursor = cursor 
self._position_handles(mode='middle') return ci = self.cursor_index(cursor=cursor) sf, st = self._selection_from, self.selection_to if instance == handle_left: self._selection_from = ci elif instance == handle_right: self._selection_to = ci self._trigger_update_graphics() self._trigger_position_handles() def _position_handles(self, *args, **kwargs): if not self.text: return mode = kwargs.get('mode', 'both') lh = self.line_height handle_middle = self._handle_middle if handle_middle: hp_mid = self.cursor_pos pos = self.to_local(*hp_mid, relative=True) handle_middle.x = pos[0] - handle_middle.width / 2 handle_middle.top = pos[1] - lh if mode[0] == 'm': return group = self.canvas.get_group('selection') if not group: return EventLoop.window.remove_widget(self._handle_middle) handle_left = self._handle_left if not handle_left: return hp_left = group[2].pos handle_left.pos = self.to_local(*hp_left, relative=True) handle_left.x -= handle_left.width handle_left.y -= handle_left.height handle_right = self._handle_right last_rect = group[-1] hp_right = last_rect.pos[0], last_rect.pos[1] x, y = self.to_local(*hp_right, relative=True) handle_right.x = x + last_rect.size[0] handle_right.y = y - handle_right.height def _hide_handles(self, win=None): win = win or EventLoop.window if win is None: return win.remove_widget(self._handle_right) win.remove_widget(self._handle_left) win.remove_widget(self._handle_middle) def _show_handles(self, dt): if not self.use_handles or not self.text: return win = EventLoop.window handle_right = self._handle_right handle_left = self._handle_left if self._handle_left is None: self._handle_left = handle_left = Selector( source=self.handle_image_left, target=self, window=win, size_hint=(None, None), size=('45dp', '45dp')) handle_left.bind(on_press=self._handle_pressed, on_touch_move=self._handle_move, on_release=self._handle_released) self._handle_right = handle_right = Selector( source=self.handle_image_right, target=self, window=win, 
size_hint=(None, None), size=('45dp', '45dp')) handle_right.bind(on_press=self._handle_pressed, on_touch_move=self._handle_move, on_release=self._handle_released) else: if self._handle_left.parent: self._position_handles() return if not self.parent: return self._trigger_position_handles() if self.selection_from != self.selection_to: self._handle_left.opacity = self._handle_right.opacity = 0 win.add_widget(self._handle_left, canvas='after') win.add_widget(self._handle_right, canvas='after') anim = Animation(opacity=1, d=.4) anim.start(self._handle_right) anim.start(self._handle_left) def _show_cut_copy_paste(self, pos, win, parent_changed=False, mode='', pos_in_window=False, *l): # Show a bubble with cut copy and paste buttons if not self.use_bubble: return bubble = self._bubble if bubble is None: self._bubble = bubble = TextInputCutCopyPaste(textinput=self) self.fbind('parent', self._show_cut_copy_paste, pos, win, True) win.bind( size=lambda *args: self._hide_cut_copy_paste(win)) self.bind(cursor_pos=lambda *args: self._hide_cut_copy_paste(win)) else: win.remove_widget(bubble) if not self.parent: return if parent_changed: return # Search the position from the touch to the window lh, ls = self.line_height, self.line_spacing x, y = pos t_pos = (x, y) if pos_in_window else self.to_window(x, y) bubble_size = bubble.size bubble_hw = bubble_size[0] / 2. 
win_size = win.size bubble_pos = (t_pos[0], t_pos[1] + inch(.25)) if (bubble_pos[0] - bubble_hw) < 0: # bubble beyond left of window if bubble_pos[1] > (win_size[1] - bubble_size[1]): # bubble above window height bubble_pos = (bubble_hw, (t_pos[1]) - (lh + ls + inch(.25))) bubble.arrow_pos = 'top_left' else: bubble_pos = (bubble_hw, bubble_pos[1]) bubble.arrow_pos = 'bottom_left' elif (bubble_pos[0] + bubble_hw) > win_size[0]: # bubble beyond right of window if bubble_pos[1] > (win_size[1] - bubble_size[1]): # bubble above window height bubble_pos = (win_size[0] - bubble_hw, (t_pos[1]) - (lh + ls + inch(.25))) bubble.arrow_pos = 'top_right' else: bubble_pos = (win_size[0] - bubble_hw, bubble_pos[1]) bubble.arrow_pos = 'bottom_right' else: if bubble_pos[1] > (win_size[1] - bubble_size[1]): # bubble above window height bubble_pos = (bubble_pos[0], (t_pos[1]) - (lh + ls + inch(.25))) bubble.arrow_pos = 'top_mid' else: bubble.arrow_pos = 'bottom_mid' bubble_pos = self.to_widget(*bubble_pos, relative=True) bubble.center_x = bubble_pos[0] if bubble.arrow_pos[0] == 't': bubble.top = bubble_pos[1] else: bubble.y = bubble_pos[1] bubble.mode = mode Animation.cancel_all(bubble) bubble.opacity = 0 win.add_widget(bubble, canvas='after') Animation(opacity=1, d=.225).start(bubble) def _hide_cut_copy_paste(self, win=None): bubble = self._bubble if not bubble: return bubble.hide() # # Private # @staticmethod def _reload_remove_observer(wr): # called when the textinput is deleted if wr in _textinput_list: _textinput_list.remove(wr) def _on_textinput_focused(self, instance, value, *largs): win = EventLoop.window self.cancel_selection() self._hide_cut_copy_paste(win) if value: if (not (self.readonly or self.disabled) or _is_desktop and self._keyboard_mode == 'system'): self._trigger_cursor_reset() self._editable = True else: self._editable = False else: self._do_blink_cursor_ev.cancel() self._hide_handles(win) def _ensure_clipboard(self): global Clipboard, CutBuffer if not Clipboard: 
from kivy.core.clipboard import Clipboard, CutBuffer def cut(self): ''' Copy current selection to clipboard then delete it from TextInput. .. versionadded:: 1.8.0 ''' self._cut(self.selection_text) def _cut(self, data): self._ensure_clipboard() Clipboard.copy(data) self.delete_selection() def copy(self, data=''): ''' Copy the value provided in argument `data` into current clipboard. If data is not of type string it will be converted to string. If no data is provided then current selection if present is copied. .. versionadded:: 1.8.0 ''' self._ensure_clipboard() if data: return Clipboard.copy(data) if self.selection_text: return Clipboard.copy(self.selection_text) def paste(self): ''' Insert text from system :class:`~kivy.core.clipboard.Clipboard` into the :class:`~kivy.uix.textinput.TextInput` at current cursor position. .. versionadded:: 1.8.0 ''' self._ensure_clipboard() data = Clipboard.paste() self.delete_selection() self.insert_text(data) def _update_cutbuffer(self, *args): CutBuffer.set_cutbuffer(self.selection_text) def _get_text_width(self, text, tab_width, _label_cached): # Return the width of a text, according to the current line options kw = self._get_line_options() try: cid = u'{}\0{}\0{}'.format(text, self.password, kw) except UnicodeDecodeError: cid = '{}\0{}\0{}'.format(text, self.password, kw) width = Cache_get('textinput.width', cid) if width: return width if not _label_cached: _label_cached = self._label_cached text = text.replace('\t', ' ' * tab_width) if not self.password: width = _label_cached.get_extents(text)[0] else: width = _label_cached.get_extents( self.password_mask * len(text))[0] Cache_append('textinput.width', cid, width) return width def _do_blink_cursor(self, dt): # Callback for blinking the cursor. 
self.cursor_blink = not self.cursor_blink def _reset_cursor_blink(self, *args): self._do_blink_cursor_ev.cancel() self.cursor_blink = 0 self._do_blink_cursor_ev() def on_cursor(self, instance, value): # When the cursor is moved, reset cursor blinking to keep it showing, # and update all the graphics. if self.focus: self._trigger_cursor_reset() self._trigger_update_graphics() def _delete_line(self, idx): # Delete current line, and fix cursor position assert(idx < len(self._lines)) self._lines_flags.pop(idx) self._lines_labels.pop(idx) self._lines.pop(idx) self.cursor = self.cursor def _set_line_text(self, line_num, text): # Set current line with other text than the default one. self._lines_labels[line_num] = self._create_line_label(text) self._lines[line_num] = text def _trigger_refresh_line_options(self, *largs): if self._refresh_line_options_ev is not None: self._refresh_line_options_ev.cancel() else: self._refresh_line_options_ev = Clock.create_trigger( self._refresh_line_options, 0) self._refresh_line_options_ev() def _refresh_line_options(self, *largs): self._line_options = None self._get_line_options() self._refresh_text_from_property() self._refresh_hint_text() self.cursor = self.get_cursor_from_index(len(self.text)) def _trigger_refresh_text(self, *largs): if len(largs) and largs[0] == self: largs = () if self._refresh_text_from_property_ev is not None: self._refresh_text_from_property_ev.cancel() self._refresh_text_from_property_ev = Clock.schedule_once( lambda dt: self._refresh_text_from_property(*largs)) def _update_text_options(self, *largs): Cache_remove('textinput.width') self._trigger_refresh_text() def _refresh_text_from_trigger(self, dt, *largs): self._refresh_text_from_property(*largs) def _refresh_text_from_property(self, *largs): self._refresh_text(self._get_text(encode=False), *largs) def _refresh_text(self, text, *largs): # Refresh all the lines from a new text. # By using cache in internal functions, this method should be fast. 
mode = 'all' if len(largs) > 1: mode, start, finish, _lines, _lines_flags, len_lines = largs # start = max(0, start) cursor = None else: cursor = self.cursor_index() _lines, self._lines_flags = self._split_smart(text) _lines_labels = [] _line_rects = [] _create_label = self._create_line_label for x in _lines: lbl = _create_label(x) _lines_labels.append(lbl) _line_rects.append(Rectangle(size=lbl.size)) if mode == 'all': self._lines_labels = _lines_labels self._lines_rects = _line_rects self._lines = _lines elif mode == 'del': if finish > start: self._insert_lines(start, finish if start == finish else (finish + 1), len_lines, _lines_flags, _lines, _lines_labels, _line_rects) elif mode == 'insert': self._insert_lines( start, finish if (start == finish and not len_lines) else (finish + 1), len_lines, _lines_flags, _lines, _lines_labels, _line_rects) min_line_ht = self._label_cached.get_extents('_')[1] # with markup texture can be of height `1` self.line_height = max(_lines_labels[0].height, min_line_ht) # self.line_spacing = 2 # now, if the text change, maybe the cursor is not at the same place as # before. 
    def _insert_lines(self, start, finish, len_lines, _lines_flags,
                      _lines, _lines_labels, _line_rects):
        """Splice freshly split lines (plus their flags, labels and
        rectangles) into the internal stores, replacing the slice
        ``[start:finish]`` of each.
        """
        self_lines_flags = self._lines_flags
        _lins_flags = []
        _lins_flags.extend(self_lines_flags[:start])
        if len_lines:
            # if not inserting at first line then
            if start:
                # make sure line flags restored for first line
                # _split_smart assumes first line to be not a new line
                _lines_flags[0] = self_lines_flags[start]
            _lins_flags.extend(_lines_flags)
        _lins_flags.extend(self_lines_flags[finish:])
        self._lines_flags = _lins_flags

        _lins_lbls = []
        _lins_lbls.extend(self._lines_labels[:start])
        if len_lines:
            _lins_lbls.extend(_lines_labels)
        _lins_lbls.extend(self._lines_labels[finish:])
        self._lines_labels = _lins_lbls

        _lins_rcts = []
        _lins_rcts.extend(self._lines_rects[:start])
        if len_lines:
            _lins_rcts.extend(_line_rects)
        _lins_rcts.extend(self._lines_rects[finish:])
        self._lines_rects = _lins_rcts

        _lins = []
        _lins.extend(self._lines[:start])
        if len_lines:
            _lins.extend(_lines)
        _lins.extend(self._lines[finish:])
        self._lines = _lins
    def _update_graphics(self, *largs):
        """Rebuild the canvas rectangles for all visible text lines."""
        # Update all the graphics according to the current internal values.
        #
        # This is a little bit complex, cause we have to :
        #     - handle scroll_x
        #     - handle padding
        #     - create rectangle for the lines matching the viewport
        #     - crop the texture coordinates to match the viewport
        #
        # This is the first step of graphics, the second is the selection.

        self.canvas.clear()
        add = self.canvas.add

        lh = self.line_height
        dy = lh + self.line_spacing

        # adjust view if the cursor is going outside the bounds
        sx = self.scroll_x
        sy = self.scroll_y

        # draw labels; fall back to the hint text when there is no content
        if not self._lines or (
                not self._lines[0] and len(self._lines) == 1):
            rects = self._hint_text_rects
            labels = self._hint_text_labels
            lines = self._hint_text_lines
        else:
            rects = self._lines_rects
            labels = self._lines_labels
            lines = self._lines
        padding_left, padding_top, padding_right, padding_bottom = self.padding
        x = self.x + padding_left
        y = self.top - padding_top + sy
        miny = self.y + padding_bottom
        maxy = self.top - padding_top
        for line_num, value in enumerate(lines):
            if miny <= y <= maxy + dy:
                texture = labels[line_num]
                size = list(texture.size)
                texc = texture.tex_coords[:]

                # calcul coordinate
                viewport_pos = sx, 0
                vw = self.width - padding_left - padding_right
                vh = self.height - padding_top - padding_bottom
                tw, th = list(map(float, size))
                oh, ow = tch, tcw = texc[1:3]
                tcx, tcy = 0, 0

                # adjust size/texcoord according to viewport
                if viewport_pos:
                    tcx, tcy = viewport_pos
                    tcx = tcx / tw * (ow)
                    tcy = tcy / th * oh
                if tw - viewport_pos[0] < vw:
                    tcw = tcw - tcx
                    size[0] = tcw * size[0]
                elif vw < tw:
                    tcw = (vw / tw) * tcw
                    size[0] = vw
                if vh < th:
                    tch = (vh / th) * tch
                    size[1] = vh

                # cropping at the top/bottom edge of the viewport
                mlh = lh
                if y > maxy:
                    vh = (maxy - y + lh)
                    tch = (vh / float(lh)) * oh
                    tcy = oh - tch
                    size[1] = vh
                if y - lh < miny:
                    diff = miny - (y - lh)
                    y += diff
                    vh = lh - diff
                    tch = (vh / float(lh)) * oh
                    size[1] = vh

                texc = (
                    tcx,
                    tcy + tch,
                    tcx + tcw,
                    tcy + tch,
                    tcx + tcw,
                    tcy,
                    tcx,
                    tcy)

                # add rectangle.
                r = rects[line_num]
                r.pos = int(x), int(y - mlh)
                r.size = size
                r.texture = texture
                r.tex_coords = texc
                add(r)
            y -= dy

        self._update_graphics_selection()

    def _update_graphics_selection(self):
        """Redraw the 'selection' canvas group for the selected lines."""
        if not self._selection:
            return
        self.canvas.remove_group('selection')
        dy = self.line_height + self.line_spacing
        rects = self._lines_rects
        padding_top = self.padding[1]
        padding_bottom = self.padding[3]
        _top = self.top
        y = _top - padding_top + self.scroll_y
        miny = self.y + padding_bottom
        maxy = _top - padding_top
        draw_selection = self._draw_selection
        a, b = self._selection_from, self._selection_to
        if a > b:
            a, b = b, a
        get_cursor_from_index = self.get_cursor_from_index
        s1c, s1r = get_cursor_from_index(a)
        s2c, s2r = get_cursor_from_index(b)
        s2r += 1
        # pass only the selection lines[]
        # passing all the lines can get slow when dealing with a lot of text
        y -= s1r * dy
        _lines = self._lines
        _get_text_width = self._get_text_width
        tab_width = self.tab_width
        _label_cached = self._label_cached
        width = self.width
        padding_left = self.padding[0]
        padding_right = self.padding[2]
        x = self.x
        canvas_add = self.canvas.add
        selection_color = self.selection_color
        for line_num, value in enumerate(_lines[s1r:s2r], start=s1r):
            if miny <= y <= maxy + dy:
                r = rects[line_num]
                draw_selection(r.pos, r.size, line_num, (s1c, s1r),
                               (s2c, s2r - 1), _lines, _get_text_width,
                               tab_width, _label_cached, width,
                               padding_left, padding_right, x,
                               canvas_add, selection_color)
            y -= dy
        self._position_handles('both')
if line_num < s1r or line_num > s2r: return x, y = pos w, h = size x1 = x x2 = x + w if line_num == s1r: lines = _lines[line_num] x1 -= self.scroll_x x1 += _get_text_width(lines[:s1c], tab_width, _label_cached) if line_num == s2r: lines = _lines[line_num] x2 = (x - self.scroll_x) + _get_text_width(lines[:s2c], tab_width, _label_cached) width_minus_padding = width - (padding_right + padding_left) maxx = x + width_minus_padding if x1 > maxx: return x1 = max(x1, x) x2 = min(x2, x + width_minus_padding) canvas_add(Color(*selection_color, group='selection')) canvas_add(Rectangle( pos=(x1, pos[1]), size=(x2 - x1, size[1]), group='selection')) def on_size(self, instance, value): # if the size change, we might do invalid scrolling / text split # size the text maybe be put after size_hint have been resolved. self._trigger_refresh_text() self._refresh_hint_text() self.scroll_x = self.scroll_y = 0 def _get_cursor_pos(self): # return the current cursor x/y from the row/col dy = self.line_height + self.line_spacing padding_left = self.padding[0] padding_top = self.padding[1] left = self.x + padding_left top = self.top - padding_top y = top + self.scroll_y y -= self.cursor_row * dy x, y = left + self.cursor_offset() - self.scroll_x, y if x < left: self.scroll_x = 0 x = left if y > top: y = top self.scroll_y = 0 return x, y def _get_line_options(self): # Get or create line options, to be used for Label creation if self._line_options is None: self._line_options = kw = { 'font_size': self.font_size, 'font_name': self.font_name, 'anchor_x': 'left', 'anchor_y': 'top', 'padding_x': 0, 'padding_y': 0, 'padding': (0, 0)} self._label_cached = Label(**kw) return self._line_options def _create_line_label(self, text, hint=False): # Create a label from a text, using line options ntext = text.replace(u'\n', u'').replace(u'\t', u' ' * self.tab_width) if self.password and not hint: # Don't replace hint_text with * ntext = self.password_mask * len(ntext) kw = self._get_line_options() cid = 
'%s\0%s' % (ntext, str(kw)) texture = Cache_get('textinput.label', cid) if texture is None: # FIXME right now, we can't render very long line... # if we move on "VBO" version as fallback, we won't need to # do this. try to found the maximum text we can handle label = None label_len = len(ntext) ld = None # check for blank line if not ntext: texture = Texture.create(size=(1, 1)) Cache_append('textinput.label', cid, texture) return texture while True: try: label = Label(text=ntext[:label_len], **kw) label.refresh() if ld is not None and ld > 2: ld = int(ld / 2) label_len += ld else: break except: # exception happen when we tried to render the text # reduce it... if ld is None: ld = len(ntext) ld = int(ld / 2) if ld < 2 and label_len: label_len -= 1 label_len -= ld continue # ok, we found it. texture = label.texture Cache_append('textinput.label', cid, texture) return texture def _tokenize(self, text): # Tokenize a text string from some delimiters if text is None: return delimiters = u' ,\'".;:\n\r\t' oldindex = 0 for index, char in enumerate(text): if char not in delimiters: continue if oldindex != index: yield text[oldindex:index] yield text[index:index + 1] oldindex = index + 1 yield text[oldindex:] def _split_smart(self, text): # Do a "smart" split. If autowidth or autosize is set, # we are not doing smart split, just a split on line break. # Otherwise, we are trying to split as soon as possible, to prevent # overflow on the widget. # depend of the options, split the text on line, or word if not self.multiline: lines = text.split(u'\n') lines_flags = [0] + [FL_IS_LINEBREAK] * (len(lines) - 1) return lines, lines_flags # no autosize, do wordwrap. 
    def _key_down(self, key, repeat=False):
        """Apply one decoded key event: printable text is inserted,
        known internal actions (cursor moves, selection modifiers,
        del/backspace, enter, escape) are dispatched."""
        displayed_str, internal_str, internal_action, scale = key

        # handle deletion
        if internal_action is None:
            # plain printable character: replace any selection with it
            if self._selection:
                self.delete_selection()
            self.insert_text(displayed_str)
        elif internal_action in ('shift', 'shift_L', 'shift_R'):
            # start (or continue) a keyboard-driven selection
            if not self._selection:
                self._selection_from = self._selection_to = \
                    self.cursor_index()
            self._selection = True
            self._selection_finished = False
        elif internal_action == 'ctrl_L':
            self._ctrl_l = True
        elif internal_action == 'ctrl_R':
            self._ctrl_r = True
        elif internal_action == 'alt_L':
            self._alt_l = True
        elif internal_action == 'alt_R':
            self._alt_r = True
        elif internal_action.startswith('cursor_'):
            cc, cr = self.cursor
            self.do_cursor_movement(internal_action,
                                    self._ctrl_l or self._ctrl_r,
                                    self._alt_l or self._alt_r)
            if self._selection and not self._selection_finished:
                # shift held: extend the selection to the new cursor
                self._selection_to = self.cursor_index()
                self._update_selection()
            else:
                self.cancel_selection()
        elif self._selection and internal_action in ('del', 'backspace'):
            self.delete_selection()
        elif internal_action == 'del':
            # Move cursor one char to the right. If that was successful,
            # do a backspace (effectively deleting char right of cursor)
            cursor = self.cursor
            self.do_cursor_movement('cursor_right')
            if cursor != self.cursor:
                self.do_backspace(mode='del')
        elif internal_action == 'backspace':
            self.do_backspace()
        elif internal_action == 'enter':
            if self.multiline:
                self.insert_text(u'\n')
            else:
                self.dispatch('on_text_validate')
                if self.text_validate_unfocus:
                    self.focus = False
        elif internal_action == 'escape':
            self.focus = False
        if internal_action != 'escape':
            # self._recalc_size()
            pass

    def _key_up(self, key, repeat=False):
        """Release of a modifier key; a shift release finalizes the
        keyboard-driven selection."""
        displayed_str, internal_str, internal_action, scale = key
        if internal_action in ('shift', 'shift_L', 'shift_R'):
            if self._selection:
                self._update_selection(True)
        elif internal_action == 'ctrl_L':
            self._ctrl_l = False
        elif internal_action == 'ctrl_R':
            self._ctrl_r = False
        elif internal_action == 'alt_L':
            self._alt_l = False
        elif internal_action == 'alt_R':
            self._alt_r = False
    def keyboard_on_key_down(self, window, keycode, text, modifiers):
        """Main keyboard entry point: handles shortcuts, IME command
        mode, plain text input and the 'interesting' editing keys."""
        # Keycodes on OS X:
        ctrl, cmd = 64, 1024
        key, key_str = keycode
        win = EventLoop.window

        # This allows *either* ctrl *or* cmd, but not both.
        is_shortcut = (modifiers == ['ctrl'] or (
            _is_osx and modifiers == ['meta']))
        is_interesting_key = key in (list(self.interesting_keys.keys()) + [27])

        if not self.write_tab and super(TextInput,
                                        self).keyboard_on_key_down(
                window, keycode, text, modifiers):
            return True

        if not self._editable:
            # duplicated but faster testing for non-editable keys
            if text and not is_interesting_key:
                if is_shortcut and key == ord('c'):
                    self.copy()
            elif key == 27:
                self.focus = False
            return True

        if text and not is_interesting_key:
            self._hide_handles(win)
            self._hide_cut_copy_paste(win)
            win.remove_widget(self._handle_middle)

            # check for command modes
            # we use \x01INFO\x02 to get info from IME on mobiles
            # pygame seems to pass \x01 as the unicode for ctrl+a
            # checking for modifiers ensures conflict resolution.
            first_char = ord(text[0])
            if not modifiers and first_char == 1:
                self._command_mode = True
                self._command = ''
            if not modifiers and first_char == 2:
                self._command_mode = False
                self._command = self._command[1:]

            if self._command_mode:
                self._command += text
                return

            _command = self._command
            if _command and first_char == 2:
                from_undo = True
                _command, data = _command.split(':')
                self._command = ''
                if self._selection:
                    self.delete_selection()
                if _command == 'DEL':
                    count = int(data)
                    if not count:
                        self.delete_selection(from_undo=True)
                    end = self.cursor_index()
                    self._selection_from = max(end - count, 0)
                    self._selection_to = end
                    self._selection = True
                    self.delete_selection(from_undo=True)
                    return
                elif _command == 'INSERT':
                    self.insert_text(data, from_undo)
                elif _command == 'INSERTN':
                    from_undo = False
                    self.insert_text(data, from_undo)
                elif _command == 'SELWORD':
                    self.dispatch('on_double_tap')
                elif _command == 'SEL':
                    if data == '0':
                        Clock.schedule_once(lambda dt: self.cancel_selection())
                elif _command == 'CURCOL':
                    self.cursor = int(data), self.cursor_row
                return

            if is_shortcut:
                if key == ord('x'):  # cut selection
                    self._cut(self.selection_text)
                elif key == ord('c'):  # copy selection
                    self.copy()
                elif key == ord('v'):  # paste clipboard content
                    self.paste()
                elif key == ord('a'):  # select all
                    self.select_all()
                elif key == ord('z'):  # undo
                    self.do_undo()
                elif key == ord('r'):  # redo
                    self.do_redo()
            else:
                # sdl2 delivers text via keyboard_on_textinput instead,
                # except the android space workaround below
                if EventLoop.window.__class__.__module__ == \
                        'kivy.core.window.window_sdl2':
                    if not (text == ' ' and platform == 'android'):
                        return
                if self._selection:
                    self.delete_selection()
                self.insert_text(text)
            # self._recalc_size()
            return

        if is_interesting_key:
            self._hide_cut_copy_paste(win)
            self._hide_handles(win)
        if key == 27:  # escape
            self.focus = False
            return True
        elif key == 9:  # tab
            self.insert_text(u'\t')
            return True

        k = self.interesting_keys.get(key)
        if k:
            key = (None, None, k, 1)
            self._key_down(key)
ord('v'): # paste clipboard content self.paste() elif key == ord('a'): # select all self.select_all() elif key == ord('z'): # undo self.do_undo() elif key == ord('r'): # redo self.do_redo() else: if EventLoop.window.__class__.__module__ == \ 'kivy.core.window.window_sdl2': if not (text == ' ' and platform == 'android'): return if self._selection: self.delete_selection() self.insert_text(text) # self._recalc_size() return if is_interesting_key: self._hide_cut_copy_paste(win) self._hide_handles(win) if key == 27: # escape self.focus = False return True elif key == 9: # tab self.insert_text(u'\t') return True k = self.interesting_keys.get(key) if k: key = (None, None, k, 1) self._key_down(key) def keyboard_on_key_up(self, window, keycode): key, key_str = keycode k = self.interesting_keys.get(key) if k: key = (None, None, k, 1) self._key_up(key) def keyboard_on_textinput(self, window, text): if self._selection: self.delete_selection() self.insert_text(text, False) def on__hint_text(self, instance, value): self._refresh_hint_text() def _refresh_hint_text(self): _lines, self._hint_text_flags = self._split_smart(self.hint_text) _hint_text_labels = [] _hint_text_rects = [] _create_label = self._create_line_label for x in _lines: lbl = _create_label(x, hint=True) _hint_text_labels.append(lbl) _hint_text_rects.append(Rectangle(size=lbl.size)) self._hint_text_lines = _lines self._hint_text_labels = _hint_text_labels self._hint_text_rects = _hint_text_rects # Remember to update graphics self._trigger_update_graphics() # # Properties # _lines = ListProperty([]) _hint_text_lines = ListProperty([]) _editable = BooleanProperty(True) _insert_int_pat = re.compile(u'^-?[0-9]*$') _insert_float_pat = re.compile(u'^-?[0-9]*\\.?[0-9]*$') readonly = BooleanProperty(False) '''If True, the user will not be able to change the content of a textinput. .. versionadded:: 1.3.0 :attr:`readonly` is a :class:`~kivy.properties.BooleanProperty` and defaults to False. 
''' text_validate_unfocus = BooleanProperty(True) '''If True, the :meth:`TextInput.on_text_validate` event will unfocus the widget, therefore make it stop listening to the keyboard. When disabled, the :meth:`TextInput.on_text_validate` event can be fired multiple times as the result of TextInput keeping the focus enabled. .. versionadded:: 1.10.1 :attr:`text_validate_unfocus` is a :class:`~kivy.properties.BooleanProperty` and defaults to True. ''' multiline = BooleanProperty(True) '''If True, the widget will be able show multiple lines of text. If False, the "enter" keypress will defocus the textinput instead of adding a new line. :attr:`multiline` is a :class:`~kivy.properties.BooleanProperty` and defaults to True. ''' password = BooleanProperty(False) '''If True, the widget will display its characters as the character set in :attr:`password_mask`. .. versionadded:: 1.2.0 :attr:`password` is a :class:`~kivy.properties.BooleanProperty` and defaults to False. ''' password_mask = StringProperty('*') '''Sets the character used to mask the text when :attr:`password` is True. .. versionadded:: 1.10.0 :attr:`password_mask` is a :class:`~kivy.properties.StringProperty` and defaults to `'*'`. ''' keyboard_suggestions = BooleanProperty(True) '''If True provides auto suggestions on top of keyboard. This will only work if :attr:`input_type` is set to `text`. .. versionadded:: 1.8.0 :attr:`keyboard_suggestions` is a :class:`~kivy.properties.BooleanProperty` and defaults to True. ''' cursor_blink = BooleanProperty(False) '''This property is used to blink the cursor graphic. The value of :attr:`cursor_blink` is automatically computed. Setting a value on it will have no impact. :attr:`cursor_blink` is a :class:`~kivy.properties.BooleanProperty` and defaults to False. 
''' def _get_cursor(self): return self._cursor def _set_cursor(self, pos): if not self._lines: self._trigger_refresh_text() return l = self._lines cr = boundary(pos[1], 0, len(l) - 1) cc = boundary(pos[0], 0, len(l[cr])) cursor = cc, cr # adjust scrollview to ensure that the cursor will be always inside our # viewport. padding_left = self.padding[0] padding_right = self.padding[2] viewport_width = self.width - padding_left - padding_right sx = self.scroll_x offset = self.cursor_offset() # if offset is outside the current bounds, readjust if offset > viewport_width + sx: self.scroll_x = offset - viewport_width if offset < sx: self.scroll_x = offset # do the same for Y # this algo try to center the cursor as much as possible dy = self.line_height + self.line_spacing offsety = cr * dy sy = self.scroll_y padding_top = self.padding[1] padding_bottom = self.padding[3] viewport_height = self.height - padding_top - padding_bottom - dy if offsety > viewport_height + sy: sy = offsety - viewport_height if offsety < sy: sy = offsety self.scroll_y = sy if self._cursor == cursor: return self._cursor = cursor return True cursor = AliasProperty(_get_cursor, _set_cursor) '''Tuple of (row, col) values indicating the current cursor position. You can set a new (row, col) if you want to move the cursor. The scrolling area will be automatically updated to ensure that the cursor is visible inside the viewport. :attr:`cursor` is an :class:`~kivy.properties.AliasProperty`. ''' def _get_cursor_col(self): return self._cursor[0] cursor_col = AliasProperty(_get_cursor_col, None, bind=('cursor', )) '''Current column of the cursor. :attr:`cursor_col` is an :class:`~kivy.properties.AliasProperty` to cursor[0], read-only. ''' def _get_cursor_row(self): return self._cursor[1] cursor_row = AliasProperty(_get_cursor_row, None, bind=('cursor', )) '''Current row of the cursor. :attr:`cursor_row` is an :class:`~kivy.properties.AliasProperty` to cursor[1], read-only. 
''' cursor_pos = AliasProperty(_get_cursor_pos, None, bind=( 'cursor', 'padding', 'pos', 'size', 'focus', 'scroll_x', 'scroll_y')) '''Current position of the cursor, in (x, y). :attr:`cursor_pos` is an :class:`~kivy.properties.AliasProperty`, read-only. ''' cursor_color = ListProperty([1, 0, 0, 1]) '''Current color of the cursor, in (r, g, b, a) format. .. versionadded:: 1.9.0 :attr:`cursor_color` is a :class:`~kivy.properties.ListProperty` and defaults to [1, 0, 0, 1]. ''' cursor_width = NumericProperty('1sp') '''Current width of the cursor. .. versionadded:: 1.10.0 :attr:`cursor_width` is a :class:`~kivy.properties.NumericProperty` and defaults to '1sp'. ''' line_height = NumericProperty(1) '''Height of a line. This property is automatically computed from the :attr:`font_name`, :attr:`font_size`. Changing the line_height will have no impact. .. note:: :attr:`line_height` is the height of a single line of text. Use :attr:`minimum_height`, which also includes padding, to get the height required to display the text properly. :attr:`line_height` is a :class:`~kivy.properties.NumericProperty`, read-only. ''' tab_width = NumericProperty(4) '''By default, each tab will be replaced by four spaces on the text input widget. You can set a lower or higher value. :attr:`tab_width` is a :class:`~kivy.properties.NumericProperty` and defaults to 4. ''' padding_x = VariableListProperty([0, 0], length=2) '''Horizontal padding of the text: [padding_left, padding_right]. padding_x also accepts a one argument form [padding_horizontal]. :attr:`padding_x` is a :class:`~kivy.properties.VariableListProperty` and defaults to [0, 0]. This might be changed by the current theme. .. deprecated:: 1.7.0 Use :attr:`padding` instead. ''' def on_padding_x(self, instance, value): self.padding[0] = value[0] self.padding[2] = value[1] padding_y = VariableListProperty([0, 0], length=2) '''Vertical padding of the text: [padding_top, padding_bottom]. 
padding_y also accepts a one argument form [padding_vertical]. :attr:`padding_y` is a :class:`~kivy.properties.VariableListProperty` and defaults to [0, 0]. This might be changed by the current theme. .. deprecated:: 1.7.0 Use :attr:`padding` instead. ''' def on_padding_y(self, instance, value): self.padding[1] = value[0] self.padding[3] = value[1] padding = VariableListProperty([6, 6, 6, 6]) '''Padding of the text: [padding_left, padding_top, padding_right, padding_bottom]. padding also accepts a two argument form [padding_horizontal, padding_vertical] and a one argument form [padding]. .. versionchanged:: 1.7.0 Replaced AliasProperty with VariableListProperty. :attr:`padding` is a :class:`~kivy.properties.VariableListProperty` and defaults to [6, 6, 6, 6]. ''' scroll_x = NumericProperty(0) '''X scrolling value of the viewport. The scrolling is automatically updated when the cursor is moved or text changed. If there is no user input, the scroll_x and scroll_y properties may be changed. :attr:`scroll_x` is a :class:`~kivy.properties.NumericProperty` and defaults to 0. ''' scroll_y = NumericProperty(0) '''Y scrolling value of the viewport. See :attr:`scroll_x` for more information. :attr:`scroll_y` is a :class:`~kivy.properties.NumericProperty` and defaults to 0. ''' selection_color = ListProperty([0.1843, 0.6549, 0.8313, .5]) '''Current color of the selection, in (r, g, b, a) format. .. warning:: The color should always have an "alpha" component less than 1 since the selection is drawn after the text. :attr:`selection_color` is a :class:`~kivy.properties.ListProperty` and defaults to [0.1843, 0.6549, 0.8313, .5]. ''' border = ListProperty([4, 4, 4, 4]) '''Border used for :class:`~kivy.graphics.vertex_instructions.BorderImage` graphics instruction. Used with :attr:`background_normal` and :attr:`background_active`. Can be used for a custom background. .. versionadded:: 1.4.1 It must be a list of four values: (bottom, right, top, left). 
Read the BorderImage instruction for more information about how to use it. :attr:`border` is a :class:`~kivy.properties.ListProperty` and defaults to (4, 4, 4, 4). ''' background_normal = StringProperty( 'atlas://data/images/defaulttheme/textinput') '''Background image of the TextInput when it's not in focus. .. versionadded:: 1.4.1 :attr:`background_normal` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/textinput'. ''' background_disabled_normal = StringProperty( 'atlas://data/images/defaulttheme/textinput_disabled') '''Background image of the TextInput when disabled. .. versionadded:: 1.8.0 :attr:`background_disabled_normal` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/textinput_disabled'. ''' background_active = StringProperty( 'atlas://data/images/defaulttheme/textinput_active') '''Background image of the TextInput when it's in focus. .. versionadded:: 1.4.1 :attr:`background_active` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/textinput_active'. ''' background_color = ListProperty([1, 1, 1, 1]) '''Current color of the background, in (r, g, b, a) format. .. versionadded:: 1.2.0 :attr:`background_color` is a :class:`~kivy.properties.ListProperty` and defaults to [1, 1, 1, 1] (white). ''' foreground_color = ListProperty([0, 0, 0, 1]) '''Current color of the foreground, in (r, g, b, a) format. .. versionadded:: 1.2.0 :attr:`foreground_color` is a :class:`~kivy.properties.ListProperty` and defaults to [0, 0, 0, 1] (black). ''' disabled_foreground_color = ListProperty([0, 0, 0, .5]) '''Current color of the foreground when disabled, in (r, g, b, a) format. .. versionadded:: 1.8.0 :attr:`disabled_foreground_color` is a :class:`~kivy.properties.ListProperty` and defaults to [0, 0, 0, 5] (50% transparent black). ''' use_bubble = BooleanProperty(not _is_desktop) '''Indicates whether the cut/copy/paste bubble is used. .. 
versionadded:: 1.7.0 :attr:`use_bubble` is a :class:`~kivy.properties.BooleanProperty` and defaults to True on mobile OS's, False on desktop OS's. ''' use_handles = BooleanProperty(not _is_desktop) '''Indicates whether the selection handles are displayed. .. versionadded:: 1.8.0 :attr:`use_handles` is a :class:`~kivy.properties.BooleanProperty` and defaults to True on mobile OS's, False on desktop OS's. ''' suggestion_text = StringProperty('') '''Shows a suggestion text at the end of the current line. The feature is useful for text autocompletion, and it does not implement validation (accepting the suggested text on enter etc.). This can also be used by the IME to setup the current word being edited. .. versionadded:: 1.9.0 :attr:`suggestion_text` is a :class:`~kivy.properties.StringProperty` and defaults to `''`. ''' def on_suggestion_text(self, instance, value): global MarkupLabel if not MarkupLabel: from kivy.core.text.markup import MarkupLabel cursor_row = self.cursor_row if cursor_row >= len(self._lines) or self.canvas is None: return cursor_pos = self.cursor_pos txt = self._lines[cursor_row] kw = self._get_line_options() rct = self._lines_rects[cursor_row] lbl = text = None if value: lbl = MarkupLabel( text=txt + "[b]{}[/b]".format(value), **kw) else: lbl = Label(**kw) text = txt lbl.refresh() self._lines_labels[cursor_row] = lbl.texture rct.size = lbl.size self._update_graphics() def get_sel_from(self): return self._selection_from selection_from = AliasProperty(get_sel_from, None) '''If a selection is in progress or complete, this property will represent the cursor index where the selection started. .. versionchanged:: 1.4.0 :attr:`selection_from` is an :class:`~kivy.properties.AliasProperty` and defaults to None, readonly. ''' def get_sel_to(self): return self._selection_to selection_to = AliasProperty(get_sel_to, None) '''If a selection is in progress or complete, this property will represent the cursor index where the selection started. .. 
versionchanged:: 1.4.0 :attr:`selection_to` is an :class:`~kivy.properties.AliasProperty` and defaults to None, readonly. ''' selection_text = StringProperty(u'') '''Current content selection. :attr:`selection_text` is a :class:`~kivy.properties.StringProperty` and defaults to '', readonly. ''' def on_selection_text(self, instance, value): if value: if self.use_handles: self._trigger_show_handles() if CutBuffer and not self.password: self._trigger_update_cutbuffer() def _get_text(self, encode=False): lf = self._lines_flags l = self._lines len_l = len(l) if len(lf) < len_l: lf.append(1) text = u''.join([(u'\n' if (lf[i] & FL_IS_LINEBREAK) else u'') + l[i] for i in range(len_l)]) if encode and not isinstance(text, bytes): text = text.encode('utf8') return text def _set_text(self, text): if isinstance(text, bytes): text = text.decode('utf8') if self.replace_crlf: text = text.replace(u'\r\n', u'\n') if self._get_text(encode=False) == text: return self._refresh_text(text) self.cursor = self.get_cursor_from_index(len(text)) text = AliasProperty(_get_text, _set_text, bind=('_lines', )) '''Text of the widget. Creation of a simple hello world:: widget = TextInput(text='Hello world') If you want to create the widget with an unicode string, use:: widget = TextInput(text=u'My unicode string') :attr:`text` is an :class:`~kivy.properties.AliasProperty`. ''' font_name = StringProperty(DEFAULT_FONT) '''Filename of the font to use. The path can be absolute or relative. Relative paths are resolved by the :func:`~kivy.resources.resource_find` function. .. warning:: Depending on your text provider, the font file may be ignored. However, you can mostly use this without problems. If the font used lacks the glyphs for the particular language/symbols you are using, you will see '[]' blank box characters instead of the actual glyphs. The solution is to use a font that has the glyphs you need to display. For example, to display |unicodechar|, use a font like freesans.ttf that has the glyph. 
.. |unicodechar| image:: images/unicode-char.png :attr:`font_name` is a :class:`~kivy.properties.StringProperty` and defaults to 'Roboto'. This value is taken from :class:`~kivy.config.Config`. ''' font_size = NumericProperty('15sp') '''Font size of the text in pixels. :attr:`font_size` is a :class:`~kivy.properties.NumericProperty` and defaults to 15\ :attr:`~kivy.metrics.sp`. ''' _hint_text = StringProperty('') def _set_hint_text(self, value): if isinstance(value, bytes): value = value.decode('utf8') self._hint_text = value def _get_hint_text(self): return self._hint_text hint_text = AliasProperty( _get_hint_text, _set_hint_text, bind=('_hint_text', )) '''Hint text of the widget, shown if text is ''. .. versionadded:: 1.6.0 .. versionchanged:: 1.10.0 The property is now an AliasProperty and byte values are decoded to strings. The hint text will stay visible when the widget is focused. :attr:`hint_text` a :class:`~kivy.properties.AliasProperty` and defaults to ''. ''' hint_text_color = ListProperty([0.5, 0.5, 0.5, 1.0]) '''Current color of the hint_text text, in (r, g, b, a) format. .. versionadded:: 1.6.0 :attr:`hint_text_color` is a :class:`~kivy.properties.ListProperty` and defaults to [0.5, 0.5, 0.5, 1.0] (grey). ''' auto_indent = BooleanProperty(False) '''Automatically indent multiline text. .. versionadded:: 1.7.0 :attr:`auto_indent` is a :class:`~kivy.properties.BooleanProperty` and defaults to False. ''' replace_crlf = BooleanProperty(True) '''Automatically replace CRLF with LF. .. versionadded:: 1.9.1 :attr:`replace_crlf` is a :class:`~kivy.properties.BooleanProperty` and defaults to True. ''' allow_copy = BooleanProperty(True) '''Decides whether to allow copying the text. .. versionadded:: 1.8.0 :attr:`allow_copy` is a :class:`~kivy.properties.BooleanProperty` and defaults to True. 
''' def _get_min_height(self): return (len(self._lines) * (self.line_height + self.line_spacing) + self.padding[1] + self.padding[3]) minimum_height = AliasProperty(_get_min_height, None, bind=('_lines', 'line_spacing', 'padding', 'font_size', 'font_name', 'password', 'hint_text', 'line_height')) '''Minimum height of the content inside the TextInput. .. versionadded:: 1.8.0 :attr:`minimum_height` is a readonly :class:`~kivy.properties.AliasProperty`. .. warning:: :attr:`minimum_width` is calculated based on :attr:`width` therefore code like this will lead to an infinite loop:: <FancyTextInput>: height: self.minimum_height width: self.height ''' line_spacing = NumericProperty(0) '''Space taken up between the lines. .. versionadded:: 1.8.0 :attr:`line_spacing` is a :class:`~kivy.properties.NumericProperty` and defaults to 0. ''' input_filter = ObjectProperty(None, allownone=True) ''' Filters the input according to the specified mode, if not None. If None, no filtering is applied. .. versionadded:: 1.9.0 :attr:`input_filter` is an :class:`~kivy.properties.ObjectProperty` and defaults to `None`. Can be one of `None`, `'int'` (string), or `'float'` (string), or a callable. If it is `'int'`, it will only accept numbers. If it is `'float'` it will also accept a single period. Finally, if it is a callable it will be called with two parameters; the string to be added and a bool indicating whether the string is a result of undo (True). The callable should return a new substring that will be used instead. ''' handle_image_middle = StringProperty( 'atlas://data/images/defaulttheme/selector_middle') '''Image used to display the middle handle on the TextInput for cursor positioning. .. versionadded:: 1.8.0 :attr:`handle_image_middle` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/selector_middle'. 
''' def on_handle_image_middle(self, instance, value): if self._handle_middle: self._handle_middle.source = value handle_image_left = StringProperty( 'atlas://data/images/defaulttheme/selector_left') '''Image used to display the Left handle on the TextInput for selection. .. versionadded:: 1.8.0 :attr:`handle_image_left` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/selector_left'. ''' def on_handle_image_left(self, instance, value): if self._handle_left: self._handle_left.source = value handle_image_right = StringProperty( 'atlas://data/images/defaulttheme/selector_right') '''Image used to display the Right handle on the TextInput for selection. .. versionadded:: 1.8.0 :attr:`handle_image_right` is a :class:`~kivy.properties.StringProperty` and defaults to 'atlas://data/images/defaulttheme/selector_right'. ''' def on_handle_image_right(self, instance, value): if self._handle_right: self._handle_right.source = value write_tab = BooleanProperty(True) '''Whether the tab key should move focus to the next widget or if it should enter a tab in the :class:`TextInput`. If `True` a tab will be written, otherwise, focus will move to the next widget. .. versionadded:: 1.9.0 :attr:`write_tab` is a :class:`~kivy.properties.BooleanProperty` and defaults to `True`. ''' if __name__ == '__main__': from kivy.app import App from kivy.uix.boxlayout import BoxLayout from kivy.lang import Builder class TextInputApp(App): def build(self): Builder.load_string(''' <TextInput> on_text: self.suggestion_text = '' self.suggestion_text = 'ion_text' ''') root = BoxLayout(orientation='vertical') textinput = TextInput(multiline=True, use_bubble=True, use_handles=True) # textinput.text = __doc__ root.add_widget(textinput) textinput2 = TextInput(multiline=False, text='monoline textinput', size_hint=(1, None), height=30) root.add_widget(textinput2) return root TextInputApp().run()
{ "content_hash": "59d9ba667640fe5976712396cfce40ed", "timestamp": "", "source": "github", "line_count": 3175, "max_line_length": 79, "avg_line_length": 35.37732283464567, "alnum_prop": 0.5481246049339851, "repo_name": "jegger/kivy", "id": "c25b77500056f981c9112db50f0544a6cb8ebe67", "size": "112349", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "kivy/uix/textinput.py", "mode": "33188", "license": "mit", "language": [ { "name": "C", "bytes": "337106" }, { "name": "C++", "bytes": "3551" }, { "name": "Emacs Lisp", "bytes": "9671" }, { "name": "GLSL", "bytes": "289" }, { "name": "HTML", "bytes": "19384" }, { "name": "Makefile", "bytes": "4202" }, { "name": "Objective-C", "bytes": "14779" }, { "name": "Python", "bytes": "3792478" }, { "name": "Vim script", "bytes": "1123" } ], "symlink_target": "" }
from django.core.management.base import BaseCommand, CommandError
from django.contrib.gis.utils import add_postgis_srs


class Command(BaseCommand):
    """Management command that registers a custom spatial reference
    system (SRID) in the PostGIS ``spatial_ref_sys`` table.

    Usage: ``manage.py add_srid <srid>``
    """

    args = '<srid>'
    help = 'Add Custom SRID'

    def handle(self, *args, **options):
        # Fail with a usage error instead of an IndexError when the
        # SRID argument is missing.
        if not args:
            raise CommandError('You must provide an SRID, e.g. add_srid 900913')
        srid = args[0]
        try:
            add_postgis_srs(srid)
        except Exception as e:
            # Catch only real errors -- the previous bare ``except:`` also
            # swallowed KeyboardInterrupt/SystemExit -- and surface the
            # underlying cause in the error message.
            raise CommandError('Problem adding SRID: %s (%s)' % (srid, e))
        self.stdout.write('Successfully added SRID "%s"' % srid)
{ "content_hash": "cdbdbe5c0d39eb30c72b8a97cb3e9185", "timestamp": "", "source": "github", "line_count": 16, "max_line_length": 65, "avg_line_length": 29.375, "alnum_prop": 0.6127659574468085, "repo_name": "Ecotrust/COMPASS", "id": "162d4a3990a0c04ff822f496da76045bd7f3ce6d", "size": "470", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "mp/general/management/commands/add_srid.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "7006" }, { "name": "CSS", "bytes": "339147" }, { "name": "HTML", "bytes": "479776" }, { "name": "JavaScript", "bytes": "9220804" }, { "name": "Python", "bytes": "456984" }, { "name": "Ruby", "bytes": "143092" }, { "name": "Shell", "bytes": "17087" } ], "symlink_target": "" }
from django.db import models
from django.utils.translation import ugettext_lazy as _


class Project(models.Model):
    """Academic project registered on the platform.

    All free-text fields listed in ``_UPPERCASE_FIELDS`` are normalized
    to upper case when the instance is saved.
    """

    area = models.CharField(_('área'), max_length=100, blank=False, null=False)
    tema = models.CharField(_('tema'), max_length=200, blank=False, null=False)
    descricao = models.CharField(_('descrição'), max_length=400, blank=False, null=False)
    universidade = models.CharField(_('universidade'), max_length=200, null=False)
    universidadeOrientador = models.CharField(_('orientador'), max_length=200, blank=True, null=True)
    liderNome = models.CharField(_('líder'), max_length=200, null=False, blank=False)
    liderTelefone = models.CharField(_('telefone'), max_length=20, blank=False, null=False)
    liderEmail = models.EmailField(_('email'), max_length=100, null=False, blank=False)
    liderSocial = models.CharField(_('rede social'), max_length=200, blank=True)
    liderIntegrantes = models.CharField(_('integrantes'), max_length=400, blank=True, null=True)
    link_slides = models.CharField(_('slides'), max_length=300, blank=True)
    link_monografia = models.CharField(_('monografia'), max_length=300, blank=True)
    link_modelagem = models.CharField(_('modelagem'), max_length=300, blank=True)
    link_website = models.CharField(_('website'), max_length=300, blank=True)
    link_outros = models.CharField(_('outros'), max_length=300, blank=True)
    link_versionamento = models.CharField(_('versionamento'), max_length=300, blank=True)
    etapa = models.CharField(_('etapa'), max_length=3, blank=True)
    tags = models.CharField(_('tags'), max_length=300, blank=True)
    ativo = models.CharField(_('ativo'), max_length=3, default='VAL')
    # NOTE(review): auto_now=True together with auto_now_add=True is
    # contradictory (auto_now wins on old Django; newer versions flag it).
    # Left unchanged to avoid a schema/behavior change -- confirm intent.
    dataAlteracao = models.DateTimeField(_('data de alteracao'), auto_now=True, auto_now_add=True)
    dataCadastro = models.DateTimeField(_('data de cadastro'), auto_now=False, auto_now_add=True)

    # Free-text fields normalized to upper case on save.  Matches the set
    # the original save() touched (phones, social links and URLs excluded).
    _UPPERCASE_FIELDS = (
        'area', 'tema', 'descricao', 'universidade',
        'universidadeOrientador', 'liderNome', 'liderEmail',
        'liderIntegrantes', 'etapa', 'tags', 'ativo',
    )

    class Meta:
        ordering = ['dataCadastro']
        verbose_name = _(u'projeto')
        verbose_name_plural = _(u'projetos')

    def __unicode__(self):
        return self.tema + ' - ' + self.liderNome

    def save(self, force_insert=False, force_update=False, **kwargs):
        """Upper-case the text fields, then persist the instance.

        Nullable fields (``universidadeOrientador``, ``liderIntegrantes``)
        may be ``None``; the previous implementation crashed with an
        AttributeError in that case, so empty values are now skipped.
        """
        for name in self._UPPERCASE_FIELDS:
            value = getattr(self, name)
            if value:
                setattr(self, name, value.upper())
        # Forward any extra keyword arguments (e.g. ``using``) so the
        # override keeps the base Model.save() contract for callers.
        super(Project, self).save(force_insert, force_update, **kwargs)
{ "content_hash": "f9b14ddad9651ae6066384dde66cdd5b", "timestamp": "", "source": "github", "line_count": 52, "max_line_length": 101, "avg_line_length": 53.21153846153846, "alnum_prop": 0.6787134080231297, "repo_name": "edsonlb/PoloVota", "id": "0eef7ec20f95c7dae4588b64dd40b405df20a5f5", "size": "2825", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "projects/models.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "989" }, { "name": "JavaScript", "bytes": "138" }, { "name": "Python", "bytes": "69076" } ], "symlink_target": "" }
from GithubRepoClient import GithubRepoClient


class GithubRepoPool(object):
    """Pool of :class:`GithubRepoClient` instances, one per repository.

    Repeated requests for the same repository full name reuse the
    existing client instead of constructing a new one.
    """

    def __init__(self):
        # Clients created so far, in insertion order.
        self._github_repo_pool = list()

    def create_github_repo_client(self, full_name):
        """Return the pooled client for *full_name*, creating it on demand.

        :param full_name: repository identifier, e.g. ``"owner/repo"``
        :returns: a (possibly shared) :class:`GithubRepoClient`
        """
        for repo in self._github_repo_pool:
            if repo.get_repo_name() == full_name:
                return repo
        new_repo = GithubRepoClient(full_name)
        self.add_repo_to_pool(new_repo)
        return new_repo

    def add_repo_to_pool(self, github_repo_client):
        """Append *github_repo_client* to the pool.

        No deduplication happens here; lookup and reuse are handled by
        :meth:`create_github_repo_client`.
        """
        # The previous try/except around list.append only did ``raise e``
        # (discarding the traceback on Python 2); append cannot
        # meaningfully fail, so call it directly.
        self._github_repo_pool.append(github_repo_client)
{ "content_hash": "9fc0e328be5fe4a25db9768d6735e86c", "timestamp": "", "source": "github", "line_count": 35, "max_line_length": 61, "avg_line_length": 25.514285714285716, "alnum_prop": 0.574468085106383, "repo_name": "issuequality/issuequality", "id": "e81c04c5d4bb05265c84fa8ed3e3608130f9889d", "size": "940", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "GithubRepoPool.py", "mode": "33188", "license": "mit", "language": [ { "name": "Makefile", "bytes": "1189" }, { "name": "Python", "bytes": "45472" } ], "symlink_target": "" }
from __future__ import absolute_import

# external
import cybox.common

# internal
import stix
import stix.utils as utils
import stix.bindings.stix_common as stix_common_binding

# relative
from . import vocabs, VocabString
from .identity import Identity
from .structured_text import StructuredTextList


class InformationSource(stix.Entity):
    """Model for the STIX ``InformationSourceType``: who/what produced a
    piece of STIX content (identity, time, tools, contributing sources,
    references and roles)."""

    _binding = stix_common_binding
    _binding_class = stix_common_binding.InformationSourceType
    _namespace = 'http://stix.mitre.org/common-1'

    def __init__(self, description=None, identity=None, time=None,
                 tools=None, contributing_sources=None, references=None):
        self.description = description
        self.identity = identity
        self.contributing_sources = contributing_sources
        self.time = time
        self.tools = tools
        self.references = references
        self.roles = None

    @property
    def contributing_sources(self):
        return self._contributing_sources

    @contributing_sources.setter
    def contributing_sources(self, value):
        # The ContributingSources wrapper accepts None, a single source,
        # or an iterable of sources.
        self._contributing_sources = ContributingSources(value)

    def add_contributing_source(self, value):
        """Append *value* to the ``contributing_sources`` collection."""
        self.contributing_sources.append(value)

    @property
    def references(self):
        return self._references

    @references.setter
    def references(self, value):
        # Always reset to a fresh list; accepts None, a single reference
        # or a sequence of references.
        self._references = []

        if not value:
            return
        elif utils.is_sequence(value):
            for v in value:
                self.add_reference(v)
        else:
            self.add_reference(value)

    def add_reference(self, value):
        """Append a non-empty reference to ``references``."""
        if not value:
            return

        # TODO: Check if it's a valid URI?
        self.references.append(value)

    @property
    def description(self):
        """A single description about the contents or purpose of this object.

        Default Value: ``None``

        Note:
            If this object has more than one description set, this will return
            the description with the lowest ordinality value.

        Returns:
            An instance of :class:`.StructuredText`

        """
        return next(iter(self.descriptions), None)

    @description.setter
    def description(self, value):
        self.descriptions = value

    @property
    def descriptions(self):
        """A :class:`.StructuredTextList` object, containing descriptions about
        the purpose or intent of this object.

        Iterating over this object will yield its contents sorted by their
        ``ordinality`` value.

        Default Value: Empty :class:`.StructuredTextList` object.

        Note:
            If this is set to a value that is not an instance of
            :class:`.StructuredText`, an effort will be made to convert it.
            If this is set to an iterable, any values contained that are not
            an instance of :class:`.StructuredText` will be converted.

        Returns:
            An instance of :class:`.StructuredTextList`

        """
        return self._description

    @descriptions.setter
    def descriptions(self, value):
        self._description = StructuredTextList(value)

    def add_description(self, description):
        """Adds a description to the ``descriptions`` collection.

        This is the same as calling "foo.descriptions.add(bar)".
        """
        self.descriptions.add(description)

    @property
    def identity(self):
        return self._identity

    @identity.setter
    def identity(self, value):
        # _set_var (from stix.Entity) validates/assigns the typed value.
        self._set_var(Identity, try_cast=False, identity=value)

    @property
    def time(self):
        return self._time

    @time.setter
    def time(self, value):
        self._set_var(cybox.common.Time, try_cast=False, time=value)

    @property
    def tools(self):
        return self._tools

    @tools.setter
    def tools(self, value):
        self._set_var(cybox.common.ToolInformationList, try_cast=False, tools=value)

    @property
    def roles(self):
        return self._roles

    @roles.setter
    def roles(self, value):
        # _Roles coerces plain values into InformationSourceRole vocab terms.
        self._roles = _Roles(value)

    def add_role(self, value):
        """Append *value* to the ``roles`` collection."""
        self.roles.append(value)

    def to_obj(self, return_obj=None, ns_info=None):
        """Serialize this entity into its generated binding object."""
        super(InformationSource, self).to_obj(
            return_obj=return_obj,
            ns_info=ns_info
        )

        if not return_obj:
            return_obj = self._binding_class()

        # Only populate binding fields that actually have content.
        if self.descriptions:
            return_obj.Description = self.descriptions.to_obj(ns_info=ns_info)
        if self.references:
            return_obj.References = stix_common_binding.ReferencesType(Reference=self.references)
        if self.contributing_sources:
            return_obj.Contributing_Sources = self.contributing_sources.to_obj(ns_info=ns_info)
        if self.identity:
            return_obj.Identity = self.identity.to_obj(ns_info=ns_info)
        if self.time:
            return_obj.Time = self.time.to_obj(ns_info=ns_info)
        if self.tools:
            return_obj.Tools = self.tools.to_obj(ns_info=ns_info)
        if self.roles:
            return_obj.Role = self.roles.to_obj(ns_info=ns_info)

        return return_obj

    @classmethod
    def from_obj(cls, obj, return_obj=None):
        """Build an InformationSource from a binding object (or None)."""
        if not obj:
            return None
        if not return_obj:
            return_obj = cls()

        return_obj.description = StructuredTextList.from_obj(obj.Description)
        return_obj.identity = Identity.from_obj(obj.Identity)
        return_obj.contributing_sources = ContributingSources.from_obj(obj.Contributing_Sources)
        return_obj.roles = _Roles.from_obj(obj.Role)

        if obj.References:
            return_obj.references = obj.References.Reference
        if obj.Time:
            return_obj.time = cybox.common.Time.from_obj(obj.Time)
        if obj.Tools:
            return_obj.tools = cybox.common.ToolInformationList.from_obj(obj.Tools)

        return return_obj

    @classmethod
    def from_dict(cls, dict_repr, return_obj=None):
        """Build an InformationSource from its dictionary representation."""
        # To resolve circular dependency
        # TODO: Improve how this extension is handled.
        if not dict_repr:
            return None
        if not return_obj:
            return_obj = cls()

        get = dict_repr.get
        return_obj.description = StructuredTextList.from_dict(get('description'))
        return_obj.references = get('references')
        return_obj.contributing_sources = ContributingSources.from_dict(get('contributing_sources'))
        return_obj.identity = Identity.from_dict(get('identity'))
        return_obj.time = cybox.common.Time.from_dict(get('time'))
        return_obj.tools = cybox.common.ToolInformationList.from_list(get('tools'))
        return_obj.roles = _Roles.from_dict(get('roles'))

        return return_obj

    def to_dict(self):
        return super(InformationSource, self).to_dict()


class ContributingSources(stix.EntityList):
    """Typed collection of InformationSource entities, serialized under
    the ``Source`` binding field."""

    _namespace = "http://stix.mitre.org/common-1"
    _binding = stix_common_binding
    _binding_class = stix_common_binding.ContributingSourcesType
    _binding_var = "Source"
    _contained_type = InformationSource
    _inner_name = "sources"


# NOT AN ACTUAL STIX TYPE!
class _Roles(stix.TypedList):
    """Internal list of VocabString roles; plain values are coerced to
    InformationSourceRole vocabulary terms."""

    _contained_type = VocabString

    def _fix_value(self, value):
        return vocabs.InformationSourceRole(value)
{ "content_hash": "272d02715fadb741089557556ba0455c", "timestamp": "", "source": "github", "line_count": 240, "max_line_length": 123, "avg_line_length": 30.6125, "alnum_prop": 0.6353613719885668, "repo_name": "chriskiehl/python-stix", "id": "44d9dfef8aec5c4cfcc74cf89c1a76841c8d2817", "size": "7452", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "stix/common/information_source.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Python", "bytes": "1610823" } ], "symlink_target": "" }
from arangodb.api import Database, Collection from arangodb.query.advanced import Query from arangodb.tests.base import ExtendedTestCase class AqlQueryTestCase(ExtendedTestCase): def setUp(self): self.database_name = 'testcase_aqlquery_123' self.db = Database.create(name=self.database_name) self.test_1_col = self.db.create_collection('foo_1') self.test_2_col = self.db.create_collection('foo_2') self.col1_doc1 = self.test_1_col.create_document() self.col1_doc1.little_number = 33 self.col1_doc1.loved = False self.col1_doc1.small_text = "lll aa" self.col1_doc1.save() self.col1_doc2 = self.test_1_col.create_document() self.col1_doc2.little_number = 1 self.col1_doc2.loved = False self.col1_doc2.small_text = "aaa aa" self.col1_doc2.save() self.col1_doc3 = self.test_1_col.create_document() self.col1_doc3.little_number = 3 self.col1_doc3.loved = True self.col1_doc3.small_text = "xxx tt" self.col1_doc3.save() self.col2_doc1 = self.test_2_col.create_document() self.col2_doc1.little_number = 33 self.col2_doc1.loved = False self.col2_doc1.save() self.col2_doc2 = self.test_2_col.create_document() self.col2_doc2.little_number = 11 self.col2_doc2.loved = True self.col2_doc2.save() def tearDown(self): # They need to be deleted Collection.remove(name=self.test_1_col.name) Collection.remove(name=self.test_2_col.name) Database.remove(name=self.database_name) def test_get_all_doc_from_1_collection(self): q = Query() q.append_collection(self.test_2_col.name) docs = q.execute() self.assertEqual(len(docs), 2) def test_filter_number_field_in_document(self): q = Query() q.append_collection(self.test_1_col.name) q.filter(little_number=self.col1_doc3.little_number) docs = q.execute() self.assertEqual(len(docs), 1) doc = docs[0] self.assertDocumentsEqual(doc, self.col1_doc3) def test_filter_string_field_in_document(self): q = Query() q.append_collection(self.test_1_col.name) q.filter(small_text=self.col1_doc2.small_text) docs = q.execute() self.assertEqual(len(docs), 1) doc = docs[0] 
self.assertDocumentsEqual(doc, self.col1_doc2) def test_filter_of_multiple_collections(self): q = Query() q.append_collection(self.test_1_col.name) q.append_collection(self.test_2_col.name) dynamic_filter_dict = {} col_1_filter_name = "%s__%s" % (self.test_1_col.name, "little_number") col_2_filter_name = "%s__%s" % (self.test_2_col.name, "little_number") dynamic_filter_dict[col_1_filter_name] = 33 dynamic_filter_dict[col_2_filter_name] = 33 q.filter(bit_operator=Query.OR_BIT_OPERATOR, **dynamic_filter_dict) docs = q.execute() self.assertEqual(len(docs), 2) doc1 = docs[0] doc2 = docs[1] self.assertNotEqual(doc1.id, doc2.id) self.assertEqual(doc1.little_number, 33) self.assertEqual(doc2.little_number, 33) def test_exclude_document_from_list(self): q = Query() q.append_collection(self.test_1_col.name) q.exclude(loved=False) docs = q.execute() self.assertEqual(len(docs), 1) doc1 = docs[0] self.assertDocumentsEqual(doc1, self.col1_doc3) def test_sorting_asc_document_list(self): q = Query() q.append_collection(self.test_1_col.name) q.order_by('little_number') docs = q.execute() self.assertEqual(len(docs), 3) doc1 = docs[0] doc2 = docs[1] doc3 = docs[2] self.assertDocumentsEqual(doc1, self.col1_doc2) self.assertDocumentsEqual(doc2, self.col1_doc3) self.assertDocumentsEqual(doc3, self.col1_doc1) def test_limit_simple_list(self): q = Query() q.append_collection(self.test_1_col.name) q.order_by('little_number') q.limit(count=1) docs = q.execute() self.assertEqual(len(docs), 1) doc1 = docs[0] self.assertDocumentsEqual(doc1, self.col1_doc2) def test_limit_with_start_simple_list(self): q = Query() q.append_collection(self.test_1_col.name) q.order_by('little_number') q.limit(count=1, start=1) docs = q.execute() self.assertEqual(len(docs), 1) doc1 = docs[0] self.assertDocumentsEqual(doc1, self.col1_doc3) def test_greater_filtering(self): q = Query() q.append_collection(self.test_1_col.name) q.filter(little_number__gt=20) docs = q.execute() self.assertEqual(len(docs), 1) 
self.assertDocumentsEqual(self.col1_doc1, docs[0]) def test_greater_equals_filtering(self): q = Query() q.append_collection(self.test_1_col.name) q.filter(little_number__gte=3) docs = q.execute() self.assertEqual(len(docs), 2) def test_lower_filtering(self): q = Query() q.append_collection(self.test_1_col.name) q.filter(little_number__lt=3) docs = q.execute() self.assertEqual(len(docs), 1) self.assertDocumentsEqual(self.col1_doc2, docs[0]) def test_lower_equals_filtering(self): q = Query() q.append_collection(self.test_1_col.name) q.filter(little_number__lte=3) docs = q.execute() self.assertEqual(len(docs), 2) def test_contains_filtering(self): q = Query() q.append_collection(self.test_1_col.name) q.filter(small_text__contains='ll') docs = q.execute() self.assertEqual(len(docs), 1) doc1 = docs[0] self.assertDocumentsEqual(doc1, self.col1_doc1) def test_icontains_filtering(self): q = Query() q.append_collection(self.test_1_col.name) q.filter(small_text__icontains='LL') docs = q.execute() self.assertEqual(len(docs), 1) doc1 = docs[0] self.assertDocumentsEqual(doc1, self.col1_doc1)
{ "content_hash": "8d77d1790e08c67873b87248077beced", "timestamp": "", "source": "github", "line_count": 225, "max_line_length": 78, "avg_line_length": 27.955555555555556, "alnum_prop": 0.6039745627980923, "repo_name": "saeschdivara/ArangoPy", "id": "e79b9ab0ae0b2f1528a300b9d637d31302d1bd08", "size": "6290", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "arangodb/tests/query/aql.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "221757" }, { "name": "Shell", "bytes": "1682" } ], "symlink_target": "" }
import mock from neutron import context from neutron.db import api as db from neutron.openstack.common import uuidutils from neutron.plugins.nec.common import config from neutron.plugins.nec.db import api as ndb from neutron.plugins.nec.db import models as nmodels # noqa from neutron.plugins.nec import ofc_manager from neutron.tests import base class FakePortInfo(object): def __init__(self, id, datapath_id, port_no=0, vlan_id=65535, mac='00:11:22:33:44:55'): self.data = {'id': id, 'datapath_id': datapath_id, 'port_no': port_no, 'vlan_id': vlan_id, 'mac': mac} def __getattr__(self, name): if name in self.fields: return self[name] else: raise AttributeError(name) class OFCManagerTestBase(base.BaseTestCase): """Class conisting of OFCManager unit tests.""" def setUp(self): super(OFCManagerTestBase, self).setUp() db.configure_db() driver = "neutron.tests.unit.nec.stub_ofc_driver.StubOFCDriver" config.CONF.set_override('driver', driver, 'OFC') self.addCleanup(ndb.clear_db) self.plugin = mock.Mock() self.plugin.get_packet_filters_for_port.return_value = None self.ofc = ofc_manager.OFCManager(self.plugin) # NOTE: enable_autocheck() is a feature of StubOFCDriver self.ofc.driver.enable_autocheck() self.ctx = context.get_admin_context() self.addCleanup(mock.patch.stopall) def get_random_params(self): """create random parameters for portinfo test.""" tenant = uuidutils.generate_uuid() network = uuidutils.generate_uuid() port = uuidutils.generate_uuid() _filter = uuidutils.generate_uuid() none = uuidutils.generate_uuid() return tenant, network, port, _filter, none class OFCManagerTest(OFCManagerTestBase): def testa_create_ofc_tenant(self): """test create ofc_tenant.""" t, n, p, f, none = self.get_random_params() self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_tenant', t)) self.ofc.create_ofc_tenant(self.ctx, t) self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_tenant', t)) tenant = ndb.get_ofc_item(self.ctx.session, 'ofc_tenant', t) 
self.assertEqual(tenant.ofc_id, "ofc-" + t[:-4]) def testb_exists_ofc_tenant(self): """test exists_ofc_tenant.""" t, n, p, f, none = self.get_random_params() self.assertFalse(self.ofc.exists_ofc_tenant(self.ctx, t)) self.ofc.create_ofc_tenant(self.ctx, t) self.assertTrue(self.ofc.exists_ofc_tenant(self.ctx, t)) def testc_delete_ofc_tenant(self): """test delete ofc_tenant.""" t, n, p, f, none = self.get_random_params() self.ofc.create_ofc_tenant(self.ctx, t) self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_tenant', t)) self.ofc.delete_ofc_tenant(self.ctx, t) self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_tenant', t)) def testd_create_ofc_network(self): """test create ofc_network.""" t, n, p, f, none = self.get_random_params() self.ofc.create_ofc_tenant(self.ctx, t) self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_network', n)) self.ofc.create_ofc_network(self.ctx, t, n) self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_network', n)) network = ndb.get_ofc_item(self.ctx.session, 'ofc_network', n) self.assertEqual(network.ofc_id, "ofc-" + n[:-4]) def teste_exists_ofc_network(self): """test exists_ofc_network.""" t, n, p, f, none = self.get_random_params() self.ofc.create_ofc_tenant(self.ctx, t) self.assertFalse(self.ofc.exists_ofc_network(self.ctx, n)) self.ofc.create_ofc_network(self.ctx, t, n) self.assertTrue(self.ofc.exists_ofc_network(self.ctx, n)) def testf_delete_ofc_network(self): """test delete ofc_network.""" t, n, p, f, none = self.get_random_params() self.ofc.create_ofc_tenant(self.ctx, t) self.ofc.create_ofc_network(self.ctx, t, n) self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_network', n)) self.ofc.delete_ofc_network(self.ctx, n, {'tenant_id': t}) self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_network', n)) def _mock_get_portinfo(self, port_id, datapath_id='0xabc', port_no=1): get_portinfo = mock.patch.object(ndb, 'get_portinfo').start() fake_portinfo = FakePortInfo(id=port_id, datapath_id=datapath_id, 
port_no=port_no) get_portinfo.return_value = fake_portinfo return get_portinfo def _test_create_ofc_port(self, with_filter=False): t, n, p, f, none = self.get_random_params() self.ofc.create_ofc_tenant(self.ctx, t) self.ofc.create_ofc_network(self.ctx, t, n) self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_port', p)) get_portinfo = self._mock_get_portinfo(p) port = {'tenant_id': t, 'network_id': n} if with_filter: _filters = ['filter1', 'filter2'] self.plugin.get_packet_filters_for_port.return_value = _filters self.ofc.create_ofc_port(self.ctx, p, port) self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_port', p)) port = ndb.get_ofc_item(self.ctx.session, 'ofc_port', p) self.assertEqual(port.ofc_id, "ofc-" + p[:-4]) get_portinfo.assert_called_once_with(mock.ANY, p) portval = self.ofc.driver.ofc_port_dict[port.ofc_id] if with_filter: self.assertEqual(_filters, portval['filters']) else: self.assertFalse('filters' in portval) def testg_create_ofc_port(self): """test create ofc_port.""" self._test_create_ofc_port(with_filter=False) def testg_create_ofc_port_with_filters(self): """test create ofc_port.""" self._test_create_ofc_port(with_filter=True) def testh_exists_ofc_port(self): """test exists_ofc_port.""" t, n, p, f, none = self.get_random_params() self.ofc.create_ofc_tenant(self.ctx, t) self.ofc.create_ofc_network(self.ctx, t, n) self.assertFalse(self.ofc.exists_ofc_port(self.ctx, p)) get_portinfo = self._mock_get_portinfo(p) port = {'tenant_id': t, 'network_id': n} self.ofc.create_ofc_port(self.ctx, p, port) self.assertTrue(self.ofc.exists_ofc_port(self.ctx, p)) get_portinfo.assert_called_once_with(mock.ANY, p) def testi_delete_ofc_port(self): """test delete ofc_port.""" t, n, p, f, none = self.get_random_params() self.ofc.create_ofc_tenant(self.ctx, t) self.ofc.create_ofc_network(self.ctx, t, n) get_portinfo = self._mock_get_portinfo(p) port = {'tenant_id': t, 'network_id': n} self.ofc.create_ofc_port(self.ctx, p, port) 
self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_port', p)) self.ofc.delete_ofc_port(self.ctx, p, port) self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_port', p)) get_portinfo.assert_called_once_with(mock.ANY, p) class OFCManagerFilterTest(OFCManagerTestBase): def testj_create_ofc_packet_filter(self): """test create ofc_filter.""" t, n, p, f, none = self.get_random_params() self.ofc.create_ofc_tenant(self.ctx, t) self.ofc.create_ofc_network(self.ctx, t, n) self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_packet_filter', f)) pf = {'tenant_id': t, 'network_id': n} self.ofc.create_ofc_packet_filter(self.ctx, f, pf) self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_packet_filter', f)) _filter = ndb.get_ofc_item(self.ctx.session, 'ofc_packet_filter', f) self.assertEqual(_filter.ofc_id, "ofc-" + f[:-4]) def testk_exists_ofc_packet_filter(self): """test exists_ofc_packet_filter.""" t, n, p, f, none = self.get_random_params() self.ofc.create_ofc_tenant(self.ctx, t) self.ofc.create_ofc_network(self.ctx, t, n) self.assertFalse(self.ofc.exists_ofc_packet_filter(self.ctx, f)) pf = {'tenant_id': t, 'network_id': n} self.ofc.create_ofc_packet_filter(self.ctx, f, pf) self.assertTrue(self.ofc.exists_ofc_packet_filter(self.ctx, f)) def testl_delete_ofc_packet_filter(self): """test delete ofc_filter.""" t, n, p, f, none = self.get_random_params() self.ofc.create_ofc_tenant(self.ctx, t) self.ofc.create_ofc_network(self.ctx, t, n) pf = {'tenant_id': t, 'network_id': n} self.ofc.create_ofc_packet_filter(self.ctx, f, pf) self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_packet_filter', f)) self.ofc.delete_ofc_packet_filter(self.ctx, f) self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_packet_filter', f)) class OFCManagerRouterTest(OFCManagerTestBase): def get_random_params(self): tenant = uuidutils.generate_uuid() router = uuidutils.generate_uuid() network = uuidutils.generate_uuid() return (tenant, router, network) def 
test_create_ofc_router(self): """test create ofc_router""" t, r, _n = self.get_random_params() self.ofc.create_ofc_tenant(self.ctx, t) self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_router', r)) self.ofc.create_ofc_router(self.ctx, t, r, 'test router') self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_router', r)) router = ndb.get_ofc_item(self.ctx.session, 'ofc_router', r) self.assertEqual(router.ofc_id, "ofc-" + r[:-4]) def test_exists_ofc_router(self): """test exists_ofc_router""" t, r, _n = self.get_random_params() self.ofc.create_ofc_tenant(self.ctx, t) self.assertFalse(self.ofc.exists_ofc_router(self.ctx, r)) self.ofc.create_ofc_router(self.ctx, t, r) self.assertTrue(self.ofc.exists_ofc_router(self.ctx, r)) def test_delete_ofc_router(self): """test delete ofc_router""" t, r, _n = self.get_random_params() self.ofc.create_ofc_tenant(self.ctx, t) self.ofc.create_ofc_router(self.ctx, t, r) self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_router', r)) self.ofc.delete_ofc_router(self.ctx, r, {'tenant_id': t}) self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_network', r)) def test_router_interface(self): t, r, n = self.get_random_params() self.ofc.create_ofc_tenant(self.ctx, t) self.ofc.create_ofc_router(self.ctx, t, r) self.ofc.create_ofc_network(self.ctx, t, n) self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_router', r)) self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_network', n)) p = {'id': uuidutils.generate_uuid(), 'network_id': n, 'ip_address': '10.1.1.1', 'cidr': '10.1.0.0/20', 'mac_address': '11:22:33:44:55:66'} self.ofc.add_ofc_router_interface(self.ctx, r, p['id'], p) self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_port', p['id'])) self.ofc.delete_ofc_router_interface(self.ctx, r, p['id']) self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_port', p['id'])) self.ofc.delete_ofc_router(self.ctx, r, {'tenant_id': t}) self.assertFalse(ndb.get_ofc_item(self.ctx.session, 'ofc_network', r)) def 
test_router_route(self): t, r, _n = self.get_random_params() self.ofc.create_ofc_tenant(self.ctx, t) self.ofc.create_ofc_router(self.ctx, t, r) self.assertTrue(ndb.get_ofc_item(self.ctx.session, 'ofc_router', r)) routes = [{'destination': '2.2.2.0/24', 'nexthop': '1.1.1.10'}] self.ofc.update_ofc_router_route(self.ctx, r, routes) self.assertEqual(len(self.ofc.driver.ofc_router_route_dict), 1) routes = [{'destination': '3.3.3.0/24', 'nexthop': '1.1.1.11'}, {'destination': '4.4.4.0/24', 'nexthop': '1.1.1.11'}] self.ofc.update_ofc_router_route(self.ctx, r, routes) self.assertEqual(len(self.ofc.driver.ofc_router_route_dict), 2) routes = [{'destination': '2.2.2.0/24', 'nexthop': '1.1.1.10'}] self.ofc.update_ofc_router_route(self.ctx, r, routes) self.assertEqual(len(self.ofc.driver.ofc_router_route_dict), 1) routes = [] self.ofc.update_ofc_router_route(self.ctx, r, routes) self.assertEqual(len(self.ofc.driver.ofc_router_route_dict), 0)
{ "content_hash": "79277b858dd88c1c875c1a0904aaf2b3", "timestamp": "", "source": "github", "line_count": 281, "max_line_length": 78, "avg_line_length": 45.95373665480427, "alnum_prop": 0.6074498567335244, "repo_name": "beagles/neutron_hacking", "id": "a163e7382501e6bdf5062f7d345154d15c9fbaa3", "size": "13611", "binary": false, "copies": "3", "ref": "refs/heads/neutron_oslo_messaging", "path": "neutron/tests/unit/nec/test_ofc_manager.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "37307" }, { "name": "JavaScript", "bytes": "67930" }, { "name": "Python", "bytes": "8125263" }, { "name": "Shell", "bytes": "8920" }, { "name": "XSLT", "bytes": "50907" } ], "symlink_target": "" }
import cv2 import numpy as np import random import os from argparse import ArgumentParser ALPHA = 5 def build_parser(): parser = ArgumentParser() parser.add_argument('--original', dest='ori', required=True) parser.add_argument('--image', dest='img', required=True) parser.add_argument('--result', dest='res', required=True) parser.add_argument('--alpha', dest='alpha', default=ALPHA) return parser def main(): parser = build_parser() options = parser.parse_args() ori = options.ori img = options.img res = options.res alpha = float(options.alpha) if not os.path.isfile(ori): parser.error("original image %s does not exist." % ori) if not os.path.isfile(img): parser.error("image %s does not exist." % img) decode(ori, img, res, alpha) def decode(ori_path, img_path, res_path, alpha): ori = cv2.imread(ori_path) img = cv2.imread(img_path) ori_f = np.fft.fft2(ori) img_f = np.fft.fft2(img) height, width = ori.shape[0], ori.shape[1] watermark = (ori_f - img_f) / alpha watermark = np.real(watermark) res = np.zeros(watermark.shape) random.seed(height + width) x = range(height / 2) y = range(width) random.shuffle(x) random.shuffle(y) for i in range(height / 2): for j in range(width): res[x[i]][y[j]] = watermark[i][j] cv2.imwrite(res_path, res, [int(cv2.IMWRITE_JPEG_QUALITY), 100]) if __name__ == '__main__': main()
{ "content_hash": "28f56c1e54332f32e64e80eacdd55a78", "timestamp": "", "source": "github", "line_count": 53, "max_line_length": 68, "avg_line_length": 29.0188679245283, "alnum_prop": 0.6027308192457738, "repo_name": "linyacool/blind-watermark", "id": "03e8a8d78a5491e1a8855d99ade93661f6947500", "size": "1554", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "decode.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "3252" } ], "symlink_target": "" }
"""gcloud dns record-sets export command.""" from googlecloudsdk.core.util import files from googlecloudapis.apitools.base import py as apitools from googlecloudsdk.calliope import base from googlecloudsdk.calliope import exceptions from googlecloudsdk.core import log from googlecloudsdk.core import properties from googlecloudsdk.dns.lib import export_util from googlecloudsdk.dns.lib import util class Export(base.Command): """Export your record-sets into a file. This command exports the record-sets contained within the specified managed-zone into a file. """ detailed_help = { 'DESCRIPTION': '{description}', 'EXAMPLES': """\ To export record-sets into a yaml file, run: $ {command} YAML_RECORDS_FILE -z MANAGED_ZONE To import record-sets into a zone file, run: $ {command} ZONE_FILE --zone-file-format -z MANAGED_ZONE """, } @staticmethod def Args(parser): parser.add_argument('records_file', help='File to which record-sets should be exported.') parser.add_argument( '--zone-file-format', required=False, action='store_true', help='Indicates that records-file should be in the zone file format.') @util.HandleHttpError def Run(self, args): dns = self.context['dns_client'] messages = self.context['dns_messages'] resources = self.context['dns_resources'] project_id = properties.VALUES.core.project.Get(required=True) # Get the managed-zone. zone_ref = resources.Parse(args.zone, collection='dns.managedZones') try: zone = dns.managedZones.Get(zone_ref.Request()) except apitools.HttpError as error: raise exceptions.HttpException(util.GetErrorMessage(error)) # Get all the record-sets. record_sets = [] for record_set in apitools.YieldFromList( dns.resourceRecordSets, messages.DnsResourceRecordSetsListRequest(project=project_id, managedZone=zone_ref.Name()), field='rrsets'): record_sets.append(record_set) # Export the record-sets. 
try: with files.Context(open(args.records_file, 'w')) as export_file: if args.zone_file_format: export_util.WriteToZoneFile(export_file, record_sets, zone.dnsName) else: export_util.WriteToYamlFile(export_file, record_sets) except Exception as exp: msg = 'unable to export record-sets to file [{0}]: {1}'.format( args.records_file, exp) raise exceptions.ToolException(msg) log.status.Print('Exported record-sets to [{0}].'.format(args.records_file))
{ "content_hash": "407ccedc41116e62b07954ff4c2d8228", "timestamp": "", "source": "github", "line_count": 80, "max_line_length": 80, "avg_line_length": 33.45, "alnum_prop": 0.6659192825112108, "repo_name": "wemanuel/smry", "id": "bc9acf6955ea827b30274d5be8caffee7d3c29d9", "size": "2726", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "server-auth/ls/google-cloud-sdk/lib/googlecloudsdk/dns/commands/record_sets/export.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "3990" }, { "name": "Groff", "bytes": "1221174" }, { "name": "HTML", "bytes": "1873470" }, { "name": "JavaScript", "bytes": "2192" }, { "name": "Makefile", "bytes": "6032" }, { "name": "PHP", "bytes": "16660" }, { "name": "Python", "bytes": "47139164" }, { "name": "Shell", "bytes": "37102" }, { "name": "SourcePawn", "bytes": "1160" } ], "symlink_target": "" }
"""Commands for feedback thread and message operations.""" from __future__ import annotations import datetime import itertools from core import feconf from core.domain import email_manager from core.domain import feedback_domain from core.domain import rights_manager from core.domain import subscription_services from core.domain import taskqueue_services from core.domain import user_services from core.platform import models ( email_models, expl_models, feedback_models, question_models, skill_models, suggestion_models, topic_models ) = models.Registry.import_models([ models.NAMES.email, models.NAMES.exploration, models.NAMES.feedback, models.NAMES.question, models.NAMES.skill, models.NAMES.suggestion, models.NAMES.topic ]) datastore_services = models.Registry.import_datastore_services() transaction_services = models.Registry.import_transaction_services() DEFAULT_SUGGESTION_THREAD_SUBJECT = 'Suggestion from a learner' DEFAULT_SUGGESTION_THREAD_INITIAL_MESSAGE = '' TARGET_TYPE_TO_TARGET_MODEL = { feconf.ENTITY_TYPE_EXPLORATION: ( expl_models.ExplorationModel), feconf.ENTITY_TYPE_QUESTION: ( question_models.QuestionModel), feconf.ENTITY_TYPE_SKILL: ( skill_models.SkillModel), feconf.ENTITY_TYPE_TOPIC: ( topic_models.TopicModel) } def get_exp_id_from_thread_id(thread_id): """Returns the exploration_id part of the thread_id. TODO(#8370): Once feedback threads are generalized, this function needs to be updated to get the id from any general entity, not just explorations. At the moment, it still assumes that the thread id is associated to an exploration. Args: thread_id: str. The id of the thread. Returns: str. The exploration id part of the thread_id. """ return thread_id.split('.')[1] def _create_models_for_thread_and_first_message( entity_type, entity_id, original_author_id, subject, text, has_suggestion): """Creates a feedback thread and its first message. Args: entity_type: str. The type of entity the feedback thread is linked to. entity_id: str. The id of the entity. 
original_author_id: str. The author id who starts this thread. subject: str. The subject of this thread. text: str. The text of the feedback message. This may be ''. has_suggestion: bool. Whether this thread has a related learner suggestion. Returns: str. The id of the new thread. """ thread_id = ( feedback_models.GeneralFeedbackThreadModel.generate_new_thread_id( entity_type, entity_id)) thread = feedback_models.GeneralFeedbackThreadModel.create(thread_id) thread.entity_type = entity_type thread.entity_id = entity_id thread.original_author_id = original_author_id # The feedback analytics jobs rely on the thread status being set to 'open' # when a new thread is created. If this is changed, changes need to be made # there as well. thread.status = feedback_models.STATUS_CHOICES_OPEN thread.subject = subject thread.has_suggestion = has_suggestion thread.message_count = 0 thread.update_timestamps() thread.put() create_message( thread_id, original_author_id, feedback_models.STATUS_CHOICES_OPEN, subject, text) return thread_id def create_thread( entity_type, entity_id, original_author_id, subject, text, has_suggestion=False): """Creates a thread and its first message. Args: entity_type: str. The type of entity the feedback thread is linked to. entity_id: str. The id of the entity. original_author_id: str. The author id who starts this thread. subject: str. The subject of this thread. text: str. The text of the feedback message. This may be ''. has_suggestion: bool. Whether the thread has a suggestion attached to it. Returns: str. The id of the new thread. """ return _create_models_for_thread_and_first_message( entity_type, entity_id, original_author_id, subject, text, has_suggestion) def create_message( thread_id, author_id, updated_status, updated_subject, text, received_via_email=False, should_send_email=True): """Creates a new message for the thread and subscribes the author to the thread. Args: thread_id: str. The thread id the message belongs to. author_id: str. 
The author id who creates this message. updated_status: str|None. One of STATUS_CHOICES. New thread status. Must be supplied if this is the first message of a thread. For the rest of the thread, should exist only when the status changes. updated_subject: str|None. New thread subject. Must be supplied if this is the first message of a thread. For the rest of the thread, should exist only when the subject changes. text: str. The text of the feedback message. This may be ''. received_via_email: bool. Whether new message is received via email or web. should_send_email: bool. Whether the new message(s) need to be added to the email buffer. Returns: FeedbackMessage. The domain object representing the new message added in the datastore. Raises: Exception. GeneralFeedbackThreadModel entity not found. """ return create_messages( [thread_id], author_id, updated_status, updated_subject, text, received_via_email=received_via_email, should_send_email=should_send_email)[0] def create_messages( thread_ids, author_id, updated_status, updated_subject, text, received_via_email=False, should_send_email=True): """Creates a new message for each of the distinct threads in thread_ids and for each message, subscribes the author to the thread. Args: thread_ids: list(str). The thread ids to append the messages to. author_id: str. The id of the author who creates the messages. updated_status: str|None. One of STATUS_CHOICES. Applied to each thread. Must be supplied if this is the first message of the threads. Otherwise, this property should only exist when the status changes. updated_subject: str|None. New thread subject. Applied to each thread. Must be supplied if this is the first message of the threads. Otherwise, this property should only exist when the subject changes. text: str. The text of the feedback message. This may be ''. received_via_email: bool. Whether the new message(s) are received via email or web. should_send_email: bool. 
Whether the new message(s) need to be added to the email buffer. Returns: list(FeedbackMessage). The domain objects representing the new messages added in the datastore. Raises: Exception. Thread_ids must be distinct. Exception. One or more GeneralFeedbackThreadModel entities not found. """ from core.domain import event_services # Check that the thread_ids are distinct. if len(set(thread_ids)) != len(thread_ids): raise Exception( 'Thread ids must be distinct when calling create_messsages.') # Get the threads at the outset, in order to check that there are models # corresponding to each of the thread_ids. thread_models = feedback_models.GeneralFeedbackThreadModel.get_multi( thread_ids) thread_ids_that_do_not_have_models = [] for index, thread_model in enumerate(thread_models): if thread_model is None: thread_ids_that_do_not_have_models.append(thread_ids[index]) if len(thread_ids_that_do_not_have_models) > 0: multiple_thread_models_are_missing = ( len(thread_ids_that_do_not_have_models) > 1 ) raise Exception( 'Thread%s belonging to the GeneralFeedbackThreadModel class with ' 'id%s:[%s] %s not found.' % ( 's' if multiple_thread_models_are_missing else '', 's' if multiple_thread_models_are_missing else '', ' '.join(thread_ids_that_do_not_have_models), 'were' if multiple_thread_models_are_missing else 'was' ) ) # Get the corresponding message ids, which are required for message # creation. message_ids = ( feedback_models.GeneralFeedbackMessageModel.get_message_counts( thread_ids) ) # Create a list of FullyQualifiedMessageIdentifier objects so that each # (thread_id, message_id) pair is kept together. message_identifiers = [] for thread_id, message_id in zip(thread_ids, message_ids): message_identifiers.append( feedback_domain.FullyQualifiedMessageIdentifier( thread_id, message_id)) # Create the GeneralFeedbackMessageModel instances. message_models = feedback_models.GeneralFeedbackMessageModel.create_multi( message_identifiers) # Update the message instances. 
for index, message_model in enumerate(message_models): message_model.thread_id = thread_ids[index] message_model.message_id = message_ids[index] message_model.author_id = author_id message_model.text = text message_model.received_via_email = received_via_email # Get the corresponding thread in storage. thread_model = thread_models[index] if updated_status: message_model.updated_status = updated_status if message_model.message_id == 0: # New thread. if thread_model.entity_type == feconf.ENTITY_TYPE_EXPLORATION: event_services.FeedbackThreadCreatedEventHandler.record( thread_model.entity_id) else: # Thread status changed. if thread_model.entity_type == feconf.ENTITY_TYPE_EXPLORATION: ( event_services .FeedbackThreadStatusChangedEventHandler .record( thread_model.entity_id, thread_model.status, updated_status) ) if updated_subject: message_model.updated_subject = updated_subject feedback_models.GeneralFeedbackMessageModel.update_timestamps_multi( message_models) feedback_models.GeneralFeedbackMessageModel.put_multi(message_models) # Update the message data cache of the threads. for thread_model in thread_models: thread_model.message_count += 1 if text: thread_model.last_nonempty_message_text = text thread_model.last_nonempty_message_author_id = author_id # We do a put() even if the status and subject are not updated, so that the # last_updated time of the threads reflects the last time a message was # added to it. old_statuses = [thread_model.status for thread_model in thread_models] new_statuses = old_statuses if updated_status or updated_subject: new_statuses = [] for index, thread_model in enumerate(thread_models): # Can't be the first thread. 
if message_ids[index] != 0: if updated_status and (updated_status != thread_model.status): thread_model.status = updated_status if updated_subject and ( updated_subject != thread_model.subject): thread_model.subject = updated_subject new_statuses.append(thread_model.status) feedback_models.GeneralFeedbackThreadModel.update_timestamps_multi( thread_models) feedback_models.GeneralFeedbackThreadModel.put_multi(thread_models) # For each thread, we do a put on the suggestion linked (if it exists) to # the thread, so that the last_updated time changes to show that there is # activity in the thread. thread_ids_that_have_linked_suggestions = [] for thread_model in thread_models: if thread_model.has_suggestion: thread_ids_that_have_linked_suggestions.append(thread_model.id) general_suggestion_models = ( suggestion_models.GeneralSuggestionModel.get_multi( thread_ids_that_have_linked_suggestions) ) suggestion_models_to_update = [] for suggestion_model in general_suggestion_models: # As the thread is created before the suggestion, for the first message # we need not update the suggestion. if suggestion_model: suggestion_models_to_update.append(suggestion_model) suggestion_models.GeneralSuggestionModel.update_timestamps_multi( suggestion_models_to_update) suggestion_models.GeneralSuggestionModel.put_multi( suggestion_models_to_update) if (feconf.CAN_SEND_EMAILS and ( feconf.CAN_SEND_FEEDBACK_MESSAGE_EMAILS and user_services.is_user_registered(author_id)) and # TODO(#12079): Figure out a better way to avoid sending feedback # thread emails for contributor dashboard suggestions. 
            (len(text) > 0 or old_statuses[index] != new_statuses[index]) and
            should_send_email):
        # NOTE(review): `index` in the condition above is *not* set per-thread
        # here -- it is left over from an earlier `enumerate` loop, so the
        # status-change check only ever inspects the last-visited thread.
        # This looks like a latent bug; the check probably belongs inside the
        # loop below. TODO: confirm and fix.
        for index, thread_model in enumerate(thread_models):
            _add_message_to_email_buffer(
                author_id, thread_model.id, message_ids[index], len(text),
                old_statuses[index], new_statuses[index])

    if author_id:
        subscription_services.subscribe_to_threads(author_id, thread_ids)
        add_message_ids_to_read_by_list(author_id, message_identifiers)

    # Convert the GeneralFeedbackMessageModels into a list of FeedbackMessage
    # domain objects.
    feedback_messages = [
        _get_message_from_model(message_model)
        for message_model in message_models
    ]

    return feedback_messages


def _get_threads_user_info_keys(thread_ids):
    """Gets the feedback thread user model keys belonging to thread.

    Args:
        thread_ids: list(str). The ids of the threads.

    Returns:
        list(datastore_services.Key). The keys of the feedback thread user
            model.
    """
    # Avoid issuing a query at all when there are no thread ids; an empty
    # result is returned directly instead.
    if thread_ids:
        return feedback_models.GeneralFeedbackThreadUserModel.query(
            feedback_models.GeneralFeedbackThreadUserModel.thread_id.IN(
                thread_ids)
        ).fetch(keys_only=True)
    else:
        return []


def delete_threads_for_multiple_entities(entity_type, entity_ids):
    """Deletes a thread, its messages and thread user models. When the thread
    belongs to exploration deletes feedback analytics. When the thread has a
    suggestion deletes the suggestion.

    Args:
        entity_type: str. The type of entity the feedback thread is linked to.
        entity_ids: list(str). The ids of the entities.
""" threads = [] for entity_id in entity_ids: threads.extend(get_threads(entity_type, entity_id)) model_keys = [] for thread in threads: for message in get_messages(thread.id): model_keys.append( datastore_services.Key( feedback_models.GeneralFeedbackMessageModel, message.id) ) model_keys.append( datastore_services.Key( feedback_models.GeneralFeedbackThreadModel, thread.id) ) if thread.has_suggestion: model_keys.append( datastore_services.Key( suggestion_models.GeneralSuggestionModel, thread.id) ) model_keys += _get_threads_user_info_keys([thread.id for thread in threads]) if entity_type == feconf.ENTITY_TYPE_EXPLORATION: for entity_id in entity_ids: model_keys.append( datastore_services.Key( feedback_models.FeedbackAnalyticsModel, entity_id) ) datastore_services.delete_multi(model_keys) def update_messages_read_by_the_user(user_id, thread_id, message_ids): """Replaces the list of message ids read by the message ids given to the function. Args: user_id: str. The id of the user reading the messages. thread_id: str. The id of the thread. message_ids: list(int). The ids of the messages in the thread read by the user. """ feedback_thread_user_model = ( feedback_models.GeneralFeedbackThreadUserModel.get( user_id, thread_id) or feedback_models.GeneralFeedbackThreadUserModel.create( user_id, thread_id)) feedback_thread_user_model.message_ids_read_by_user = message_ids feedback_thread_user_model.update_timestamps() feedback_thread_user_model.put() def add_message_ids_to_read_by_list(user_id, message_identifiers): """Adds the given message IDs to the list of message IDs read by the user. Args: user_id: str. The id of the user reading the messages. message_identifiers: list(FullyQualifiedMessageIdentifier). Each message_identifier contains a thread_id and the corresponding message_id that will be added to the thread's list of message IDs read by the user. """ # Extract the thread_ids and message_ids from the # FullyQualifiedMessageIdentifier objects. 
thread_ids = [ message_identifier.thread_id for message_identifier in message_identifiers ] message_ids = [ message_identifier.message_id for message_identifier in message_identifiers ] # Get all of the GeneralFeedbackThreadUserModels that already exist. These # models will be None if a GeneralFeedbackThreadUserModel does not exist # for the user_id and thread_id yet. current_feedback_thread_user_models_with_possible_nones = ( feedback_models.GeneralFeedbackThreadUserModel.get_multi( user_id, thread_ids)) # Keep track of which thread_ids do not have feedback thread user models # yet. thread_ids_missing_user_models = [] # Keep track of the message_ids corresponding to the thread_ids that do not # have feedback thread user models yet. message_ids_for_missing_user_models = [] # Keep track of the feedback thread user models that already exist and # aren't None. This list will be used when we update the datastore. current_feedback_thread_user_models = [] for index, feedback_thread_user_model in enumerate( current_feedback_thread_user_models_with_possible_nones): if feedback_thread_user_model is None: thread_ids_missing_user_models.append(thread_ids[index]) message_ids_for_missing_user_models.append(message_ids[index]) else: current_feedback_thread_user_models.append( feedback_thread_user_model) # Add the message_id to the messages read by the user. feedback_thread_user_model.message_ids_read_by_user.append( message_ids[index]) # Create the new GeneralFeedbackThreadUserModels for each of the thread_ids # that do not have a model yet. new_feedback_thread_user_models = [] if thread_ids_missing_user_models: new_feedback_thread_user_models = ( feedback_models.GeneralFeedbackThreadUserModel.create_multi( user_id, thread_ids_missing_user_models) ) # For each of the new models, append the message_id to the # message_ids_read_by_user property. 
for index, feedback_thread_user_model in enumerate( new_feedback_thread_user_models): feedback_thread_user_model.message_ids_read_by_user.append( message_ids_for_missing_user_models[index] ) # Update both the new and previously existing models in the datastore. current_feedback_thread_user_models.extend( new_feedback_thread_user_models) feedback_models.GeneralFeedbackThreadUserModel.update_timestamps_multi( current_feedback_thread_user_models) feedback_models.GeneralFeedbackThreadUserModel.put_multi( current_feedback_thread_user_models) def _get_message_from_model(message_model): """Converts the FeedbackMessageModel to a FeedbackMessage. Args: message_model: FeedbackMessageModel. The FeedbackMessageModel to be converted. Returns: FeedbackMessage. The resulting FeedbackMessage domain object. """ return feedback_domain.FeedbackMessage( message_model.id, message_model.thread_id, message_model.message_id, message_model.author_id, message_model.updated_status, message_model.updated_subject, message_model.text, message_model.created_on, message_model.last_updated, message_model.received_via_email) def get_messages(thread_id): """Fetches all messages of the given thread. Args: thread_id: str. The id of the thread. Returns: list(FeedbackMessage). Contains all the messages in the thread. """ return [ _get_message_from_model(model) for model in feedback_models.GeneralFeedbackMessageModel.get_messages( thread_id) ] def get_message(thread_id, message_id): """Fetches the message indexed by thread_id and message_id. Args: thread_id: str. The id of the thread. message_id: int. The id of the message, relative to the thread. Returns: FeedbackMessage. The fetched message. 
""" return _get_message_from_model( feedback_models.GeneralFeedbackMessageModel.get(thread_id, message_id)) def get_next_page_of_all_feedback_messages( page_size=feconf.FEEDBACK_TAB_PAGE_SIZE, urlsafe_start_cursor=None): """Fetches a single page from the list of all feedback messages that have been posted to any exploration on the site. Args: page_size: int. The number of feedback messages to display per page. Defaults to feconf.FEEDBACK_TAB_PAGE_SIZE. urlsafe_start_cursor: str or None. The cursor which represents the current position to begin the fetch from. If None, the fetch is started from the beginning of the list of all messages. Returns: tuple(messages_on_page, next_urlsafe_start_cursor, more). Where: messages_on_page: list(FeedbackMessage). Contains the slice of messages that are part of the page pointed to by the given start cursor. next_urlsafe_start_cursor: str. The cursor to the next page. more: bool. Whether there are more messages available to fetch after this batch. """ models_on_page, next_urlsafe_start_cursor, more = ( feedback_models.GeneralFeedbackMessageModel.get_all_messages( page_size, urlsafe_start_cursor)) messages_on_page = [_get_message_from_model(m) for m in models_on_page] return (messages_on_page, next_urlsafe_start_cursor, more) def get_thread_analytics_multi(exploration_ids): """Fetches all FeedbackAnalytics, for all the given exploration ids. A FeedbackAnalytics contains the exploration id the analytics belongs to, how many open threads exist for the exploration, how many total threads exist for the exploration. Args: exploration_ids: list(str). A list of exploration ids. Returns: list(FeedbackAnalytics). Analytics in the the same order as the input list. If an exploration id is invalid, the number of threads in the corresponding FeedbackAnalytics object will be zero. 
""" feedback_thread_analytics_models = ( feedback_models.FeedbackAnalyticsModel.get_multi(exploration_ids)) return [ feedback_domain.FeedbackAnalytics( feconf.ENTITY_TYPE_EXPLORATION, exp_id, model.num_open_threads if model is not None else 0, model.num_total_threads if model is not None else 0) for exp_id, model in zip( exploration_ids, feedback_thread_analytics_models) ] def get_thread_analytics(exploration_id): """Fetches the FeedbackAnalytics for the given exploration. Args: exploration_id: str. The id of the exploration. Returns: FeedbackAnalytics. The feedback analytics of the given exploration. """ return get_thread_analytics_multi([exploration_id])[0] def get_total_open_threads(feedback_analytics_list): """Gets the count of all open threads from the given list of FeedbackAnalytics domain objects. Args: feedback_analytics_list: list(FeedbackAnalytics). A list of FeedbackAnalytics objects to get the count of all open threads. Returns: int. The count of all open threads for the given the given list of FeedbackAnalytics domain objects. """ return sum(a.num_open_threads for a in feedback_analytics_list) def get_multiple_threads(thread_ids): """Gets multiple feedback threads. Args: thread_ids: list(str). The list of thread ids. Returns: list(FeedbackThread). The list of feedback threads. """ return [ _get_thread_from_model(model) for model in feedback_models.GeneralFeedbackThreadModel.get_multi( thread_ids) ] def _get_thread_from_model(thread_model): """Converts the given FeedbackThreadModel to a FeedbackThread object. Args: thread_model: FeedbackThreadModel. The FeedbackThread model object to be converted to FeedbackThread object. Returns: FeedbackThread. The corresponding FeedbackThread domain object. 
""" message_count = ( thread_model.message_count or feedback_models.GeneralFeedbackMessageModel.get_message_count( thread_model.id)) return feedback_domain.FeedbackThread( thread_model.id, thread_model.entity_type, thread_model.entity_id, None, thread_model.original_author_id, thread_model.status, thread_model.subject, thread_model.summary, thread_model.has_suggestion, message_count, thread_model.created_on, thread_model.last_updated, thread_model.last_nonempty_message_text, thread_model.last_nonempty_message_author_id) def get_exp_thread_summaries(user_id, thread_ids): """Returns a list of summaries corresponding to the exploration threads from the given thread ids. Non-exploration threads are not included in the list. It also returns the number of threads that are currently not read by the user. Args: user_id: str. The id of the user. thread_ids: list(str). The ids of the threads for which we have to fetch the summaries. Returns: tuple(thread_summaries, number_of_unread_threads). Where: thread_summaries: list(FeedbackThreadSummary). number_of_unread_threads: int. The number of threads not read by the user. """ # We need to fetch the thread models first to filter out the threads which # don't refer to an exploration. 
exp_thread_models = [ model for model in feedback_models.GeneralFeedbackThreadModel.get_multi( thread_ids) if model and model.entity_type == feconf.ENTITY_TYPE_EXPLORATION ] exp_thread_user_model_ids = [ feedback_models.GeneralFeedbackThreadUserModel.generate_full_id( user_id, model.id) for model in exp_thread_models ] exp_model_ids = [model.entity_id for model in exp_thread_models] exp_thread_user_models, exp_models = ( datastore_services.fetch_multiple_entities_by_ids_and_models([ ('GeneralFeedbackThreadUserModel', exp_thread_user_model_ids), ('ExplorationModel', exp_model_ids), ])) threads = [_get_thread_from_model(m) for m in exp_thread_models] flattened_last_two_message_models_of_threads = ( feedback_models.GeneralFeedbackMessageModel.get_multi( itertools.chain.from_iterable( t.get_last_two_message_ids() for t in threads))) last_two_message_models_of_threads = [ flattened_last_two_message_models_of_threads[i:i + 2] for i in range(0, len(flattened_last_two_message_models_of_threads), 2) ] thread_summaries = [] number_of_unread_threads = 0 for thread, last_two_message_models, thread_user_model, exp_model in ( zip( threads, last_two_message_models_of_threads, exp_thread_user_models, exp_models)): message_ids_read_by_user = ( () if thread_user_model is None else thread_user_model.message_ids_read_by_user) last_message_model, second_last_message_model = last_two_message_models # We don't need to check if the last message is None because all threads # have at least one message. last_message_is_read = ( last_message_model.message_id in message_ids_read_by_user) author_last_message = ( last_message_model.author_id and user_services.get_username(last_message_model.author_id)) # The second-to-last message, however, might be None. 
second_last_message_is_read = ( second_last_message_model is not None and second_last_message_model.message_id in message_ids_read_by_user) author_second_last_message = ( second_last_message_model and second_last_message_model.author_id and user_services.get_username(second_last_message_model.author_id)) if not last_message_is_read: number_of_unread_threads += 1 thread_summaries.append( feedback_domain.FeedbackThreadSummary( thread.status, thread.original_author_id, thread.last_updated, last_message_model.text, thread.message_count, last_message_is_read, second_last_message_is_read, author_last_message, author_second_last_message, exp_model.title, exp_model.id, thread.id)) return thread_summaries, number_of_unread_threads def get_threads(entity_type, entity_id): """Fetches all the threads for the given entity id. Args: entity_type: str. The type of entity the feedback thread is linked to. entity_id: str. The id of the entity. Returns: list(FeedbackThread). The corresponding Suggestion domain object. """ thread_models = feedback_models.GeneralFeedbackThreadModel.get_threads( entity_type, entity_id) return [_get_thread_from_model(m) for m in thread_models] def get_thread(thread_id): """Fetches the thread by thread id. Args: thread_id: str. The id of the thread. Returns: FeedbackThread. The resulting FeedbackThread domain object. """ return _get_thread_from_model( feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id)) def get_closed_threads(entity_type, entity_id, has_suggestion): """Fetches all closed threads of the given entity id. Args: entity_type: str. The type of entity the feedback thread is linked to. entity_id: str. The id of the entity. has_suggestion: bool. If it's True, return a list of all closed threads that have a suggestion, otherwise return a list of all closed threads that do not have a suggestion. Returns: list(FeedbackThread). The resulting FeedbackThread domain objects. 
""" return [ thread for thread in get_threads(entity_type, entity_id) if ( thread.has_suggestion == has_suggestion and thread.status != feedback_models.STATUS_CHOICES_OPEN) ] def get_all_threads(entity_type, entity_id, has_suggestion): """Fetches all threads (regardless of their status) that correspond to the given entity id. Args: entity_type: str. The type of entity the feedback thread is linked to. entity_id: str. The id of the entity. has_suggestion: bool. If it's True, return a list of all threads that have a suggestion, otherwise return a list of all threads that do not have a suggestion. Returns: list(FeedbackThread). The resulting FeedbackThread domain objects. """ return [ thread for thread in get_threads(entity_type, entity_id) if thread.has_suggestion == has_suggestion ] def enqueue_feedback_message_batch_email_task(user_id): """Adds a 'send feedback email' (batch) task into the task queue. Args: user_id: str. The user to be notified. """ taskqueue_services.enqueue_task( feconf.TASK_URL_FEEDBACK_MESSAGE_EMAILS, {'user_id': user_id}, feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_COUNTDOWN_SECS) def enqueue_feedback_message_instant_email_task_transactional( user_id, reference): """Adds a 'send feedback email' (instant) task into the task queue. Args: user_id: str. The user to be notified. reference: FeedbackMessageReference. A reference that contains the data needed to identify the feedback message. """ payload = { 'user_id': user_id, 'reference_dict': reference.to_dict() } taskqueue_services.enqueue_task( feconf.TASK_URL_INSTANT_FEEDBACK_EMAILS, payload, 0) @transaction_services.run_in_transaction_wrapper def _enqueue_feedback_thread_status_change_email_task_transactional( user_id, reference, old_status, new_status): """Adds a task for sending email when a feedback thread status is changed. Args: user_id: str. The user to be notified. reference: FeedbackMessageReference. The feedback message reference object to be converted to dict. old_status: str. 
            One of STATUS_CHOICES.
        new_status: str. One of STATUS_CHOICES.
    """
    payload = {
        'user_id': user_id,
        'reference_dict': reference.to_dict(),
        'old_status': old_status,
        'new_status': new_status
    }
    taskqueue_services.enqueue_task(
        feconf.TASK_URL_FEEDBACK_STATUS_EMAILS, payload, 0)


def get_feedback_message_references(user_id):
    """Fetches all FeedbackMessageReference objects written by the given user.

    Args:
        user_id: str. If the user id is invalid or there is no message for
            this user, return an empty list.

    Returns:
        list(FeedbackMessageReference). The resulting FeedbackMessageReference
            domain objects.
    """
    model = feedback_models.UnsentFeedbackEmailModel.get(user_id, strict=False)
    # The model may not exist for this user; treat that the same as having no
    # buffered references.
    feedback_message_references = (
        () if model is None else model.feedback_message_references)
    return [
        feedback_domain.FeedbackMessageReference(
            reference['entity_type'], reference['entity_id'],
            reference['thread_id'], reference['message_id'])
        for reference in feedback_message_references
    ]


@transaction_services.run_in_transaction_wrapper
def _add_feedback_message_reference_transactional(user_id, reference):
    """Adds a new message to the feedback message buffer that is used to
    generate the next notification email to the given user.

    Args:
        user_id: str. If there's an UnsentFeedbackEmailModel for the given
            user, update the instance with given reference, otherwise create a
            new instance.
        reference: FeedbackMessageReference. The new message reference to add
            to the buffer.
""" model = feedback_models.UnsentFeedbackEmailModel.get(user_id, strict=False) if model is not None: model.feedback_message_references.append(reference.to_dict()) model.update_timestamps() model.put() else: model = feedback_models.UnsentFeedbackEmailModel( id=user_id, feedback_message_references=[reference.to_dict()]) model.update_timestamps() model.put() enqueue_feedback_message_batch_email_task(user_id) @transaction_services.run_in_transaction_wrapper def update_feedback_email_retries_transactional(user_id): """If sufficient time has passed, increment the number of retries for the corresponding user's UnsentEmailFeedbackModel. Args: user_id: str. The id of the given user. """ model = feedback_models.UnsentFeedbackEmailModel.get(user_id) time_since_buffered = ( (datetime.datetime.utcnow() - model.created_on).seconds) if (time_since_buffered > feconf.DEFAULT_FEEDBACK_MESSAGE_EMAIL_COUNTDOWN_SECS): model.retries += 1 model.update_timestamps() model.put() @transaction_services.run_in_transaction_wrapper def pop_feedback_message_references_transactional( user_id, num_references_to_pop): """Pops feedback message references of the given user which have been processed already. Args: user_id: str. The id of the current user. num_references_to_pop: int. Number of feedback message references that have been processed already. """ model = feedback_models.UnsentFeedbackEmailModel.get(user_id) remaining_references = ( model.feedback_message_references[num_references_to_pop:]) model.delete() if remaining_references: # We recreate the model in order to re-initialize its 'created_on' # property and reset the retries count to 0. If we don't do this, then # the retries count will be incorrect. 
model = feedback_models.UnsentFeedbackEmailModel( id=user_id, feedback_message_references=remaining_references) model.update_timestamps() model.put() enqueue_feedback_message_batch_email_task(user_id) @transaction_services.run_in_transaction_wrapper def clear_feedback_message_references_transactional( user_id, exploration_id, thread_id): """Removes feedback message references associated with a feedback thread. Args: user_id: str. The user who created this reference. exploration_id: str. The id of the exploration. thread_id: str. The id of the thread. """ model = feedback_models.UnsentFeedbackEmailModel.get(user_id, strict=False) if model is None: # Model exists only if user has received feedback on exploration. return updated_references = [ reference for reference in model.feedback_message_references if (reference['entity_id'] != exploration_id or reference['thread_id'] != thread_id) ] if not updated_references: # Note that any tasks remaining in the email queue will still be # processed, but if the model for the given user does not exist, # no email will be sent. # Note that, since the task in the queue is not deleted, the following # scenario may occur: If creator attends to arrived feedback before # email is sent then model will be deleted but task will still execute # after its countdown. Arrival of new feedback (before task is executed) # will create new model and task. But actual email will be sent by first # task. It means that email may be sent just after a few minutes of # feedback's arrival. # In PR #2261, we decided to leave things as they are for now, since it # looks like the obvious solution of keying tasks by user id doesn't # work (see #2258). However, this may be worth addressing in the future. 
model.delete() else: model.feedback_message_references = updated_references model.update_timestamps() model.put() def _get_all_recipient_ids(exploration_id, thread_id, author_id): """Fetches all authors of the exploration excluding the given author and all the other recipients. Args: exploration_id: str. The id of the exploration. thread_id: str. The id of the thread. author_id: str. One author of the given exploration_id. Returns: tuple(batch_recipients, other_recipients). Where: batch_recipients: list(str). The user_ids of the authors excluding the given author. other_recipients: list(str). The user_ids of the other participants in this thread, excluding owners of the exploration and the given author. """ exploration_rights = rights_manager.get_exploration_rights(exploration_id) owner_ids = set(exploration_rights.owner_ids) participant_ids = { message.author_id for message in get_messages(thread_id) if user_services.is_user_registered(message.author_id) } batch_recipient_ids = owner_ids - {author_id} other_recipient_ids = participant_ids - batch_recipient_ids - {author_id} return (list(batch_recipient_ids), list(other_recipient_ids)) def _send_batch_emails( recipient_list, feedback_message_reference, exploration_id, has_suggestion): """Adds the given FeedbackMessageReference to each of the recipient's email buffers. The collected messages will be sent out as a batch after a short delay. Args: recipient_list: list(str). A list of user_ids of all recipients of the email. feedback_message_reference: FeedbackMessageReference. The reference to add to each email buffer. exploration_id: str. The id of exploration that received new message. has_suggestion: bool. Whether this thread has a related learner suggestion. 
""" can_recipients_receive_email = email_manager.can_users_receive_thread_email( recipient_list, exploration_id, has_suggestion) for recipient_id, can_receive_email in zip( recipient_list, can_recipients_receive_email): if can_receive_email: _add_feedback_message_reference_transactional( recipient_id, feedback_message_reference) def _send_instant_emails( recipient_list, feedback_message_reference, exploration_id, has_suggestion): """Adds the given FeedbackMessageReference to each of the recipient's email buffers. The collected messages will be sent out immediately. Args: recipient_list: list(str). A list of user_ids of all recipients of the email. feedback_message_reference: FeedbackMessageReference. The reference to add to each email buffer. exploration_id: str. The id of exploration that received new message. has_suggestion: bool. Whether this thread has a related learner suggestion. """ can_recipients_receive_email = email_manager.can_users_receive_thread_email( recipient_list, exploration_id, has_suggestion) for recipient_id, can_receive_email in zip( recipient_list, can_recipients_receive_email): if can_receive_email: enqueue_feedback_message_instant_email_task_transactional( recipient_id, feedback_message_reference) def _send_feedback_thread_status_change_emails( recipient_list, feedback_message_reference, old_status, new_status, exploration_id, has_suggestion): """Notifies the given recipients about the status change. Args: recipient_list: list(str). A list of recipient ids. feedback_message_reference: FeedbackMessageReference. The reference to add to each email buffer. old_status: str. One of STATUS_CHOICES. new_status: str. One of STATUS_CHOICES. exploration_id: str. The id of the exploration that received a new message. has_suggestion: bool. Whether this thread has a related learner suggestion. 
""" can_recipients_receive_email = email_manager.can_users_receive_thread_email( recipient_list, exploration_id, has_suggestion) for recipient_id, can_receive_email in zip( recipient_list, can_recipients_receive_email): if can_receive_email: _enqueue_feedback_thread_status_change_email_task_transactional( recipient_id, feedback_message_reference, old_status, new_status) def _add_message_to_email_buffer( author_id, thread_id, message_id, message_length, old_status, new_status): """Sends the given message to the recipients of the given thread. If status has changed, notify the recipients as well. Args: author_id: str. The id of the author of message. thread_id: str. The id of the thread that received new message. message_id: int. The id of the new message. message_length: int. Length of the feedback message to be sent. old_status: str. One of STATUS_CHOICES. Value of old thread status. new_status: str. One of STATUS_CHOICES. Value of new thread status. """ thread = feedback_models.GeneralFeedbackThreadModel.get_by_id(thread_id) exploration_id = thread.entity_id has_suggestion = thread.has_suggestion feedback_message_reference = feedback_domain.FeedbackMessageReference( thread.entity_type, thread.entity_id, thread_id, message_id) batch_recipient_ids, other_recipient_ids = ( _get_all_recipient_ids(exploration_id, thread_id, author_id)) if old_status != new_status: # Send email for feedback thread status change. _send_feedback_thread_status_change_emails( other_recipient_ids, feedback_message_reference, old_status, new_status, exploration_id, has_suggestion) if message_length: # Send feedback message email only if message text is non empty (the # message text can be empty in the case when only status is changed). 
_send_batch_emails( batch_recipient_ids, feedback_message_reference, exploration_id, has_suggestion) _send_instant_emails( other_recipient_ids, feedback_message_reference, exploration_id, has_suggestion) def delete_exploration_feedback_analytics(exp_ids): """Deletes the FeedbackAnalyticsModel models corresponding to the given exp_ids. Args: exp_ids: list(str). A list of exploration IDs whose feedback analytics models are to be deleted. """ feedback_analytics_models = ( feedback_models.FeedbackAnalyticsModel.get_multi( exp_ids)) feedback_analytics_models_to_be_deleted = [ model for model in feedback_analytics_models if model is not None] feedback_models.FeedbackAnalyticsModel.delete_multi( feedback_analytics_models_to_be_deleted) def handle_new_thread_created(exp_id): """Reacts to new threads added to an exploration. Args: exp_id: str. The exploration ID associated with the thread. """ _increment_total_threads_count_transactional(exp_id) _increment_open_threads_count_transactional(exp_id) def handle_thread_status_changed(exp_id, old_status, new_status): """Reacts to changes in an exploration thread's status. Args: exp_id: str. The exploration ID associated with the thread. old_status: str. The old status of the thread. new_status: str. The updated status of the thread. """ # Status changed from closed to open. if (old_status != feedback_models.STATUS_CHOICES_OPEN and new_status == feedback_models.STATUS_CHOICES_OPEN): _increment_open_threads_count_transactional(exp_id) # Status changed from open to closed. 
    elif (old_status == feedback_models.STATUS_CHOICES_OPEN and
          new_status != feedback_models.STATUS_CHOICES_OPEN):
        _decrement_open_threads_count_transactional(exp_id)


@transaction_services.run_in_transaction_wrapper
def _increment_open_threads_count_transactional(exp_id):
    """Increments count of open threads by one."""
    # Fetch the existing analytics model, or start a fresh one the first time
    # analytics are recorded for this exploration.
    model = (
        feedback_models.FeedbackAnalyticsModel.get(exp_id, strict=False) or
        feedback_models.FeedbackAnalyticsModel(id=exp_id, num_open_threads=0))
    # `num_open_threads` may be unset (None) on an existing model; the
    # `or 0` treats that as zero before incrementing.
    model.num_open_threads = (model.num_open_threads or 0) + 1
    model.update_timestamps()
    model.put()


@transaction_services.run_in_transaction_wrapper
def _increment_total_threads_count_transactional(exp_id):
    """Increments count of total threads by one."""
    model = (
        feedback_models.FeedbackAnalyticsModel.get(exp_id, strict=False) or
        feedback_models.FeedbackAnalyticsModel(id=exp_id, num_total_threads=0))
    # `num_total_threads` may be unset (None) on an existing model; the
    # `or 0` treats that as zero before incrementing.
    model.num_total_threads = (model.num_total_threads or 0) + 1
    model.update_timestamps()
    model.put()


@transaction_services.run_in_transaction_wrapper
def _decrement_open_threads_count_transactional(exp_id):
    """Decrements count of open threads by one."""
    model = (
        feedback_models.FeedbackAnalyticsModel.get(exp_id, strict=False) or
        feedback_models.FeedbackAnalyticsModel(id=exp_id, num_open_threads=0))
    # The `or 1` makes the result 0 (rather than negative, or a TypeError on
    # None) when the count is unset.
    model.num_open_threads = (model.num_open_threads or 1) - 1
    model.update_timestamps()
    model.put()
{ "content_hash": "16a8621c0113a129ebf85a20ae5a1d50", "timestamp": "", "source": "github", "line_count": 1232, "max_line_length": 80, "avg_line_length": 39.92613636363637, "alnum_prop": 0.6688283965927342, "repo_name": "brianrodri/oppia", "id": "5d40e7ea1d186ae60589fd1c923fef99abe7c1c4", "size": "49814", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "core/domain/feedback_services.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "487903" }, { "name": "HTML", "bytes": "1748056" }, { "name": "JavaScript", "bytes": "1176446" }, { "name": "PEG.js", "bytes": "71377" }, { "name": "Python", "bytes": "14169091" }, { "name": "Shell", "bytes": "2239" }, { "name": "TypeScript", "bytes": "13316709" } ], "symlink_target": "" }
import functools

import Gaffer
import GafferUI

## A simple PlugValueWidget which just displays the node connected
# to a Plug.
class ConnectionPlugValueWidget( GafferUI.PlugValueWidget ) :

	def __init__( self, plug, **kw ) :

		self.__frame = GafferUI.Frame( borderWidth = 2 )

		GafferUI.PlugValueWidget.__init__( self, self.__frame, plug, **kw )

		# Label showing the source of the plug's input connection. The graph
		# component is filled in by _updateFromPlug(); numComponents is also
		# adjusted there to span "node.plug[.child...]".
		self.__inputLabel = GafferUI.NameLabel(
			None,
			horizontalAlignment = GafferUI.HorizontalAlignment.Center,
			formatter=self.__labelFormatter,
			numComponents=2,
		)
		row = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal )
		row.append( self.__inputLabel, horizontalAlignment = GafferUI.HorizontalAlignment.Center, expand = True )
		self.__frame.setChild( row )

		# WeakMethod avoids a reference cycle between the widget and its
		# signal connections.
		self.__frame.buttonReleaseSignal().connect( Gaffer.WeakMethod( self.__buttonRelease ), scoped = False )
		self.__inputLabel.buttonReleaseSignal().connect( Gaffer.WeakMethod( self.__buttonRelease ), scoped = False )
		# Highlight the frame while the pointer hovers over it.
		self.__frame.enterSignal().connect( functools.partial( GafferUI.Frame.setHighlighted, highlighted=True ), scoped = False )
		self.__frame.leaveSignal().connect( functools.partial( GafferUI.Frame.setHighlighted, highlighted=False ), scoped = False )

		self._addPopupMenu( self.__frame )

		self._updateFromPlug()

	# Forwards highlighting to the frame so the whole widget lights up.
	def setHighlighted( self, highlighted ) :

		GafferUI.PlugValueWidget.setHighlighted( self, highlighted )
		self.__frame.setHighlighted( highlighted )

	# Extends the base tooltip with the actions available when the plug
	# has an input connection.
	def getToolTip( self ) :

		result = GafferUI.PlugValueWidget.getToolTip( self )

		srcNode = None
		if self.getPlug() is not None :
			input = self.getPlug().getInput()
			if input is not None :
				srcNode = input.node()

		if srcNode is not None :
			if result :
				result += "\n"
			result += "## Actions\n\n"
			result += " - Left drag to drag source plug\n"
			result += " - Left click to edit source node\n"

		return result

	def _updateFromPlug( self ) :

		input = self.getPlug().getInput()
		self.__inputLabel.setGraphComponent( input )
		if input is not None :
			# The relative name has count( "." ) + 1 components; one more is
			# added so the node name itself is shown as well.
			self.__inputLabel.setNumComponents( input.relativeName( input.node() ).count( "." ) + 2 )

	# Left-click opens a NodeEditor for the source node of the connection.
	def __buttonRelease( self, widget, event ) :

		if event.button == event.Buttons.Left :
			if self.getPlug().getInput() is not None :
				GafferUI.NodeEditor.acquire( self.getPlug().getInput().node() )
				return True

		return False

	# Renders the component names as bold rich text, or an italic "None"
	# placeholder when there is no input connection.
	@staticmethod
	def __labelFormatter( graphComponents ) :

		if graphComponents :
			return "<b>" + ".".join( [ g.getName() for g in graphComponents ] ) + "</b>"
		else :
			return "<b><i>None</i></b>"

GafferUI.PlugValueWidget.registerType( Gaffer.Plug, ConnectionPlugValueWidget )
{ "content_hash": "ff4c75b738983f9ded9a0d789e8364b6", "timestamp": "", "source": "github", "line_count": 84, "max_line_length": 125, "avg_line_length": 31.19047619047619, "alnum_prop": 0.7057251908396946, "repo_name": "johnhaddon/gaffer", "id": "a7eff348f2240306420fd9946d6132856ccf856a", "size": "4423", "binary": false, "copies": "4", "ref": "refs/heads/main", "path": "python/GafferUI/ConnectionPlugValueWidget.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "Batchfile", "bytes": "5790" }, { "name": "C", "bytes": "61993" }, { "name": "C++", "bytes": "9571062" }, { "name": "CMake", "bytes": "85201" }, { "name": "GLSL", "bytes": "6208" }, { "name": "Python", "bytes": "10271481" }, { "name": "Ruby", "bytes": "419" }, { "name": "Shell", "bytes": "14389" } ], "symlink_target": "" }
# NOTE(review): proto-plus message bindings for the Google Ads v10
# BatchJobService. This module is machine-generated from the .proto
# definitions; do not edit by hand — regenerate instead.
import proto  # type: ignore

from google.ads.googleads.v10.enums.types import (
    response_content_type as gage_response_content_type,
)
from google.ads.googleads.v10.resources.types import batch_job
from google.ads.googleads.v10.services.types import google_ads_service
from google.rpc import status_pb2  # type: ignore


__protobuf__ = proto.module(
    package="google.ads.googleads.v10.services",
    marshal="google.ads.googleads.v10",
    manifest={
        "MutateBatchJobRequest",
        "BatchJobOperation",
        "MutateBatchJobResponse",
        "MutateBatchJobResult",
        "RunBatchJobRequest",
        "AddBatchJobOperationsRequest",
        "AddBatchJobOperationsResponse",
        "ListBatchJobResultsRequest",
        "ListBatchJobResultsResponse",
        "BatchJobResult",
    },
)


class MutateBatchJobRequest(proto.Message):
    r"""Request message for
    [BatchJobService.MutateBatchJob][google.ads.googleads.v10.services.BatchJobService.MutateBatchJob].

    Attributes:
        customer_id (str):
            Required. The ID of the customer for which to
            create a batch job.
        operation (google.ads.googleads.v10.services.types.BatchJobOperation):
            Required. The operation to perform on an
            individual batch job.
    """

    customer_id = proto.Field(
        proto.STRING,
        number=1,
    )
    operation = proto.Field(
        proto.MESSAGE,
        number=2,
        message="BatchJobOperation",
    )


class BatchJobOperation(proto.Message):
    r"""A single operation on a batch job.

    This message has `oneof`_ fields (mutually exclusive fields).
    For each oneof, at most one member field can be set at the same time.
    Setting any member of the oneof automatically clears all other
    members.

    .. _oneof: https://proto-plus-python.readthedocs.io/en/stable/fields.html#oneofs-mutually-exclusive-fields

    Attributes:
        create (google.ads.googleads.v10.resources.types.BatchJob):
            Create operation: No resource name is
            expected for the new batch job.

            This field is a member of `oneof`_ ``operation``.
        remove (str):
            Remove operation: The batch job must not have been run. A
            resource name for the removed batch job is expected, in this
            format:

            ``customers/{customer_id}/batchJobs/{batch_job_id}``

            This field is a member of `oneof`_ ``operation``.
    """

    create = proto.Field(
        proto.MESSAGE,
        number=1,
        oneof="operation",
        message=batch_job.BatchJob,
    )
    remove = proto.Field(
        proto.STRING,
        number=4,
        oneof="operation",
    )


class MutateBatchJobResponse(proto.Message):
    r"""Response message for
    [BatchJobService.MutateBatchJob][google.ads.googleads.v10.services.BatchJobService.MutateBatchJob].

    Attributes:
        result (google.ads.googleads.v10.services.types.MutateBatchJobResult):
            The result for the mutate.
    """

    result = proto.Field(
        proto.MESSAGE,
        number=1,
        message="MutateBatchJobResult",
    )


class MutateBatchJobResult(proto.Message):
    r"""The result for the batch job mutate.

    Attributes:
        resource_name (str):
            The resource name of the batch job.
    """

    resource_name = proto.Field(
        proto.STRING,
        number=1,
    )


class RunBatchJobRequest(proto.Message):
    r"""Request message for
    [BatchJobService.RunBatchJob][google.ads.googleads.v10.services.BatchJobService.RunBatchJob].

    Attributes:
        resource_name (str):
            Required. The resource name of the BatchJob
            to run.
    """

    resource_name = proto.Field(
        proto.STRING,
        number=1,
    )


class AddBatchJobOperationsRequest(proto.Message):
    r"""Request message for
    [BatchJobService.AddBatchJobOperations][google.ads.googleads.v10.services.BatchJobService.AddBatchJobOperations].

    Attributes:
        resource_name (str):
            Required. The resource name of the batch job.
        sequence_token (str):
            A token used to enforce sequencing.

            The first AddBatchJobOperations request for a batch job
            should not set sequence_token. Subsequent requests must set
            sequence_token to the value of next_sequence_token received
            in the previous AddBatchJobOperations response.
        mutate_operations (Sequence[google.ads.googleads.v10.services.types.MutateOperation]):
            Required. The list of mutates being added.

            Operations can use negative integers as temp ids to signify
            dependencies between entities created in this batch job.
            For example, a customer with id = 1234 can create a campaign
            and an ad group in that same campaign by creating a campaign
            in the first operation with the resource name explicitly set
            to "customers/1234/campaigns/-1", and creating an ad group
            in the second operation with the campaign field also set to
            "customers/1234/campaigns/-1".
    """

    resource_name = proto.Field(
        proto.STRING,
        number=1,
    )
    sequence_token = proto.Field(
        proto.STRING,
        number=2,
    )
    mutate_operations = proto.RepeatedField(
        proto.MESSAGE,
        number=3,
        message=google_ads_service.MutateOperation,
    )


class AddBatchJobOperationsResponse(proto.Message):
    r"""Response message for
    [BatchJobService.AddBatchJobOperations][google.ads.googleads.v10.services.BatchJobService.AddBatchJobOperations].

    Attributes:
        total_operations (int):
            The total number of operations added so far
            for this batch job.
        next_sequence_token (str):
            The sequence token to be used when calling
            AddBatchJobOperations again if more operations
            need to be added. The next
            AddBatchJobOperations request must set the
            sequence_token field to the value of this
            field.
    """

    total_operations = proto.Field(
        proto.INT64,
        number=1,
    )
    next_sequence_token = proto.Field(
        proto.STRING,
        number=2,
    )


class ListBatchJobResultsRequest(proto.Message):
    r"""Request message for
    [BatchJobService.ListBatchJobResults][google.ads.googleads.v10.services.BatchJobService.ListBatchJobResults].

    Attributes:
        resource_name (str):
            Required. The resource name of the batch job
            whose results are being listed.
        page_token (str):
            Token of the page to retrieve. If not specified, the first
            page of results will be returned. Use the value obtained
            from ``next_page_token`` in the previous response in order
            to request the next page of results.
        page_size (int):
            Number of elements to retrieve in a single
            page. When a page request is too large, the
            server may decide to further limit the number of
            returned resources.
        response_content_type (google.ads.googleads.v10.enums.types.ResponseContentTypeEnum.ResponseContentType):
            The response content type setting. Determines
            whether the mutable resource or just the
            resource name should be returned.
    """

    resource_name = proto.Field(
        proto.STRING,
        number=1,
    )
    page_token = proto.Field(
        proto.STRING,
        number=2,
    )
    page_size = proto.Field(
        proto.INT32,
        number=3,
    )
    response_content_type = proto.Field(
        proto.ENUM,
        number=4,
        enum=gage_response_content_type.ResponseContentTypeEnum.ResponseContentType,
    )


class ListBatchJobResultsResponse(proto.Message):
    r"""Response message for
    [BatchJobService.ListBatchJobResults][google.ads.googleads.v10.services.BatchJobService.ListBatchJobResults].

    Attributes:
        results (Sequence[google.ads.googleads.v10.services.types.BatchJobResult]):
            The list of rows that matched the query.
        next_page_token (str):
            Pagination token used to retrieve the next page of results.
            Pass the content of this string as the ``page_token``
            attribute of the next request. ``next_page_token`` is not
            returned for the last page.
    """

    @property
    def raw_page(self):
        return self

    results = proto.RepeatedField(
        proto.MESSAGE,
        number=1,
        message="BatchJobResult",
    )
    next_page_token = proto.Field(
        proto.STRING,
        number=2,
    )


class BatchJobResult(proto.Message):
    r"""An individual batch job result.

    Attributes:
        operation_index (int):
            Index of the mutate operation.
        mutate_operation_response (google.ads.googleads.v10.services.types.MutateOperationResponse):
            Response for the mutate.
            May be empty if errors occurred.
        status (google.rpc.status_pb2.Status):
            Details of the errors when processing the
            operation.
    """

    operation_index = proto.Field(
        proto.INT64,
        number=1,
    )
    mutate_operation_response = proto.Field(
        proto.MESSAGE,
        number=2,
        message=google_ads_service.MutateOperationResponse,
    )
    status = proto.Field(
        proto.MESSAGE,
        number=3,
        message=status_pb2.Status,
    )


__all__ = tuple(sorted(__protobuf__.manifest))
{ "content_hash": "02d62a3839f7bacd3017c973bc153478", "timestamp": "", "source": "github", "line_count": 308, "max_line_length": 117, "avg_line_length": 31.123376623376622, "alnum_prop": 0.6434383475902358, "repo_name": "googleads/google-ads-python", "id": "dec4f03c53eda15a4edb1e09b21411d3fb30b675", "size": "10186", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "google/ads/googleads/v10/services/types/batch_job_service.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "23399881" } ], "symlink_target": "" }
import pandas as pd
import os
from sklearn import preprocessing
from ..main import config
import numpy as np
import sys


def get_normalized_features(filename):
    """Z-score-normalize the feature columns of a train/val/test CSV triple.

    The scaler is fit on the training split only and the same mean/std is
    applied to the validation and test splits, avoiding information leakage.
    Normalized copies are written next to the inputs with "regular" replaced
    by "normalize" in the path.

    Args:
        filename: Path to the training CSV. The matching validation and test
            CSVs are derived by replacing "train" with "val"/"test" in the
            path. Assumes the first column is an identifier and the last two
            columns are 'label' and 'score' -- TODO confirm against the
            feature-extraction output schema.
    """
    filename_train = filename
    filename_val = filename.replace("train", "val")
    filename_test = filename.replace("train", "test")

    data_train = pd.read_csv(filename_train)
    data_val = pd.read_csv(filename_val)
    data_test = pd.read_csv(filename_test)

    # Skip the leading identifier column; drop the trailing label/score pair.
    columns = data_train.columns[1:]
    column = columns[:-2]

    X_train = data_train.as_matrix(columns=column)
    X_val = data_val.as_matrix(columns=column)
    X_test = data_test.as_matrix(columns=column)

    # Fit on train only; reuse the fitted statistics for val and test.
    scaler = preprocessing.StandardScaler().fit(X_train)
    transformed_train = scaler.transform(X_train)
    transformed_val = scaler.transform(X_val)

    # The test split can contain missing values; zero-fill before scaling so
    # transform() does not propagate NaNs.
    X_test[np.isnan(X_test)] = 0
    transformed_test = scaler.transform(X_test)

    data_normalized_train = pd.DataFrame(transformed_train, columns=column)
    data_normalized_val = pd.DataFrame(transformed_val, columns=column)
    data_normalized_test = pd.DataFrame(transformed_test, columns=column)

    # Carry the identifier/label/score columns through unchanged.
    meta_cols = ['video', 'label', 'score']
    data_normalized_train[meta_cols] = data_train[meta_cols]
    data_normalized_val[meta_cols] = data_val[meta_cols]
    data_normalized_test[meta_cols] = data_test[meta_cols]

    data_normalized_train.to_csv(
        filename_train.replace("regular", "normalize"), index=None)
    data_normalized_val.to_csv(
        filename_val.replace("regular", "normalize"), index=None)
    data_normalized_test.to_csv(
        filename_test.replace("regular", "normalize"), index=None)


def normalize_features(select="select"):
    """Normalize every training CSV under the configured feature directories.

    Args:
        select: "select" to use the selected-feature directories
            (config.SEL_FEAT_*); any other value uses the all-feature
            directories (config.ALL_FEAT_*).
    """
    if select == "select":
        path_classify = config.SEL_FEAT_TRAIN_REGULAR_CLASSIFY
        path_estimate = config.SEL_FEAT_TRAIN_REGULAR_ESTIMATE
    else:
        path_classify = config.ALL_FEAT_TRAIN_REGULAR_CLASSIFY
        path_estimate = config.ALL_FEAT_TRAIN_REGULAR_ESTIMATE

    # BUG FIX: the directory walk previously always listed the SEL_* paths,
    # even when `select` chose the ALL_* paths, so SEL filenames were joined
    # onto the wrong base directory. Walk the selected paths instead.
    list_train_classify = [os.path.join(path_classify, fn)
                           for fn in next(os.walk(path_classify))[2]]
    print(list_train_classify)
    for train_file in list_train_classify:
        get_normalized_features(train_file)

    list_train_estimate = [os.path.join(path_estimate, fn)
                           for fn in next(os.walk(path_estimate))[2]]
    print(list_train_estimate)
    for train_file in list_train_estimate:
        get_normalized_features(train_file)


if __name__ == '__main__':
    select = "select"
    if len(sys.argv) == 2:
        select = sys.argv[1]
    print(select)
    normalize_features(select)
{ "content_hash": "ac83d4e87a9bb7fcc1e34fb7067355d9", "timestamp": "", "source": "github", "line_count": 86, "max_line_length": 126, "avg_line_length": 36.76744186046512, "alnum_prop": 0.6786843769765971, "repo_name": "ab93/Depression-Identification", "id": "7f51728f4df1a2c6574fae08c38bbb6910b65707", "size": "3162", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "src/helpers/normalized_features.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "213001" } ], "symlink_target": "" }
from __future__ import unicode_literals

from django.db import migrations, models


# Adds a `queued` boolean flag (indexed, default False) to main.Notification.
# Auto-generated schema artifact: do not edit operations by hand once applied.
class Migration(migrations.Migration):

    dependencies = [
        ('main', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='notification',
            name='queued',
            field=models.BooleanField(db_index=True, default=False),
        ),
    ]
{ "content_hash": "bfaa69fd4e7662855e920b45c9b0f419", "timestamp": "", "source": "github", "line_count": 18, "max_line_length": 68, "avg_line_length": 21.444444444444443, "alnum_prop": 0.5958549222797928, "repo_name": "CSchool/SchoolSite", "id": "cc327d73c0f2912f42cfe0dadbde8a56a2632974", "size": "457", "binary": false, "copies": "1", "ref": "refs/heads/develop", "path": "CSchoolSite/main/migrations/0002_notification_queued.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "98937" }, { "name": "CSS", "bytes": "1820" }, { "name": "HTML", "bytes": "60303" }, { "name": "JavaScript", "bytes": "14501" }, { "name": "Makefile", "bytes": "3042" }, { "name": "Python", "bytes": "208264" }, { "name": "TeX", "bytes": "19748" } ], "symlink_target": "" }
import rospy
from std_msgs.msg import String


def talker():
    """Publish a timestamped greeting on the 'chatter' topic at 10 Hz."""
    publisher = rospy.Publisher('chatter', String, queue_size=10)
    rospy.init_node('talker', anonymous=True)
    loop_rate = rospy.Rate(10)  # 10hz
    while not rospy.is_shutdown():
        message = "hello world %s" % rospy.get_time()
        rospy.loginfo(message)
        publisher.publish(message)
        loop_rate.sleep()


if __name__ == '__main__':
    try:
        talker()
    except rospy.ROSInterruptException:
        # Raised when the node is shut down while sleeping; exit quietly.
        pass
{ "content_hash": "4848ee88a555a3ea15c7186304f5d98a", "timestamp": "", "source": "github", "line_count": 18, "max_line_length": 59, "avg_line_length": 26.72222222222222, "alnum_prop": 0.6112266112266113, "repo_name": "nvl1109/car_control", "id": "1957fb4fdcfceb54140724e60d7d168aae1e1e95", "size": "534", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "scripts/talker.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "CMake", "bytes": "355" }, { "name": "Python", "bytes": "9123" } ], "symlink_target": "" }
from taiga.base.api.permissions import (AllowAny as TruePermissionComponent,
                                        DenyAll as FalsePermissionComponent)


def test_permission_component_composition():
    """Composed permission components follow the boolean truth tables."""
    def allowed(component):
        return component.check_permissions(None, None, None)

    T = TruePermissionComponent
    F = FalsePermissionComponent

    # OR: permitted when at least one side permits.
    assert allowed(T() | T())
    assert allowed(T() | F())
    assert allowed(F() | T())
    assert not allowed(F() | F())

    # AND: permitted only when both sides permit.
    assert allowed(T() & T())
    assert not allowed(T() & F())
    assert not allowed(F() & T())
    assert not allowed(F() & F())

    # NOT: inverts the wrapped component.
    assert allowed(~F())
    assert not allowed(~T())
{ "content_hash": "8fe582c4c2974ce0f6b5e3721be371dc", "timestamp": "", "source": "github", "line_count": 17, "max_line_length": 108, "avg_line_length": 71.05882352941177, "alnum_prop": 0.7566225165562914, "repo_name": "curiosityio/taiga-docker", "id": "b858252f37c2885ffe05f512a47fc338cbfea49d", "size": "2120", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "taiga-back/taiga-back/tests/unit/test_base_api_permissions.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "186988" }, { "name": "JavaScript", "bytes": "2007" }, { "name": "Nginx", "bytes": "4140" }, { "name": "Python", "bytes": "2793020" }, { "name": "Shell", "bytes": "1392" } ], "symlink_target": "" }
"""add task_reschedule table Revision ID: 0a2a5b66e19d Revises: 9635ae0956e7 Create Date: 2018-06-17 22:50:00.053620 """ import sqlalchemy as sa from alembic import op from sqlalchemy.dialects import mysql # revision identifiers, used by Alembic. from airflow.models.base import COLLATION_ARGS revision = '0a2a5b66e19d' down_revision = '9635ae0956e7' branch_labels = None depends_on = None TABLE_NAME = 'task_reschedule' INDEX_NAME = 'idx_' + TABLE_NAME + '_dag_task_date' # For Microsoft SQL Server, TIMESTAMP is a row-id type, # having nothing to do with date-time. DateTime() will # be sufficient. def mssql_timestamp(): # noqa: D103 return sa.DateTime() def mysql_timestamp(): # noqa: D103 return mysql.TIMESTAMP(fsp=6) def sa_timestamp(): # noqa: D103 return sa.TIMESTAMP(timezone=True) def upgrade(): # noqa: D103 # See 0e2a74e0fc9f_add_time_zone_awareness conn = op.get_bind() if conn.dialect.name == 'mysql': timestamp = mysql_timestamp elif conn.dialect.name == 'mssql': timestamp = mssql_timestamp else: timestamp = sa_timestamp op.create_table( TABLE_NAME, sa.Column('id', sa.Integer(), nullable=False), sa.Column('task_id', sa.String(length=250, **COLLATION_ARGS), nullable=False), sa.Column('dag_id', sa.String(length=250, **COLLATION_ARGS), nullable=False), # use explicit server_default=None otherwise mysql implies defaults for first timestamp column sa.Column('execution_date', timestamp(), nullable=False, server_default=None), sa.Column('try_number', sa.Integer(), nullable=False), sa.Column('start_date', timestamp(), nullable=False), sa.Column('end_date', timestamp(), nullable=False), sa.Column('duration', sa.Integer(), nullable=False), sa.Column('reschedule_date', timestamp(), nullable=False), sa.PrimaryKeyConstraint('id'), sa.ForeignKeyConstraint( ['task_id', 'dag_id', 'execution_date'], ['task_instance.task_id', 'task_instance.dag_id', 'task_instance.execution_date'], name='task_reschedule_dag_task_date_fkey', ), ) op.create_index(INDEX_NAME, TABLE_NAME, ['dag_id', 
'task_id', 'execution_date'], unique=False) def downgrade(): # noqa: D103 op.drop_index(INDEX_NAME, table_name=TABLE_NAME) op.drop_table(TABLE_NAME)
{ "content_hash": "01c9a3d5031351f67bac555504003992", "timestamp": "", "source": "github", "line_count": 73, "max_line_length": 102, "avg_line_length": 32.78082191780822, "alnum_prop": 0.6661094860008357, "repo_name": "airbnb/airflow", "id": "4c572f44ec5a28bed3ff52acc27b999f2d06273e", "size": "3179", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "airflow/migrations/versions/0a2a5b66e19d_add_task_reschedule_table.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "CSS", "bytes": "36374" }, { "name": "HTML", "bytes": "99535" }, { "name": "JavaScript", "bytes": "891618" }, { "name": "Mako", "bytes": "494" }, { "name": "Python", "bytes": "796220" }, { "name": "Shell", "bytes": "9040" } ], "symlink_target": "" }
"""Graph actions tests.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import shutil import tempfile import tensorflow as tf from tensorflow.contrib import testing from tensorflow.contrib.learn.python import learn from tensorflow.contrib.learn.python.learn.monitors import BaseMonitor from tensorflow.python.framework import meta_graph from tensorflow.python.framework import ops from tensorflow.python.framework import test_ops from tensorflow.python.ops import resources from tensorflow.python.ops import variables class _Feeder(object): """Simple generator for `feed_fn`, returning 10 * step.""" def __init__(self, tensor, max_step): self._step = 0 self._tensor = tensor self._max_step = max_step @property def step(self): return self._step def feed_fn(self): if self._step >= self._max_step: raise StopIteration value = self._step * 10.0 self._step += 1 return {self._tensor: value} class _BaseMonitorWrapper(BaseMonitor): """Base monitor wrapper to facilitate testing. This monitor can act as either chief-exclusive or non-exclusive. 
""" def __init__(self, run_on_all_workers): super(_BaseMonitorWrapper, self).__init__() self._run_on_all_workers = run_on_all_workers self._is_active = False self._has_step = False @property def run_on_all_workers(self): return self._run_on_all_workers @property def is_active(self): return self._is_active @property def has_step(self): return self._has_step def begin(self, max_steps=None): self._is_active = True return super(_BaseMonitorWrapper, self).begin(max_steps) def step_begin(self, step): self._has_step = True return super(_BaseMonitorWrapper, self).step_begin(step) class GraphActionsTest(tf.test.TestCase): """Graph actions tests.""" def setUp(self): learn.graph_actions.clear_summary_writers() self._output_dir = tempfile.mkdtemp() testing.FakeSummaryWriter.install() def tearDown(self): testing.FakeSummaryWriter.uninstall() if self._output_dir: shutil.rmtree(self._output_dir) learn.graph_actions.clear_summary_writers() def _assert_summaries( self, output_dir, writer, expected_summaries=None, expected_graphs=None, expected_meta_graphs=None, expected_session_logs=None): self.assertTrue(isinstance(writer, testing.FakeSummaryWriter)) writer.assert_summaries( self, expected_logdir=output_dir, expected_graph=tf.get_default_graph(), expected_summaries=expected_summaries, expected_added_graphs=expected_graphs, expected_added_meta_graphs=expected_meta_graphs, expected_session_logs=expected_session_logs) # TODO(ptucker): Test number and contents of checkpoint files. 
def _assert_ckpt(self, output_dir, expected=True): ckpt_state = tf.train.get_checkpoint_state(output_dir) if expected: pattern = '%s/model.ckpt-.*' % output_dir primary_ckpt_path = ckpt_state.model_checkpoint_path self.assertRegexpMatches(primary_ckpt_path, pattern) all_ckpt_paths = ckpt_state.all_model_checkpoint_paths self.assertTrue(primary_ckpt_path in all_ckpt_paths) for ckpt_path in all_ckpt_paths: self.assertRegexpMatches(ckpt_path, pattern) else: self.assertTrue(ckpt_state is None) # TODO(ptucker): Test lock, multi-threaded access? def test_summary_writer(self): writer = learn.graph_actions.get_summary_writer('log/dir/0') self._assert_summaries('log/dir/0', writer) self.assertTrue( learn.graph_actions.get_summary_writer('log/dir/0') is learn.graph_actions.get_summary_writer('log/dir/0')) self.assertTrue( learn.graph_actions.get_summary_writer('log/dir/0') is not learn.graph_actions.get_summary_writer('log/dir/1')) # TODO(ptucker): Test restore_checkpoint_path for eval; this should obsolete # test_evaluate_with_saver(). # TODO(ptucker): Test start_queue_runners for both eval & train. # TODO(ptucker): Test coord.request_stop & coord.join for eval. def _build_inference_graph(self): """Build simple inference graph. This includes a regular variable, local variable, and fake table. Returns: Tuple of 3 `Tensor` objects, 2 input and 1 output. 
""" tf.contrib.framework.create_global_step() in0 = tf.Variable(1.0) in1 = tf.contrib.framework.local_variable(2.0) fake_table = tf.Variable( 3.0, trainable=False, collections=['fake_tables'], name='fake_table_var') in0.graph.add_to_collections( [tf.GraphKeys.TABLE_INITIALIZERS], fake_table.initializer) out = in0 + in1 + fake_table return in0, in1, out def test_infer(self): with tf.Graph().as_default() as g, self.test_session(g): self._assert_ckpt(self._output_dir, False) in0, in1, out = self._build_inference_graph() self.assertEqual( {'a': 1.0, 'b': 2.0, 'c': 6.0}, learn.graph_actions.infer(None, {'a': in0, 'b': in1, 'c': out})) self._assert_ckpt(self._output_dir, False) @tf.test.mock.patch.object( learn.graph_actions.coordinator.Coordinator, 'request_stop', side_effect=learn.graph_actions.coordinator.Coordinator.request_stop, autospec=True) def test_coordinator_request_stop_called(self, request_stop): with tf.Graph().as_default() as g, self.test_session(g): in0, in1, out = self._build_inference_graph() learn.graph_actions.infer(None, {'a': in0, 'b': in1, 'c': out}) self.assertTrue(request_stop.called) @tf.test.mock.patch.object( learn.graph_actions.coordinator.Coordinator, 'request_stop', side_effect=learn.graph_actions.coordinator.Coordinator.request_stop, autospec=True) def test_run_feeds_iter_cleanup_with_exceptions(self, request_stop): with tf.Graph().as_default() as g, self.test_session(g): in0, in1, out = self._build_inference_graph() try: for _ in learn.graph_actions.run_feeds_iter( {'a': in0, 'b': in1, 'c': out}, [None]*3): self.assertFalse(request_stop.called) raise ValueError('Fake exception') except ValueError: pass self.assertTrue(request_stop.called) def test_run_feeds_iter_calls_resources_init(self): with tf.Graph().as_default() as g: in0, _, _ = self._build_inference_graph() handle = test_ops.stub_resource_handle_op(container='a', shared_name='b') resources.register_resource( handle=handle, create_op=test_ops.resource_create_op(handle), 
is_initialized_op=test_ops.resource_initialized_op(handle)) for _ in learn.graph_actions.run_feeds_iter({'in0': in0}, feed_dicts=[{}]): self.assertTrue(test_ops.resource_initialized_op(handle).eval()) def test_infer_different_default_graph(self): with self.test_session(): self._assert_ckpt(self._output_dir, False) with tf.Graph().as_default(): in0, in1, out = self._build_inference_graph() with tf.Graph().as_default(): self.assertEqual( {'a': 1.0, 'b': 2.0, 'c': 6.0}, learn.graph_actions.infer(None, {'a': in0, 'b': in1, 'c': out})) self._assert_ckpt(self._output_dir, False) def test_infer_invalid_feed(self): with tf.Graph().as_default() as g, self.test_session(g): self._assert_ckpt(self._output_dir, False) in0, _, _ = self._build_inference_graph() with self.assertRaisesRegexp(TypeError, 'Can not convert a NoneType'): learn.graph_actions.infer(None, {'a': in0}, feed_dict={None: 4.0}) self._assert_ckpt(self._output_dir, False) def test_infer_feed(self): with tf.Graph().as_default() as g, self.test_session(g): self._assert_ckpt(self._output_dir, False) in0, _, out = self._build_inference_graph() self.assertEqual( {'c': 9.0}, learn.graph_actions.infer(None, {'c': out}, feed_dict={in0: 4.0})) self._assert_ckpt(self._output_dir, False) # TODO(ptucker): Test eval for 1 epoch. 
  def test_evaluate_invalid_args(self):
    # evaluate() requires a non-empty output_dir.
    with tf.Graph().as_default() as g, self.test_session(g):
      self._assert_ckpt(self._output_dir, False)
      with self.assertRaisesRegexp(ValueError, 'utput directory'):
        learn.graph_actions.evaluate(
            g,
            output_dir=None,
            checkpoint_path=None,
            eval_dict={'a': tf.constant(1.0)})
      with self.assertRaisesRegexp(ValueError, 'utput directory'):
        learn.graph_actions.evaluate(
            g,
            output_dir='',
            checkpoint_path=None,
            eval_dict={'a': tf.constant(1.0)})
      self._assert_ckpt(self._output_dir, False)

  def test_evaluate(self):
    # Single-step eval: result is (values, global_step) and one summary is
    # written; no checkpoint is created by evaluation.
    with tf.Graph().as_default() as g, self.test_session(g):
      _, _, out = self._build_inference_graph()
      writer = learn.graph_actions.get_summary_writer(self._output_dir)
      self._assert_summaries(self._output_dir, writer, expected_session_logs=[])
      self._assert_ckpt(self._output_dir, False)
      results = learn.graph_actions.evaluate(
          g,
          output_dir=self._output_dir,
          checkpoint_path=None,
          eval_dict={'a': out},
          max_steps=1)
      self.assertEqual(({'a': 6.0}, 0), results)
      self._assert_summaries(
          self._output_dir,
          writer,
          expected_summaries={0: {'a': 6.0}},
          expected_session_logs=[])
      self._assert_ckpt(self._output_dir, False)

  def test_evaluate_feed_fn(self):
    # feed_fn drives in0 each step; 3 steps feed 4, 5, 6 via _Feeder, and the
    # averaged result is 25.0.
    with tf.Graph().as_default() as g, self.test_session(g):
      in0, _, out = self._build_inference_graph()
      writer = learn.graph_actions.get_summary_writer(self._output_dir)
      self._assert_summaries(self._output_dir, writer, expected_session_logs=[])
      self._assert_ckpt(self._output_dir, False)
      feeder = _Feeder(in0, 3)
      results = learn.graph_actions.evaluate(
          g,
          output_dir=self._output_dir,
          checkpoint_path=None,
          eval_dict={'a': out},
          feed_fn=feeder.feed_fn,
          max_steps=3)
      self.assertEqual(3, feeder.step)
      self.assertEqual(({'a': 25.0}, 0), results)
      self._assert_summaries(
          self._output_dir,
          writer,
          expected_summaries={0: {'a': 25.0}},
          expected_session_logs=[])
      self._assert_ckpt(self._output_dir, False)

  def test_evaluate_feed_fn_with_exhaustion(self):
    # feed_fn raises StopIteration after 2 steps, before max_steps=3 is
    # reached; evaluation must stop cleanly at step 2.
    with tf.Graph().as_default() as g, self.test_session(g):
      in0, _, out = self._build_inference_graph()
      writer = learn.graph_actions.get_summary_writer(self._output_dir)
      self._assert_summaries(self._output_dir, writer, expected_session_logs=[])
      feeder = _Feeder(in0, 2)
      results = learn.graph_actions.evaluate(
          g,
          output_dir=self._output_dir,
          checkpoint_path=None,
          eval_dict={'a': out},
          feed_fn=feeder.feed_fn,
          max_steps=3)
      self.assertEqual(2, feeder.step)
      self.assertEqual(({'a': 15.0}, 0), results)
      self._assert_summaries(
          self._output_dir,
          writer,
          expected_summaries={0: {'a': 15.0}},
          expected_session_logs=[])

  def test_evaluate_with_saver(self):
    # A Saver registered in the SAVERS collection must not break evaluation.
    with tf.Graph().as_default() as g, self.test_session(g):
      _, _, out = self._build_inference_graph()
      tf.add_to_collection(tf.GraphKeys.SAVERS, tf.train.Saver())

      writer = learn.graph_actions.get_summary_writer(self._output_dir)
      self._assert_summaries(self._output_dir, writer, expected_session_logs=[])

      results = learn.graph_actions.evaluate(
          g,
          output_dir=self._output_dir,
          checkpoint_path=None,
          eval_dict={'a': out},
          max_steps=1)
      self.assertEqual(({'a': 6.0}, 0), results)
      self._assert_summaries(
          self._output_dir,
          writer,
          expected_summaries={0: {'a': 6.0}},
          expected_session_logs=[])

  def test_train_invalid_args(self):
    # _monitored_train validates output_dir, train_op, loss_op, and requires
    # a global step in the graph.
    with tf.Graph().as_default() as g, self.test_session(g):
      train_op = tf.constant(1.0)
      loss_op = tf.constant(2.0)
      with self.assertRaisesRegexp(ValueError, 'utput directory'):
        learn.graph_actions._monitored_train(g,  # pylint: disable=protected-access
                                             output_dir=None,
                                             train_op=train_op,
                                             loss_op=loss_op)
      with self.assertRaisesRegexp(ValueError, 'utput directory'):
        learn.graph_actions._monitored_train(  # pylint: disable=protected-access
            g,
            output_dir='',
            train_op=tf.constant(1.0),
            loss_op=tf.constant(2.0))
      with self.assertRaisesRegexp(ValueError, 'train_op'):
        learn.graph_actions._monitored_train(  # pylint: disable=protected-access
            g,
            output_dir=self._output_dir,
            train_op=None,
            loss_op=loss_op)
      with self.assertRaisesRegexp(ValueError, 'loss_op'):
        learn.graph_actions._monitored_train(  # pylint: disable=protected-access
            g,
            output_dir=self._output_dir,
            train_op=tf.constant(1.0),
            loss_op=None)
      with self.assertRaisesRegexp(ValueError, 'global_step'):
        learn.graph_actions._monitored_train(  # pylint: disable=protected-access
            g,
            output_dir=self._output_dir,
            train_op=tf.constant(1.0),
            loss_op=loss_op)

  # TODO(ptucker): Resume training from previous ckpt.
  # TODO(ptucker): !supervisor_is_chief
  # TODO(ptucker): Custom init op for training.
  # TODO(ptucker): Mock supervisor, and assert all interactions.
  def test_train(self):
    # One training step: returns the loss, writes graph + meta-graph
    # summaries, and creates a checkpoint.
    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      writer = learn.graph_actions.get_summary_writer(self._output_dir)
      self._assert_summaries(self._output_dir, writer)
      self._assert_ckpt(self._output_dir, False)
      loss = learn.graph_actions._monitored_train(  # pylint: disable=protected-access
          g,
          output_dir=self._output_dir,
          train_op=train_op,
          loss_op=tf.constant(2.0),
          steps=1)
      meta_graph_def = meta_graph.create_meta_graph_def()
      self.assertEqual(2.0, loss)
      self._assert_summaries(self._output_dir, writer, expected_graphs=[g],
                             expected_meta_graphs=[meta_graph_def])
      self._assert_ckpt(self._output_dir, True)

  def test_train_steps_is_incremental(self):
    # steps= is relative to the restored global step: 10 then 15 more -> 25.
    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      learn.graph_actions._monitored_train(  # pylint: disable=protected-access
          g,
          output_dir=self._output_dir,
          train_op=train_op,
          loss_op=tf.constant(2.0),
          steps=10)
      step = tf.contrib.framework.load_variable(
          self._output_dir, tf.contrib.framework.get_global_step().name)
      self.assertEqual(10, step)

    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      learn.graph_actions._monitored_train(  # pylint: disable=protected-access
          g,
          output_dir=self._output_dir,
          train_op=train_op,
          loss_op=tf.constant(2.0),
          steps=15)
      step = tf.contrib.framework.load_variable(
          self._output_dir, tf.contrib.framework.get_global_step().name)
      self.assertEqual(25, step)

  def test_train_max_steps_is_not_incremental(self):
    # max_steps= is absolute: a second run with max_steps=15 only advances
    # the global step from 10 to 15.
    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      learn.graph_actions._monitored_train(  # pylint: disable=protected-access
          g,
          output_dir=self._output_dir,
          train_op=train_op,
          loss_op=tf.constant(2.0),
          max_steps=10)
      step = tf.contrib.framework.load_variable(
          self._output_dir, tf.contrib.framework.get_global_step().name)
      self.assertEqual(10, step)

    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      learn.graph_actions._monitored_train(  # pylint: disable=protected-access
          g,
          output_dir=self._output_dir,
          train_op=train_op,
          loss_op=tf.constant(2.0),
          max_steps=15)
      step = tf.contrib.framework.load_variable(
          self._output_dir, tf.contrib.framework.get_global_step().name)
      self.assertEqual(15, step)

  def test_train_skip_train_if_max_step_already_saved(self):
    # Re-running with the same max_steps must be a no-op: the checkpointed
    # global step stays at 10.
    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      learn.graph_actions._monitored_train(  # pylint: disable=protected-access
          g,
          output_dir=self._output_dir,
          train_op=train_op,
          loss_op=tf.constant(2.0),
          max_steps=10)
      step = tf.contrib.framework.load_variable(
          self._output_dir, tf.contrib.framework.get_global_step().name)
      self.assertEqual(10, step)

    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      learn.graph_actions._monitored_train(  # pylint: disable=protected-access
          g,
          output_dir=self._output_dir,
          train_op=train_op,
          loss_op=tf.constant(2.0),
          max_steps=10)
      step = tf.contrib.framework.load_variable(
          self._output_dir, tf.contrib.framework.get_global_step().name)
      self.assertEqual(10, step)

  def test_train_loss(self):
    # loss_var starts at 10 and decreases by 1 per step; after 6 steps the
    # returned loss is 4.0.
    with tf.Graph().as_default() as g, self.test_session(g):
      tf.contrib.framework.create_global_step()
      loss_var = tf.contrib.framework.local_variable(10.0)
      train_op = tf.group(
          tf.assign_add(tf.contrib.framework.get_global_step(), 1),
          tf.assign_add(loss_var, -1.0))
      writer = learn.graph_actions.get_summary_writer(self._output_dir)
      self._assert_summaries(self._output_dir, writer)
      self._assert_ckpt(self._output_dir, False)
      loss = learn.graph_actions._monitored_train(  # pylint: disable=protected-access
          g,
          output_dir=self._output_dir,
          train_op=train_op,
          loss_op=loss_var.value(),
          steps=6)
      meta_graph_def = meta_graph.create_meta_graph_def()
      self.assertEqual(4.0, loss)
      self._assert_summaries(self._output_dir, writer, expected_graphs=[g],
                             expected_meta_graphs=[meta_graph_def])
      self._assert_ckpt(self._output_dir, True)

  def test_train_summaries(self):
    # A scalar summary attached to the loss must be recorded at step 1.
    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      loss_op = tf.constant(2.0)
      tf.summary.scalar('loss', loss_op)
      writer = learn.graph_actions.get_summary_writer(self._output_dir)
      self._assert_summaries(self._output_dir, writer)
      self._assert_ckpt(self._output_dir, False)
      loss = learn.graph_actions._monitored_train(  # pylint: disable=protected-access
          g,
          output_dir=self._output_dir,
          train_op=train_op,
          loss_op=loss_op,
          steps=1)
      meta_graph_def = meta_graph.create_meta_graph_def()
      self.assertEqual(2.0, loss)
      self._assert_summaries(self._output_dir,
                             writer,
                             expected_graphs=[g],
                             expected_meta_graphs=[meta_graph_def],
                             expected_summaries={1: {'loss': 2.0}})
      self._assert_ckpt(self._output_dir, True)

  def test_train_override_saver(self):
    # A mock saver placed in the SAVERS collection must be used instead of a
    # default one: no real checkpoint files, but build/save are invoked.
    with tf.Graph().as_default() as g, self.test_session(g):
      saver = tf.test.mock.Mock()
      tf.add_to_collection(tf.GraphKeys.SAVERS, saver)
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      self._assert_ckpt(self._output_dir, False)
      loss = learn.graph_actions._monitored_train(  # pylint: disable=protected-access
          g,
          output_dir=self._output_dir,
          train_op=train_op,
          loss_op=tf.constant(2.0),
          steps=1)
      self.assertEqual(2.0, loss)
      self._assert_ckpt(self._output_dir, False)
      self.assertTrue(saver.build.called)
      self.assertEqual(1, saver.save.call_count)


# TODO(ispir): remove following tests after deprecated train.
class GraphActionsTrainTest(tf.test.TestCase):
  """Tests for train."""

  def setUp(self):
    # Fresh output dir and a fake summary writer per test.
    learn.graph_actions.clear_summary_writers()
    self._output_dir = tempfile.mkdtemp()
    testing.FakeSummaryWriter.install()

  def tearDown(self):
    testing.FakeSummaryWriter.uninstall()
    if self._output_dir:
      shutil.rmtree(self._output_dir)
    learn.graph_actions.clear_summary_writers()

  def _assert_summaries(self,
                        output_dir,
                        expected_summaries=None,
                        expected_graphs=None,
                        expected_meta_graphs=None,
                        expected_session_logs=None):
    # Delegates the actual checks to FakeSummaryWriter.assert_summaries.
    writer = learn.graph_actions.get_summary_writer(output_dir)
    self.assertTrue(isinstance(writer, testing.FakeSummaryWriter))
    writer.assert_summaries(
        self,
        expected_logdir=output_dir,
        expected_graph=tf.get_default_graph(),
        expected_summaries=expected_summaries,
        expected_added_graphs=expected_graphs,
        expected_added_meta_graphs=expected_meta_graphs,
        expected_session_logs=expected_session_logs)

  # TODO(ptucker): Test number and contents of checkpoint files.
  def _assert_ckpt(self, output_dir, expected=True):
    # Asserts presence (all paths match model.ckpt-*) or absence of
    # checkpoint state in output_dir.
    ckpt_state = tf.train.get_checkpoint_state(output_dir)
    if expected:
      pattern = '%s/model.ckpt-.*' % output_dir
      primary_ckpt_path = ckpt_state.model_checkpoint_path
      self.assertRegexpMatches(primary_ckpt_path, pattern)
      all_ckpt_paths = ckpt_state.all_model_checkpoint_paths
      self.assertTrue(primary_ckpt_path in all_ckpt_paths)
      for ckpt_path in all_ckpt_paths:
        self.assertRegexpMatches(ckpt_path, pattern)
    else:
      self.assertTrue(ckpt_state is None)

  def _build_inference_graph(self):
    """Build simple inference graph.

    This includes a regular variable, local variable, and fake table.

    Returns:
      Tuple of 3 `Tensor` objects, 2 input and 1 output.
    """
    tf.contrib.framework.create_global_step()
    in0 = tf.Variable(1.0)
    in1 = tf.contrib.framework.local_variable(2.0)
    # Kept out of GLOBAL_VARIABLES; initialized via TABLE_INITIALIZERS only.
    fake_table = tf.Variable(3.0,
                             trainable=False,
                             collections=['fake_tables'],
                             name='fake_table_var')
    in0.graph.add_to_collections([tf.GraphKeys.TABLE_INITIALIZERS],
                                 fake_table.initializer)
    out = in0 + in1 + fake_table
    return in0, in1, out

  def test_train_invalid_args(self):
    # train() validates output_dir, train_op, loss_op, and requires a global
    # step in the graph.
    with tf.Graph().as_default() as g, self.test_session(g):
      train_op = tf.constant(1.0)
      loss_op = tf.constant(2.0)
      with self.assertRaisesRegexp(ValueError, 'utput directory'):
        learn.graph_actions.train(
            g, output_dir=None, train_op=train_op, loss_op=loss_op)
      with self.assertRaisesRegexp(ValueError, 'utput directory'):
        learn.graph_actions.train(
            g, output_dir='', train_op=tf.constant(1.0),
            loss_op=tf.constant(2.0))
      with self.assertRaisesRegexp(ValueError, 'train_op'):
        learn.graph_actions.train(
            g, output_dir=self._output_dir, train_op=None, loss_op=loss_op)
      with self.assertRaisesRegexp(ValueError, 'loss_op'):
        learn.graph_actions.train(
            g, output_dir=self._output_dir, train_op=tf.constant(1.0),
            loss_op=None)
      with self.assertRaisesRegexp(ValueError, 'global_step'):
        learn.graph_actions.train(
            g, output_dir=self._output_dir, train_op=tf.constant(1.0),
            loss_op=loss_op)

  # TODO(ptucker): Resume training from previous ckpt.
  # TODO(ptucker): !supervisor_is_chief
  # TODO(ptucker): Custom init op for training.
  # TODO(ptucker): Mock supervisor, and assert all interactions.
  def test_train(self):
    # One training step: returns the loss, writes the graph summary, and
    # creates a checkpoint.
    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      self._assert_summaries(self._output_dir)
      self._assert_ckpt(self._output_dir, False)
      loss = learn.graph_actions.train(
          g, output_dir=self._output_dir, train_op=train_op,
          loss_op=tf.constant(2.0), steps=1)
      # TODO(ebrevdo,ptucker,ispir): this meta_graph_def lacks the
      # SaverDef, so we can't add it to the summary assertion test below.
      # meta_graph_def = meta_graph.create_meta_graph_def()
      self.assertEqual(2.0, loss)
      self._assert_summaries(self._output_dir, expected_graphs=[g])
      self._assert_ckpt(self._output_dir, True)

  def test_train_steps_is_incremental(self):
    # steps= is relative to the restored global step: 10 then 15 more -> 25.
    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      learn.graph_actions.train(g, output_dir=self._output_dir,
                                train_op=train_op, loss_op=tf.constant(2.0),
                                steps=10)
      step = tf.contrib.framework.load_variable(
          self._output_dir, tf.contrib.framework.get_global_step().name)
      self.assertEqual(10, step)

    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      learn.graph_actions.train(g, output_dir=self._output_dir,
                                train_op=train_op, loss_op=tf.constant(2.0),
                                steps=15)
      step = tf.contrib.framework.load_variable(
          self._output_dir, tf.contrib.framework.get_global_step().name)
      self.assertEqual(25, step)

  def test_train_max_steps_is_not_incremental(self):
    # max_steps= is absolute: a second run with max_steps=15 only advances
    # the global step from 10 to 15.
    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      learn.graph_actions.train(g, output_dir=self._output_dir,
                                train_op=train_op, loss_op=tf.constant(2.0),
                                max_steps=10)
      step = tf.contrib.framework.load_variable(
          self._output_dir, tf.contrib.framework.get_global_step().name)
      self.assertEqual(10, step)

    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      learn.graph_actions.train(g, output_dir=self._output_dir,
                                train_op=train_op, loss_op=tf.constant(2.0),
                                max_steps=15)
      step = tf.contrib.framework.load_variable(
          self._output_dir, tf.contrib.framework.get_global_step().name)
      self.assertEqual(15, step)

  def test_train_loss(self):
    # loss_var starts at 10 and decreases by 1 per step; after 6 steps the
    # returned loss is 4.0.
    with tf.Graph().as_default() as g, self.test_session(g):
      tf.contrib.framework.create_global_step()
      loss_var = tf.contrib.framework.local_variable(10.0)
      train_op = tf.group(
          tf.assign_add(tf.contrib.framework.get_global_step(), 1),
          tf.assign_add(loss_var, -1.0))
      self._assert_summaries(self._output_dir)
      self._assert_ckpt(self._output_dir, False)
      loss = learn.graph_actions.train(
          g, output_dir=self._output_dir, train_op=train_op,
          loss_op=loss_var.value(), steps=6)
      # TODO(ebrevdo,ptucker,ispir): this meta_graph_def lacks the
      # SaverDef, so we can't add it to the summary assertion test below.
      # meta_graph_def = meta_graph.create_meta_graph_def()
      self.assertEqual(4.0, loss)
      self._assert_summaries(self._output_dir, expected_graphs=[g])
      self._assert_ckpt(self._output_dir, True)

  def test_train_summaries(self):
    # A scalar summary attached to the loss must be recorded at step 1.
    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      loss_op = tf.constant(2.0)
      tf.summary.scalar('loss', loss_op)
      self._assert_summaries(self._output_dir)
      self._assert_ckpt(self._output_dir, False)
      loss = learn.graph_actions.train(
          g, output_dir=self._output_dir, train_op=train_op, loss_op=loss_op,
          steps=1)
      # TODO(ebrevdo,ptucker,ispir): this meta_graph_def lacks the
      # SaverDef, so we can't add it to the summary assertion test below.
      # meta_graph_def = meta_graph.create_meta_graph_def()
      self.assertEqual(2.0, loss)
      self._assert_summaries(
          self._output_dir,
          expected_graphs=[g],
          expected_summaries={1: {'loss': 2.0}})
      self._assert_ckpt(self._output_dir, True)

  def test_train_chief_monitor(self):
    # As chief, both chief-exclusive and all-worker monitors must run.
    with tf.Graph().as_default() as g, self.test_session(g):
      with tf.control_dependencies(self._build_inference_graph()):
        train_op = tf.assign_add(tf.contrib.framework.get_global_step(), 1)
      loss_op = tf.constant(2.0)
      tf.summary.scalar('loss', loss_op)
      chief_exclusive_monitor = _BaseMonitorWrapper(False)
      all_workers_monitor = _BaseMonitorWrapper(True)
      loss = learn.graph_actions.train(
          g, output_dir=self._output_dir, train_op=train_op, loss_op=loss_op,
          supervisor_is_chief=True, steps=1,
          monitors=[chief_exclusive_monitor, all_workers_monitor])
      self.assertEqual(2.0, loss)
      self.assertTrue(chief_exclusive_monitor.is_active and
                      all_workers_monitor.is_active,
                      'All monitors must have been active.')
      self.assertTrue(chief_exclusive_monitor.has_step and
                      all_workers_monitor.has_step,
                      'All monitors must have a step.')

  def test_train_worker_monitor(self):
    # We need to explicitly set device due to check on non-chief workers
    # requiring all variables to have a device assigned.
    with tf.Graph().as_default() as g, g.device('/cpu:0'):
      global_step = tf.contrib.framework.create_global_step(g)
      train_op = tf.assign_add(global_step, 1)
      loss_op = tf.constant(2.0)
      tf.summary.scalar('loss', loss_op)
      # Add explicit "local" init op to initialize all variables
      # as there's no chief to init here.
      init_op = variables.global_variables_initializer()
      ops.add_to_collection(ops.GraphKeys.LOCAL_INIT_OP, init_op)
      # Create worker monitors where one should be active on the worker
      # and the other chief exclusive.
      chief_exclusive_monitor = _BaseMonitorWrapper(False)
      all_workers_monitor = _BaseMonitorWrapper(True)
      with self.test_session(g):
        loss = learn.graph_actions.train(
            g, output_dir=self._output_dir,
            global_step_tensor=global_step,
            train_op=train_op, loss_op=loss_op,
            supervisor_is_chief=False, steps=1,
            monitors=[chief_exclusive_monitor, all_workers_monitor])
      self.assertEqual(2.0, loss)
      self.assertTrue(not chief_exclusive_monitor.is_active and
                      all_workers_monitor.is_active,
                      'Only non-chief runnable monitor must have been active.')
      self.assertTrue(not chief_exclusive_monitor.has_step and
                      all_workers_monitor.has_step,
                      'Only non-chief runnable monitor must have a step.')


if __name__ == '__main__':
  tf.test.main()
{ "content_hash": "3050d1d8785ca6dd8847b90907b4eefc", "timestamp": "", "source": "github", "line_count": 749, "max_line_length": 86, "avg_line_length": 43.57276368491322, "alnum_prop": 0.6335335212648608, "repo_name": "DCSaunders/tensorflow", "id": "caacb1928a2d487174164d10c332bc68faa72922", "size": "33325", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "tensorflow/contrib/learn/python/learn/graph_actions_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "6641" }, { "name": "C", "bytes": "90766" }, { "name": "C++", "bytes": "13984868" }, { "name": "CMake", "bytes": "110983" }, { "name": "CSS", "bytes": "774" }, { "name": "Go", "bytes": "87708" }, { "name": "HTML", "bytes": "534592" }, { "name": "Java", "bytes": "57002" }, { "name": "JavaScript", "bytes": "13406" }, { "name": "Jupyter Notebook", "bytes": "1833475" }, { "name": "Makefile", "bytes": "26235" }, { "name": "Objective-C", "bytes": "7056" }, { "name": "Objective-C++", "bytes": "64592" }, { "name": "Perl", "bytes": "4412" }, { "name": "Protocol Buffer", "bytes": "143116" }, { "name": "Python", "bytes": "13808086" }, { "name": "Shell", "bytes": "276793" }, { "name": "TypeScript", "bytes": "749115" } ], "symlink_target": "" }
from __future__ import unicode_literals

from django.db import models, migrations


class Migration(migrations.Migration):
    # Schema migration for the 'stuff' app: redefines Authenticator.user_id.

    # Must be applied after the migration that introduced the Authenticator
    # model.
    dependencies = [
        ('stuff', '0002_authenticator'),
    ]

    operations = [
        # Alter user_id to an IntegerField with a unique constraint.
        migrations.AlterField(
            model_name='authenticator',
            name='user_id',
            field=models.IntegerField(unique=True),
            preserve_default=True,
        ),
    ]
{ "content_hash": "c53ac85be28a32a6bc94308e56d7fb3d", "timestamp": "", "source": "github", "line_count": 19, "max_line_length": 51, "avg_line_length": 21.842105263157894, "alnum_prop": 0.5927710843373494, "repo_name": "hnaoto/CS4501-ISA-Models", "id": "541c9592e0428452a3729e3af56a973d8a145c1d", "size": "439", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "stuff/migrations/0003_auto_20151022_0214.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "41725" }, { "name": "HTML", "bytes": "64" }, { "name": "JavaScript", "bytes": "74674" }, { "name": "Python", "bytes": "30388" } ], "symlink_target": "" }
from unittest import TestCase

import watson_developer_cloud
import responses
import pytest


class TestAlchemyLanguageV1(TestCase):
    # Tests for the AlchemyLanguageV1 client; HTTP traffic is mocked with the
    # `responses` library, so no network access occurs.

    def test_api_key(self):
        # Constructor stores api_key/url; set_url() replaces the base URL.
        default_url = 'https://gateway-a.watsonplatform.net/calls'
        inited = watson_developer_cloud.AlchemyLanguageV1(
            url=default_url,
            api_key='boguskey',
            x_watson_learning_opt_out=True)
        assert inited.api_key == 'boguskey'
        assert inited.url == default_url
        inited.set_url(url="http://google.com")
        assert inited.url == "http://google.com"
        # with pytest.raises(watson_developer_cloud.WatsonException):
        #     watson_developer_cloud.AlchemyLanguageV1()
        # with pytest.raises(watson_developer_cloud.WatsonException):
        #     watson_developer_cloud.AlchemyLanguageV1(api_key='YOUR API KEY')

    def test_unpack_id(self):
        # unpack_id returns the value for a present key, and the whole dict
        # when the key is missing.
        testdict = {'one': 10}
        assert watson_developer_cloud.AlchemyLanguageV1.unpack_id(
            testdict, 'one') == 10
        assert watson_developer_cloud.AlchemyLanguageV1.unpack_id(
            testdict, 'two') == testdict

    @responses.activate
    def test_author(self):
        # author() hits HTMLGetAuthor for html= and URLGetAuthor for url=;
        # calling it with neither raises WatsonInvalidArgument.
        url = 'https://gateway-a.watsonplatform.net'
        default_url = 'https://gateway-a.watsonplatform.net/calls'
        responses.add(responses.POST,
                      '{0}/html/HTMLGetAuthor'.format(url),
                      body='{"bogus": "response"}',
                      status=200,
                      content_type='application/json')
        responses.add(responses.POST,
                      '{0}/url/URLGetAuthor'.format(url),
                      body='{"bogus": "response"}',
                      status=200,
                      content_type='application/json')
        responses.add(responses.POST,
                      '{0}/html/HTMLGetAuthor'.format(default_url),
                      body='{"bogus": "response"}',
                      status=200,
                      content_type='application/json')
        responses.add(responses.POST,
                      '{0}/url/URLGetAuthor'.format(default_url),
                      body='{"bogus": "response"}',
                      status=200,
                      content_type='application/json')

        alang = watson_developer_cloud.AlchemyLanguageV1(
            url=url,
            api_key='boguskey',
            x_watson_learning_opt_out=True)
        alang.author(html="I'm html")
        alang.author(url="http://google.com")
        with pytest.raises(watson_developer_cloud.WatsonInvalidArgument):
            alang.author()
        alang = watson_developer_cloud.AlchemyLanguageV1(
            url=default_url,
            api_key='boguskey',
            x_watson_learning_opt_out=True)
        alang.author(html="I'm html")
        alang.author(url="http://google.com")

        assert len(responses.calls) == 4

    @responses.activate
    def test_auth_exception(self):
        # A 401 from the service surfaces as WatsonException.
        default_url = 'https://gateway-a.watsonplatform.net/calls'
        responses.add(responses.POST,
                      '{0}/url/URLGetAuthor'.format(default_url),
                      body='{"bogus": "response"}',
                      status=401,
                      content_type='application/json')

        alang = watson_developer_cloud.AlchemyLanguageV1(
            url=default_url,
            api_key='boguskey',
            x_watson_learning_opt_out=True)
        with pytest.raises(watson_developer_cloud.WatsonException):
            alang.author(url="http://google.com")

        assert len(responses.calls) == 1

    @responses.activate
    def test_authors(self):
        # authors() hits URLGetAuthors / HTMLGetAuthors depending on the
        # argument supplied.
        default_url = 'https://gateway-a.watsonplatform.net/calls'
        responses.add(responses.POST,
                      '{0}/url/URLGetAuthors'.format(default_url),
                      body='{"bogus": "response"}',
                      status=200,
                      content_type='application/json')
        responses.add(responses.POST,
                      '{0}/html/HTMLGetAuthors'.format(default_url),
                      body='{"bogus": "response"}',
                      status=200,
                      content_type='application/json')

        alang = watson_developer_cloud.AlchemyLanguageV1(
            url=default_url,
            api_key='boguskey',
            x_watson_learning_opt_out=True)
        alang.authors(url="http://google.com")
        alang.authors(html="<h1>Author</h1>")

        assert len(responses.calls) == 2
{ "content_hash": "15dbbe64fb76bdc90ab7c33b1203a40f", "timestamp": "", "source": "github", "line_count": 85, "max_line_length": 117, "avg_line_length": 49.529411764705884, "alnum_prop": 0.5954869358669834, "repo_name": "ehdsouza/python-sdk", "id": "e48d7f89ebc655caf24a42d6e0445209227e9f59", "size": "4210", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "test/test_alchemy_language_v1.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "HTML", "bytes": "27851" }, { "name": "Python", "bytes": "354026" }, { "name": "Shell", "bytes": "250" } ], "symlink_target": "" }
"""Tests for tensor2tensor.data_generators.program_search."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import gzip
import os
import shutil
import tempfile

from builtins import bytes  # pylint: disable=redefined-builtin

from tensor2tensor.data_generators import problem
from tensor2tensor.data_generators import program_search

import tensorflow.compat.v1 as tf


class ProgramSearchAlgolispStub(program_search.ProgramSearchAlgolisp):
  """Stub of ProgramSearchAlgolisp that stubs out maybe_download_dataset.

  The maybe_download_dataset writes one predetermined example in a zip file
  self.n number of times and returns the file path.
  """

  # One raw AlgoLisp dataset record (JSON line) used as the only fixture.
  EXAMPLE = ('{"funcs": [], "tests": [{"output": 0, "input": {"a": 5}}, '
             '{"output": 1, "input": {"a": 20}}, {"output": 2, "input": '
             '{"a": 28}}, {"output": 1, "input": {"a": 13}}, {"output": 1, '
             '"input": {"a": 27}}, {"output": 1, "input": {"a": 13}}, '
             '{"output": 1, "input": {"a": 20}}, {"output": 0, '
             '"input": {"a": 8}}, {"output": 0, "input": {"a": 8}}, '
             '{"output": 0, "input": {"a": 4}}], "short_tree": ["invoke1", '
             '["lambda1", ["if", ["==", ["len", ["digits", "arg1"]], "1"], "0",'
             ' ["+", "1", ["self", ["reduce", ["digits", "arg1"], "0", '
             '"+"]]]]], "a"], "tags": [], "text": ["given", "a", "number", "a",'
             ' ",", "find", "how", "many", "times", "you", "can", "replace", '
             '"a", "with", "sum", "of", "its", "digits", "before", "it", '
             '"becomes", "a", "single", "digit", "number"], "return_type": '
             '"int", "args": {"a": "int"}, "nodes": ["l1_recursive_digits"]}')

  # Expected 'inputs' text produced from EXAMPLE's "text" field.
  EXAMPLE_INPUT = ('given a number a , find how many times you can replace a '
                   'with sum of its digits before it becomes a single digit '
                   'number')

  # Expected 'targets' serialization of EXAMPLE's "short_tree" field.
  EXAMPLE_TARGET = ('[ invoke1 [ lambda1 [ if [ == [ len [ digits arg1 ] ] 1 ]'
                    ' 0 [ + 1 [ self [ reduce [ digits arg1 ] 0 + ] ] ] ] ] a '
                    ']')

  # Number of copies of EXAMPLE written to the generated file.
  N = 10

  def maybe_download_dataset(self, tmp_dir, dataset_split):
    # Writes EXAMPLE N times into a gzip file named after the split and
    # returns its path, instead of downloading the real dataset.
    (_, data_file) = tempfile.mkstemp(
        suffix='.gz', prefix=str(dataset_split) + '-', dir=tmp_dir)

    with gzip.open(data_file, 'wb') as gz_file:
      content = '\n'.join([self.EXAMPLE] * self.N)
      gz_file.write(bytes(content, 'utf-8'))

    return data_file


class ProgramSearchAlgolispTest(tf.test.TestCase):

  @classmethod
  def setUpClass(cls):
    # Setup the temp directory tree.
    cls.tmp_dir = tf.test.get_temp_dir()
    shutil.rmtree(cls.tmp_dir)
    os.mkdir(cls.tmp_dir)

  @classmethod
  def tearDownClass(cls):
    # Cleanup the temp directory tree.
    shutil.rmtree(cls.tmp_dir)

  def testEndToEnd(self):
    # End-to-end test, the stub problem class creates a .gz file with
    # nps_stub.N example and we check if we're able to process it correctly.
    nps_stub = ProgramSearchAlgolispStub()

    num = 0
    for example in nps_stub.generate_samples(None, self.tmp_dir,
                                             problem.DatasetSplit.TRAIN):
      # Only one example in 'file', so this is OK.
      self.assertEqual(example['inputs'],
                       ProgramSearchAlgolispStub.EXAMPLE_INPUT)
      self.assertEqual(example['targets'],
                       ProgramSearchAlgolispStub.EXAMPLE_TARGET)
      num += 1

    # assert that we have as many examples as there are in the file.
    self.assertEqual(num, nps_stub.N)


if __name__ == '__main__':
  tf.test.main()
{ "content_hash": "97fb6af4ae75f9ac91628dd243d0f507", "timestamp": "", "source": "github", "line_count": 97, "max_line_length": 80, "avg_line_length": 37.422680412371136, "alnum_prop": 0.565564738292011, "repo_name": "tensorflow/tensor2tensor", "id": "340b39391241e8536ac5af2e1838662081af5dec", "size": "4236", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "tensor2tensor/data_generators/program_search_test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C++", "bytes": "32015" }, { "name": "HTML", "bytes": "34684" }, { "name": "JavaScript", "bytes": "78408" }, { "name": "Jupyter Notebook", "bytes": "2859453" }, { "name": "Python", "bytes": "5109255" }, { "name": "Shell", "bytes": "11941" } ], "symlink_target": "" }
from typing import Any, TYPE_CHECKING

from azure.core.configuration import Configuration
from azure.core.pipeline import policies
from azure.mgmt.core.policies import ARMHttpLoggingPolicy, AsyncARMChallengeAuthenticationPolicy

from .._version import VERSION

if TYPE_CHECKING:
    # pylint: disable=unused-import,ungrouped-imports
    from azure.core.credentials_async import AsyncTokenCredential


class ResourceManagementClientConfiguration(Configuration):  # pylint: disable=too-many-instance-attributes
    """Configuration for ResourceManagementClient.

    Note that all parameters used to create this instance are saved as instance
    attributes.

    :param credential: Credential needed for the client to connect to Azure. Required.
    :type credential: ~azure.core.credentials_async.AsyncTokenCredential
    :param subscription_id: The ID of the target subscription. Required.
    :type subscription_id: str
    :keyword api_version: Api Version. Default value is "2018-05-01". Note that overriding this
     default value may result in unsupported behavior.
    :paramtype api_version: str
    """

    def __init__(self, credential: "AsyncTokenCredential", subscription_id: str, **kwargs: Any) -> None:
        super(ResourceManagementClientConfiguration, self).__init__(**kwargs)
        # Pinned service API version; callers may override via kwargs.
        api_version = kwargs.pop("api_version", "2018-05-01")  # type: str

        if credential is None:
            raise ValueError("Parameter 'credential' must not be None.")
        if subscription_id is None:
            raise ValueError("Parameter 'subscription_id' must not be None.")

        self.credential = credential
        self.subscription_id = subscription_id
        self.api_version = api_version
        # Default OAuth scope for ARM; overridable for sovereign clouds.
        self.credential_scopes = kwargs.pop("credential_scopes", ["https://management.azure.com/.default"])
        kwargs.setdefault("sdk_moniker", "mgmt-resource/{}".format(VERSION))
        self._configure(**kwargs)

    def _configure(self, **kwargs: Any) -> None:
        # Build the default pipeline policies, letting any caller-supplied
        # policy in kwargs take precedence over the stock one.
        self.user_agent_policy = kwargs.get("user_agent_policy") or policies.UserAgentPolicy(**kwargs)
        self.headers_policy = kwargs.get("headers_policy") or policies.HeadersPolicy(**kwargs)
        self.proxy_policy = kwargs.get("proxy_policy") or policies.ProxyPolicy(**kwargs)
        self.logging_policy = kwargs.get("logging_policy") or policies.NetworkTraceLoggingPolicy(**kwargs)
        self.http_logging_policy = kwargs.get("http_logging_policy") or ARMHttpLoggingPolicy(**kwargs)
        self.retry_policy = kwargs.get("retry_policy") or policies.AsyncRetryPolicy(**kwargs)
        self.custom_hook_policy = kwargs.get("custom_hook_policy") or policies.CustomHookPolicy(**kwargs)
        self.redirect_policy = kwargs.get("redirect_policy") or policies.AsyncRedirectPolicy(**kwargs)
        self.authentication_policy = kwargs.get("authentication_policy")
        # Only synthesize an auth policy when a credential was provided and
        # the caller did not supply an explicit policy.
        if self.credential and not self.authentication_policy:
            self.authentication_policy = AsyncARMChallengeAuthenticationPolicy(
                self.credential, *self.credential_scopes, **kwargs
            )
{ "content_hash": "313ef3c34baed52e5a34a4851a326e49", "timestamp": "", "source": "github", "line_count": 58, "max_line_length": 107, "avg_line_length": 53.12068965517241, "alnum_prop": 0.7198961376176566, "repo_name": "Azure/azure-sdk-for-python", "id": "f6b04935c913654b96f39f29d8ad4b9e05939e7c", "size": "3549", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "sdk/resources/azure-mgmt-resource/azure/mgmt/resource/resources/v2018_05_01/aio/_configuration.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "1224" }, { "name": "Bicep", "bytes": "24196" }, { "name": "CSS", "bytes": "6089" }, { "name": "Dockerfile", "bytes": "4892" }, { "name": "HTML", "bytes": "12058" }, { "name": "JavaScript", "bytes": "8137" }, { "name": "Jinja", "bytes": "10377" }, { "name": "Jupyter Notebook", "bytes": "272022" }, { "name": "PowerShell", "bytes": "518535" }, { "name": "Python", "bytes": "715484989" }, { "name": "Shell", "bytes": "3631" } ], "symlink_target": "" }
try:
    import scipy
except ImportError:
    scipy = None

import pytest

from emcee import moves
from .test_proposal import _test_normal, _test_uniform

__all__ = ["test_normal_kde", "test_uniform_kde", "test_nsplits_kde"]

# Every KDE-move test requires scipy; share a single skip marker instead of
# repeating the skipif expression on each test.
_needs_scipy = pytest.mark.skipif(scipy is None, reason="scipy is not available")


@_needs_scipy
def test_normal_kde(**kwargs):
    """KDEMove passes the shared normal-target integration test."""
    _test_normal(moves.KDEMove(), **kwargs)


@_needs_scipy
def test_uniform_kde(**kwargs):
    """KDEMove passes the shared uniform-target integration test."""
    _test_uniform(moves.KDEMove(), **kwargs)


@_needs_scipy
def test_nsplits_kde(**kwargs):
    """KDEMove with a non-default split count still samples correctly."""
    _test_normal(moves.KDEMove(nsplits=5), **kwargs)
{ "content_hash": "9f0972fa82a48f67145b3730001d7cb6", "timestamp": "", "source": "github", "line_count": 26, "max_line_length": 69, "avg_line_length": 25.846153846153847, "alnum_prop": 0.7083333333333334, "repo_name": "dfm/emcee", "id": "5ab06afdfab9506a75a23f566f15de4e65bc6780", "size": "697", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "src/emcee/tests/integration/test_kde.py", "mode": "33188", "license": "mit", "language": [ { "name": "Python", "bytes": "119438" }, { "name": "Shell", "bytes": "1234" }, { "name": "TeX", "bytes": "20222" } ], "symlink_target": "" }
from django.urls import path

from .views import createPaypalPayment, executePaypalPayment

# URL routes for the PayPal payment flow: one endpoint to create a payment
# and one to execute (finalize) it after PayPal approval.
urlpatterns = [
    path('create_payment/', createPaypalPayment, name='createPaypalPayment'),
    path('execute_payment/', executePaypalPayment, name='executePaypalPayment'),
]
{ "content_hash": "f088c3baa069753c914e6a5f2c272612", "timestamp": "", "source": "github", "line_count": 8, "max_line_length": 80, "avg_line_length": 33.625, "alnum_prop": 0.7806691449814126, "repo_name": "django-danceschool/django-danceschool", "id": "44bc97dc930f2714df3ab042ae64eee13f84996c", "size": "269", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "danceschool/payments/paypal/urls.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "55309" }, { "name": "HTML", "bytes": "334988" }, { "name": "JavaScript", "bytes": "2008559" }, { "name": "Less", "bytes": "21246" }, { "name": "Python", "bytes": "1856445" }, { "name": "SCSS", "bytes": "9564" } ], "symlink_target": "" }
""" This is a small demo, showing how to make an object that can be moved around. It also contains a simple prototype for a "Connector" object -- a line connecting two other objects """ import wx #ver = 'local' ver = 'installed' if ver == 'installed': ## import the installed version from wx.lib.floatcanvas import NavCanvas, Resources from wx.lib.floatcanvas import FloatCanvas as FC from wx.lib.floatcanvas.Utilities import BBox print("using installed version: %s" % wx.lib.floatcanvas.__version__) elif ver == 'local': ## import a local version import sys sys.path.append("..") from floatcanvas import NavCanvas, Resources from floatcanvas import FloatCanvas as FC from floatcanvas.Utilities import BBox import numpy as N ## here we create some new mixins: class MovingObjectMixin: """ Methods required for a Moving object """ def GetOutlinePoints(self): BB = self.BoundingBox OutlinePoints = N.array( ( (BB[0,0], BB[0,1]), (BB[0,0], BB[1,1]), (BB[1,0], BB[1,1]), (BB[1,0], BB[0,1]), ) ) return OutlinePoints class ConnectorObjectMixin: """ Mixin class for DrawObjects that can be connected with lines NOte that this versionony works for Objects that have an "XY" attribute: that is, one that is derived from XHObjectMixin. 
""" def GetConnectPoint(self): return self.XY class MovingBitmap(FC.ScaledBitmap, MovingObjectMixin, ConnectorObjectMixin): """ ScaledBitmap Object that can be moved """ ## All we need to do is is inherit from: ## ScaledBitmap, MovingObjectMixin and ConnectorObjectMixin pass class MovingCircle(FC.Circle, MovingObjectMixin, ConnectorObjectMixin): """ ScaledBitmap Object that can be moved """ ## All we need to do is is inherit from: ## ScaledBitmap, MovingObjectMixin and ConnectorObjectMixin pass class MovingArc(FC.Arc, MovingObjectMixin, ConnectorObjectMixin): """ ScaledBitmap Object that can be moved """ ## All we need to do is is inherit from: ## ScaledBitmap, MovingObjectMixin and ConnectorObjectMixin pass class ConnectorLine(FC.LineOnlyMixin, FC.DrawObject,): """ A Line that connects two objects -- it uses the objects to get its coordinates """ ##fixme: this should be added to the Main FloatCanvas Objects some day. def __init__(self, Object1, Object2, LineColor = "Black", LineStyle = "Solid", LineWidth = 1, InForeground = False): FC.DrawObject.__init__(self, InForeground) self.Object1 = Object1 self.Object2 = Object2 self.LineColor = LineColor self.LineStyle = LineStyle self.LineWidth = LineWidth self.CalcBoundingBox() self.SetPen(LineColor,LineStyle,LineWidth) self.HitLineWidth = max(LineWidth,self.MinHitLineWidth) def CalcBoundingBox(self): self.BoundingBox = BBox.fromPoints((self.Object1.GetConnectPoint(), self.Object2.GetConnectPoint()) ) if self._Canvas: self._Canvas.BoundingBoxDirty = True def _Draw(self, dc , WorldToPixel, ScaleWorldToPixel, HTdc=None): Points = N.array( (self.Object1.GetConnectPoint(), self.Object2.GetConnectPoint()) ) Points = WorldToPixel(Points) dc.SetPen(self.Pen) dc.DrawLines(Points) if HTdc and self.HitAble: HTdc.SetPen(self.HitPen) HTdc.DrawLines(Points) class TriangleShape1(FC.Polygon, MovingObjectMixin): def __init__(self, XY, L): """ An equilateral triangle object XY is the middle of the triangle L is the length of one side of 
the Triangle """ XY = N.asarray(XY) XY.shape = (2,) Points = self.CompPoints(XY, L) FC.Polygon.__init__(self, Points, LineColor = "Black", LineStyle = "Solid", LineWidth = 2, FillColor = "Red", FillStyle = "Solid") ## Override the default OutlinePoints def GetOutlinePoints(self): return self.Points def CompPoints(self, XY, L): c = L/ N.sqrt(3) Points = N.array(((0, c), ( L/2.0, -c/2.0), (-L/2.0, -c/2.0)), N.float_) Points += XY return Points class DrawFrame(wx.Frame): """ A simple frame used for the Demo """ def __init__(self, *args, **kwargs): wx.Frame.__init__(self, *args, **kwargs) self.CreateStatusBar() # Add the Canvas Canvas = NavCanvas.NavCanvas(self,-1,(500,500), ProjectionFun = None, Debug = 0, BackgroundColor = "DARK SLATE BLUE", ).Canvas self.Canvas = Canvas Canvas.Bind(FC.EVT_MOTION, self.OnMove ) Canvas.Bind(FC.EVT_LEFT_UP, self.OnLeftUp ) Points = N.array(((0,0), (1,0), (0.5, 1)), N.float) data = (( (0,0), 1), ( (3,3), 2), ( (-2,3), 2.5 ), ) for p, L in data: Tri = TriangleShape1(p, 1) Canvas.AddObject(Tri) Tri.Bind(FC.EVT_FC_LEFT_DOWN, self.ObjectHit) Circle = MovingCircle( (1, 3), 2, FillColor="Blue") Canvas.AddObject(Circle) Circle.Bind(FC.EVT_FC_LEFT_DOWN, self.ObjectHit) Bitmaps = [] ## create the bitmaps first for Point in ((1,1), (-4,3)): Bitmaps.append(MovingBitmap(Resources.getMondrianImage(), Point, Height=1, Position='cc') ) Line = ConnectorLine(Bitmaps[0], Bitmaps[1], LineWidth=3, LineColor="Red") Canvas.AddObject(Line) ## then add them to the Canvas, so they are on top of the line for bmp in Bitmaps: Canvas.AddObject(bmp) bmp.Bind(FC.EVT_FC_LEFT_DOWN, self.ObjectHit) A = MovingArc((-5, 0),(-2, 2),(-5, 2), LineColor="Red", LineWidth=2) self.Canvas.AddObject(A) A.Bind(FC.EVT_FC_LEFT_DOWN, self.ObjectHit) self.Show(True) self.Canvas.ZoomToBB() self.MoveObject = None self.Moving = False return None def ObjectHit(self, object): if not self.Moving: self.Moving = True self.StartPoint = object.HitCoordsPixel self.StartObject = 
self.Canvas.WorldToPixel(object.GetOutlinePoints()) self.MoveObject = None self.MovingObject = object def OnMove(self, event): """ Updates the status bar with the world coordinates and moves the object it is clicked on """ self.SetStatusText("%.4f, %.4f"%tuple(event.Coords)) if self.Moving: dxy = event.GetPosition() - self.StartPoint # Draw the Moving Object: dc = wx.ClientDC(self.Canvas) dc.SetPen(wx.Pen('WHITE', 2, wx.SHORT_DASH)) dc.SetBrush(wx.TRANSPARENT_BRUSH) dc.SetLogicalFunction(wx.XOR) if self.MoveObject is not None: dc.DrawPolygon(self.MoveObject) self.MoveObject = self.StartObject + dxy dc.DrawPolygon(self.MoveObject) def OnLeftUp(self, event): if self.Moving: self.Moving = False if self.MoveObject is not None: dxy = event.GetPosition() - self.StartPoint dxy = self.Canvas.ScalePixelToWorld(dxy) self.MovingObject.Move(dxy) self.MoveTri = None self.Canvas.Draw(True) if __name__ == "__main__": app = wx.App(0) DrawFrame(None, -1, "FloatCanvas Moving Object App", wx.DefaultPosition, (700,700) ) app.MainLoop()
{ "content_hash": "1e7b75e00aa66d30ed2d5289a5c3707b", "timestamp": "", "source": "github", "line_count": 281, "max_line_length": 88, "avg_line_length": 30.373665480427047, "alnum_prop": 0.5411833626244874, "repo_name": "dnxbjyj/python-basic", "id": "40db285852fb2d4097095e881afacbf830df237f", "size": "8557", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "gui/wxpython/wxPython-demo-4.0.1/samples/floatcanvas/MovingElements.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "70" }, { "name": "HTML", "bytes": "274934" }, { "name": "Jupyter Notebook", "bytes": "868723" }, { "name": "Python", "bytes": "4032747" }, { "name": "Shell", "bytes": "446" } ], "symlink_target": "" }
from django.test import TestCase # permissions imports import permissions.utils # lfc imports from lfc.models import BaseContent from lfc.models import Page from lfc.models import Portal from lfc.tests.utils import create_request class ManagerTestCase(TestCase): """ """ def setUp(self): """ """ Portal.objects.create() self.p1 = Page.objects.create(title="Page 1", slug="page-1") self.p2 = Page.objects.create(title="Page 2", slug="page-2") self.anonymous = permissions.utils.register_role("Anonymous") self.permission = permissions.utils.register_permission("View", "view") permissions.utils.grant_permission(self.p2, self.anonymous, "view") def test_get(self): """ """ obj = BaseContent.objects.get(slug="page-1") self.failUnless(isinstance(obj, BaseContent)) def test_get_content_object(self): """ """ obj = BaseContent.objects.get(slug="page-1").get_content_object() self.failUnless(isinstance(obj, Page)) def test_get_content_objects(self): """ """ obj = BaseContent.objects.filter(slug="page-1").get_content_objects() self.failUnless(isinstance(obj[0], Page))
{ "content_hash": "7b491480c3203850829af1a297af03ce", "timestamp": "", "source": "github", "line_count": 43, "max_line_length": 79, "avg_line_length": 29.25581395348837, "alnum_prop": 0.6359300476947536, "repo_name": "natea/django-lfc", "id": "681acbce13e0fab27f334835a4b44a17cd0ae8f0", "size": "1275", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "lfc/tests/manager_tests.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "JavaScript", "bytes": "590911" }, { "name": "Python", "bytes": "394676" } ], "symlink_target": "" }
""" WSGI config for thingyproject project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.9/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "thingyproject.settings") application = get_wsgi_application()
{ "content_hash": "bfe2725489b4da863122b22a3b068789", "timestamp": "", "source": "github", "line_count": 16, "max_line_length": 78, "avg_line_length": 25.1875, "alnum_prop": 0.7766749379652605, "repo_name": "Guillaume-Docquier/Thingy", "id": "9b8bd1ce737d075cd8e4f8de77174218c0c52284", "size": "403", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "thingyproject/wsgi.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "24435" }, { "name": "HTML", "bytes": "67318" }, { "name": "JavaScript", "bytes": "103866" }, { "name": "Python", "bytes": "80116" } ], "symlink_target": "" }
"""Blink the fingerprint sensor's LED forever, toggling once per second.

Smoke test for the GT511C3 driver in ``mainFile``: open the sensor, then
alternate the LED on and off in an endless loop (stop with Ctrl-C).
"""
import mainFile, sys
import time

fp = mainFile.fingerprintsensor()
fp.Open()

# Bug fix: the original passed ``true``/``false``, which are not Python
# names, so the loop raised NameError on its first iteration.  Python's
# booleans are capitalized: True / False.
while True:
    fp.SetLED(True)    # LED on
    mainFile.delay(1)  # driver-provided wait; presumably ~1 second -- confirm
    fp.SetLED(False)   # LED off
    mainFile.delay(1)
{ "content_hash": "07a2d635edca3d84c71b7b98cb1c95be", "timestamp": "", "source": "github", "line_count": 9, "max_line_length": 33, "avg_line_length": 19.11111111111111, "alnum_prop": 0.6918604651162791, "repo_name": "ashwinahuja/Engineering-Extended-Project---Ashwin-Ahuja", "id": "8698a1c2e6e6c0dcbed0e6527426b35e7968abe8", "size": "172", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "Code/GT511C3Library/testfornow.py", "mode": "33188", "license": "mit", "language": [ { "name": "C#", "bytes": "7508" }, { "name": "Python", "bytes": "18261" } ], "symlink_target": "" }
# Flask / Flask-WTF settings.
WTF_CSRF_ENABLED = True
# NOTE(review): hard-coded secret key and DB credentials below should be
# moved to environment variables before deploying anywhere non-local.
SECRET_KEY = 'secret-key'

# Flask-SQLAlchemy settings.
SQLALCHEMY_DATABASE_URI = "postgresql://postgres:admin@localhost/flask_blog"
SQLALCHEMY_TRACK_MODIFICATIONS = False

# pagination
POSTS_PER_PAGE = 6
{ "content_hash": "66dd73bec2dd6abe958c3d2a9a8cfe77", "timestamp": "", "source": "github", "line_count": 7, "max_line_length": 76, "avg_line_length": 28.428571428571427, "alnum_prop": 0.7738693467336684, "repo_name": "andrefaranha/flask-blog", "id": "740bebd5e06db03e4412962717c5830e99352d0e", "size": "199", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "config.py", "mode": "33188", "license": "mit", "language": [ { "name": "CSS", "bytes": "438" }, { "name": "HTML", "bytes": "13407" }, { "name": "Python", "bytes": "35920" } ], "symlink_target": "" }
# Python 2 test harness for the SWIG 'smart_pointer_member' test case.
# Bar wraps Foo through a smart pointer; member access on Bar must forward
# to the underlying Foo.  Any mismatch raises RuntimeError.
from smart_pointer_member import *


def is_new_style_class(cls):
    # On old-style classes (py2) this attribute is absent on the class object.
    return hasattr(cls, "__class__")

f = Foo()
f.y = 1
if f.y != 1:
    raise RuntimeError

# Writing through the smart pointer must update the pointee.
b = Bar(f)
b.y = 2
if f.y != 2:
    print f.y
    print b.y
    raise RuntimeError

if b.x != f.x:
    raise RuntimeError

if b.z != f.z:
    raise RuntimeError

if is_new_style_class(Bar):
    # feature not supported in old style classes
    if Foo.z == Bar.z:
        raise RuntimeError
{ "content_hash": "3ee0683085280201456984afcaa8d3b0", "timestamp": "", "source": "github", "line_count": 29, "max_line_length": 73, "avg_line_length": 15.413793103448276, "alnum_prop": 0.6085011185682326, "repo_name": "DEKHTIARJonathan/BilletterieUTC", "id": "ce91da2bd77730961c3d713e1e8b3ce4b50e541c", "size": "447", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "badgingServer/Install/swigwin-3.0.7/Examples/test-suite/python/smart_pointer_member_runme.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "505" }, { "name": "C", "bytes": "1489570" }, { "name": "C#", "bytes": "323243" }, { "name": "C++", "bytes": "2646678" }, { "name": "CSS", "bytes": "1309792" }, { "name": "Common Lisp", "bytes": "13780" }, { "name": "D", "bytes": "260374" }, { "name": "DIGITAL Command Language", "bytes": "16078" }, { "name": "Forth", "bytes": "2411" }, { "name": "Go", "bytes": "95670" }, { "name": "Groff", "bytes": "17548" }, { "name": "HTML", "bytes": "8474268" }, { "name": "Java", "bytes": "517584" }, { "name": "JavaScript", "bytes": "1574272" }, { "name": "Limbo", "bytes": "2902" }, { "name": "Lua", "bytes": "103853" }, { "name": "M", "bytes": "58261" }, { "name": "Makefile", "bytes": "193313" }, { "name": "Mathematica", "bytes": "113" }, { "name": "Matlab", "bytes": "49071" }, { "name": "Mercury", "bytes": "4136" }, { "name": "OCaml", "bytes": "25948" }, { "name": "Objective-C", "bytes": "9721" }, { "name": "PHP", "bytes": "336290" }, { "name": "Perl", "bytes": "140021" }, { "name": "Perl6", "bytes": "6403" }, { "name": "Pike", "bytes": "6601" }, { "name": "Python", "bytes": "271706" }, { "name": "R", "bytes": "6053" }, { "name": "Ruby", "bytes": "129514" }, { "name": "SQLPL", "bytes": "10237" }, { "name": "Scheme", "bytes": "81765" }, { "name": "Scilab", "bytes": "84725" }, { "name": "Shell", "bytes": "86284" }, { "name": "Standard ML", "bytes": "2587" }, { "name": "Tcl", "bytes": "38028" }, { "name": "Yacc", "bytes": 
"211262" } ], "symlink_target": "" }
from __future__ import unicode_literals

from django.db import migrations


def create_user_profile(apps, schema_editor):
    """Backfill a UserProfile row for every existing auth.User.

    Uses the historical models via ``apps.get_model`` (not direct imports)
    so the migration keeps working as the schema evolves.
    """
    UserProfile = apps.get_model("ploghubapp", "UserProfile")
    User = apps.get_model("auth", "User")
    for user in User.objects.all():
        user_profile = UserProfile(user=user, about="(empty)")
        user_profile.save()


class Migration(migrations.Migration):

    dependencies = [
        ('ploghubapp', '0003_historicaluserprofile_userprofile'),
    ]

    operations = [
        # Fix: supply a no-op reverse so this migration can be unapplied;
        # RunPython without reverse_code makes the migration irreversible.
        migrations.RunPython(create_user_profile,
                             reverse_code=migrations.RunPython.noop),
    ]
{ "content_hash": "ca84e50c92622e03f0dfe946d340caa0", "timestamp": "", "source": "github", "line_count": 20, "max_line_length": 65, "avg_line_length": 28.15, "alnum_prop": 0.6714031971580817, "repo_name": "ploggingdev/ploghub", "id": "cdb37ca34a040cadc9f3d6580921993e410e7c41", "size": "636", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "ploghubapp/migrations/0004_auto_20170803_1139.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "38789" }, { "name": "JavaScript", "bytes": "8540" }, { "name": "Python", "bytes": "71833" } ], "symlink_target": "" }
# Distutils build script for the boost.python 'linalg' demo extension.
# Targets the (ancient) boost 1.29 / python 2.2 toolchain checked into
# the RDKit External tree.
from __future__ import print_function
from distutils.core import setup, Extension
import RDConfig

# force the use of g++ please
from distutils import sysconfig
save_init_posix = sysconfig._init_posix


def my_init_posix():
    """Monkey-patched sysconfig hook: swap gcc for g++ and drop -O3."""
    print('my_init_posix: changing gcc to g++')
    save_init_posix()
    g = sysconfig.get_config_vars()
    g['CC'] = 'g++'
    g['LDSHARED'] = 'g++ -shared'
    g['PY_CFLAGS'] = g['PY_CFLAGS'].replace('-O3', '')

sysconfig._init_posix = my_init_posix

# NOTE(review): destDir is never used below -- looks like leftover code.
destDir = RDConfig.RDCodeDir
extDir = RDConfig.RDBaseDir + "/External"

# this is how things are done with BPLv2
boostInc = '-isystem%s' % (extDir + "/boost_1_29_0")
incDirs = []

# FIX: there's gotta be a better way of doing this
pyLibDir = '/usr/lib/python2.2/config'
boostLibDir = extDir + "/boost_1_29_0/libs/python/build/bin/libboost_python.so/gcc/debug/runtime-link-dynamic/shared-linkable-true/"
boostLib = "boost_python"
libDirs = [boostLibDir, pyLibDir]
libraries = [boostLib, "python2.2"]

# have to include g++ here or we get link errors with boost
compileArgs = ['-ftemplate-depth-150', '-DBOOST_PYTHON_DYNAMIC_LIB', boostInc, ]

setup(name="demo", version="1.0",
      ext_modules=[Extension("linalg", ["linalg.cpp"],
                             include_dirs=incDirs,
                             library_dirs=libDirs,
                             libraries=libraries,
                             extra_compile_args=compileArgs)])
{ "content_hash": "2bc6ed9fb75e212811c0d4177625bb89", "timestamp": "", "source": "github", "line_count": 38, "max_line_length": 132, "avg_line_length": 35.6578947368421, "alnum_prop": 0.6767527675276753, "repo_name": "rvianello/rdkit", "id": "6258f5b45daa1c68dd3f21c19bf81b438a370cde", "size": "1415", "binary": false, "copies": "4", "ref": "refs/heads/master", "path": "Code/Demos/boost/numpy/setup.py", "mode": "33261", "license": "bsd-3-clause", "language": [ { "name": "ApacheConf", "bytes": "385" }, { "name": "C", "bytes": "227962" }, { "name": "C#", "bytes": "6745" }, { "name": "C++", "bytes": "8796795" }, { "name": "CMake", "bytes": "632104" }, { "name": "Fortran", "bytes": "7661" }, { "name": "HTML", "bytes": "18138" }, { "name": "Java", "bytes": "301151" }, { "name": "JavaScript", "bytes": "11595" }, { "name": "Jupyter Notebook", "bytes": "43461" }, { "name": "LLVM", "bytes": "30376" }, { "name": "Lex", "bytes": "4508" }, { "name": "Makefile", "bytes": "10552" }, { "name": "Objective-C", "bytes": "298" }, { "name": "Python", "bytes": "3363330" }, { "name": "QMake", "bytes": "389" }, { "name": "SMT", "bytes": "3010" }, { "name": "Shell", "bytes": "9082" }, { "name": "Smarty", "bytes": "5864" }, { "name": "Yacc", "bytes": "51959" } ], "symlink_target": "" }
import os

import numpy as np
import tensorflow as tf

from yarll.agents.agent import Agent
from yarll.agents.env_runner import EnvRunner
from yarll.misc.utils import discount_rewards, FastSaver
from yarll.misc.reporter import Reporter
from yarll.misc.network_ops import create_accumulative_gradients_op, add_accumulative_gradients_op, reset_accumulative_gradients_op


class TaskPolicy(object):
    """Policy for a specific task: samples actions from that task's head."""

    def __init__(self, action, master):
        super(TaskPolicy, self).__init__()
        # `action` is this task's sampling op in the master's graph.
        self.action = action
        self.master = master

    def choose_action(self, state):
        """Choose an action by evaluating the task's sampling op on `state`."""
        return self.master.session.run([self.action], feed_dict={self.master.states: [state]})[0]

    def new_trajectory(self):
        pass


class KnowledgeTransfer(Agent):
    """Learner for variations of a task.

    Tasks share a knowledge base matrix; each task owns an L1-regularized
    sparse representation combined with it to form a per-task policy head.
    """

    def __init__(self, envs, monitor_path, **usercfg):
        super(KnowledgeTransfer, self).__init__(**usercfg)
        self.envs = envs
        self.n_tasks = len(envs)
        self.monitor_path = monitor_path
        # Assumes all envs share the same discrete action space -- taken
        # from the first env.
        self.nA = envs[0].action_space.n
        self.config.update(dict(
            timesteps_per_batch=10000,
            trajectories_per_batch=10,
            batch_update="timesteps",
            n_iter=100,
            switch_at_iter=None,  # if set, train only the last task from this iteration on
            gamma=0.99,  # Discount past rewards by a percentage
            decay=0.9,  # Decay of RMSProp optimizer
            epsilon=1e-9,  # Epsilon of RMSProp optimizer
            learning_rate=0.005,
            n_hidden_units=10,
            repeat_n_actions=1,
            n_sparse_units=10,
            feature_extraction=False
        ))
        self.config.update(usercfg)

        self.build_networks()
        # One runner per task, each driven by its own policy head.
        self.task_runners = [EnvRunner(envs[i], TaskPolicy(action, self), self.config)
                             for i, action in enumerate(self.action_tensors)]
        if self.config["save_model"]:
            for action_tensor in self.action_tensors:
                tf.add_to_collection("action", action_tensor)
            tf.add_to_collection("states", self.states)
            self.saver = FastSaver()

    def build_networks(self):
        """Build the shared TF1 graph: shared trunk + per-task sparse heads,
        per-task losses/summaries, and accumulated-gradient train ops."""
        self.session = tf.Session()

        with tf.variable_scope("shared"):
            self.states = tf.placeholder(tf.float32, [None] + list(self.envs[0].observation_space.shape), name="states")
            self.action_taken = tf.placeholder(tf.float32, name="action_taken")
            self.advantage = tf.placeholder(tf.float32, name="advantage")

            L1 = None
            if self.config["feature_extraction"]:
                L1 = tf.contrib.layers.fully_connected(
                    inputs=self.states,
                    num_outputs=self.config["n_hidden_units"],
                    activation_fn=tf.tanh,
                    weights_initializer=tf.truncated_normal_initializer(mean=0.0, stddev=0.02),
                    biases_initializer=tf.zeros_initializer(),
                    scope="L1")
            else:
                L1 = self.states

            # Knowledge base shared by all tasks: features -> sparse units.
            knowledge_base = tf.Variable(tf.truncated_normal(
                [L1.get_shape()[-1].value, self.config["n_sparse_units"]],
                mean=0.0, stddev=0.02), name="knowledge_base")

            self.shared_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope="shared")

        # Every task has its own (sparse) representation
        sparse_representations = [
            tf.Variable(tf.truncated_normal([self.config["n_sparse_units"], self.nA], mean=0.0, stddev=0.02),
                        name="sparse%d" % i)
            for i in range(self.n_tasks)
        ]

        # Per-task policy: softmax(L1 @ (knowledge_base @ sparse_i)).
        self.probs_tensors = [tf.nn.softmax(tf.matmul(L1, tf.matmul(knowledge_base, s))) for s in sparse_representations]
        self.action_tensors = [tf.squeeze(tf.multinomial(tf.log(probs), 1)) for probs in self.probs_tensors]

        self.optimizer = tf.train.RMSPropOptimizer(
            learning_rate=self.config["learning_rate"],
            decay=self.config["decay"],
            epsilon=self.config["epsilon"]
        )

        # accum_grads[0] accumulates the shared vars' gradients,
        # accum_grads[i+1] those of task i's sparse representation.
        net_vars = self.shared_vars + sparse_representations
        self.accum_grads = create_accumulative_gradients_op(net_vars, 0)

        self.loss = tf.placeholder("float", name="loss")
        summary_loss = tf.summary.scalar("Loss", self.loss)
        self.rewards = tf.placeholder("float", name="Rewards")
        summary_rewards = tf.summary.scalar("Reward", self.rewards)
        self.episode_lengths = tf.placeholder("float", name="Episode_lengths")
        summary_episode_lengths = tf.summary.scalar("Length", self.episode_lengths)
        self.summary_op = tf.summary.merge([summary_loss, summary_rewards, summary_episode_lengths])

        self.writers = []
        self.losses = []

        # L1 penalty encourages sparsity of each task's representation.
        regularizer = tf.contrib.layers.l1_regularizer(.05)
        for i, probabilities in enumerate(self.probs_tensors):
            # REINFORCE loss: -sum(log pi(a|s) * advantage) + sparsity penalty.
            good_probabilities = tf.reduce_sum(
                tf.multiply(probabilities, tf.one_hot(tf.cast(self.action_taken, tf.int32), self.nA)),
                reduction_indices=[1])
            eligibility = tf.log(good_probabilities) * self.advantage
            loss = -tf.reduce_sum(eligibility) + regularizer(sparse_representations[i])
            self.losses.append(loss)

            writer = tf.summary.FileWriter(os.path.join(self.monitor_path, "task" + str(i)), self.session.graph)
            self.writers.append(writer)

        # An add op for every task & its loss
        self.add_accum_grads = []
        for i, loss in enumerate(self.losses):
            # Use all variables if the switch tasks experiment is disactivated or it's not the last task
            all_vars = self.config["switch_at_iter"] is None or i != len(self.losses) - 1
            self.add_accum_grads.append(add_accumulative_gradients_op(
                (self.shared_vars if all_vars else []) + [sparse_representations[i]],
                ([self.accum_grads[0]] if all_vars else []) + [self.accum_grads[i + 1]],
                loss,
                i
            ))

        self.apply_gradients = self.optimizer.apply_gradients(
            zip(self.accum_grads, net_vars))
        self.reset_accum_grads = reset_accumulative_gradients_op(net_vars, self.accum_grads, 0)

        self.init_op = tf.global_variables_initializer()

    def _initialize(self):
        self.session.run(self.init_op)

    def learn(self):
        """Run learning algorithm"""
        self._initialize()
        reporter = Reporter()
        config = self.config
        total_n_trajectories = np.zeros(len(self.envs))
        for iteration in range(config["n_iter"]):
            # Gradients are accumulated across tasks within one iteration,
            # then applied once at the end.
            self.session.run([self.reset_accum_grads])
            for i, task_runner in enumerate(self.task_runners):
                if self.config["switch_at_iter"] is not None:
                    # Before the switch: train all tasks but the last;
                    # after the switch: train only the last task.
                    if iteration >= self.config["switch_at_iter"] and i != (len(self.task_runners) - 1):
                        continue
                    elif iteration < self.config["switch_at_iter"] and i == len(self.task_runners) - 1:
                        continue
                # Collect trajectories until we get timesteps_per_batch total timesteps
                trajectories = task_runner.get_trajectories()
                total_n_trajectories[i] += len(trajectories)
                all_state = np.concatenate([trajectory["state"] for trajectory in trajectories])
                # Compute discounted sums of rewards
                rets = [discount_rewards(trajectory["reward"], config["gamma"]) for trajectory in trajectories]
                max_len = max(len(ret) for ret in rets)
                padded_rets = [np.concatenate([ret, np.zeros(max_len - len(ret))]) for ret in rets]
                # Compute time-dependent baseline
                baseline = np.mean(padded_rets, axis=0)
                # Compute advantage function
                advs = [ret - baseline[:len(ret)] for ret in rets]
                all_action = np.concatenate([trajectory["action"] for trajectory in trajectories])
                all_adv = np.concatenate(advs)

                # Do policy gradient update step
                episode_rewards = np.array([trajectory["reward"].sum() for trajectory in trajectories])  # episode total rewards
                episode_lengths = np.array([len(trajectory["reward"]) for trajectory in trajectories])  # episode lengths
                results = self.session.run([self.losses[i], self.add_accum_grads[i], self.accum_grads], feed_dict={
                    self.states: all_state,
                    self.action_taken: all_action,
                    self.advantage: all_adv
                })
                summary = self.session.run([self.summary_op], feed_dict={
                    self.loss: results[0],
                    self.rewards: np.mean(episode_rewards),
                    self.episode_lengths: np.mean(episode_lengths)
                })
                self.writers[i].add_summary(summary[0], iteration)
                self.writers[i].flush()

                print("Task:", i)
                reporter.print_iteration_stats(iteration, episode_rewards, episode_lengths, total_n_trajectories[i])
            # Apply accumulated gradient after all the gradients of each task are summed
            self.session.run([self.apply_gradients])

        if self.config["save_model"]:
            if not os.path.exists(self.monitor_path):
                os.makedirs(self.monitor_path)
            self.saver.save(self.session, os.path.join(self.monitor_path, "model"))
{ "content_hash": "6116e0ddb3841eb7382eb3fc76ab8b97", "timestamp": "", "source": "github", "line_count": 194, "max_line_length": 166, "avg_line_length": 48.93814432989691, "alnum_prop": 0.5977459448072466, "repo_name": "arnomoonens/DeepRL", "id": "3cd092fcc2a6850973d9f9111bc12973c9bac171", "size": "9518", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "yarll/agents/knowledgetransfer/knowledge_transfer.py", "mode": "33188", "license": "mit", "language": [ { "name": "Jupyter Notebook", "bytes": "6107" }, { "name": "Python", "bytes": "236593" } ], "symlink_target": "" }
""" """ __author__ = "hupantingxue(hupantingxue@126.com)" __version__ = "$v1.0" __date__ = "$Date: 2014/12/15 15:06" import sys import urllib2 import threading import Queue import time from optparse import OptionParser class ThreadPool(object): def __init__(self, urlpth, req_number, thread_num): """ initialize threads """ self.work_queue = Queue.Queue() self.threads = [] self.__init_work_queue(req_number, urlpth) self.__init_thread_pool(thread_num) def __init_thread_pool(self, thread_num): for i in range(thread_num): self.threads.append(MyThread(self.work_queue)) def __init_work_queue(self, req_number, urlpth): """ initialize work queue """ for i in range(req_number): self.add_job(do_job, urlpth) def add_job(self, func, args): """ add a job to the queue """ self.work_queue.put((func, args)) def wait_all_complete(self): """ wait for all the threads to be completed """ for item in self.threads: if item.isAlive(): item.join() class MyThread(threading.Thread): def __init__(self, work_queue): threading.Thread.__init__(self) self.work_queue = work_queue self.start() def run(self): while True: try: do, args = self.work_queue.get(block=False) do(args) self.work_queue.task_done()#notify the completement of the job except: break ERROR_NUM = 0 def do_job(args): try: html = urllib2.urlopen(args) except Exception, e: print e global ERROR_NUM ERROR_NUM += 1 def parse(): """parse the args""" parser = OptionParser(description="The scripte is used to simulate apache benchmark(sending requests and testing the server)") parser.add_option("-n", "--number", dest="num_of_req", action="store", help="Number of requests you want to send", default=1) parser.add_option("-c", "--concurrent", dest="con_req", action="store", help="Number of concurrent requests you set", default=1) parser.add_option("-u", "--url", dest="urlpth", action="store", help="The url of server you want to send to") (options, args) = parser.parse_args() return options def main(): """main function""" start = time.time() 
options = parse() if not options.urlpth: print 'Need to specify the parameter option "-u"!' if '-h' in sys.argv or '--help' in sys.argv: print __doc__ tp = ThreadPool(options.urlpth, int(options.num_of_req), int(options.con_req)) tp.wait_all_complete() end = time.time() print "===============================================" print "URL: ", options.urlpth print "Total Requests Number: ", options.num_of_req print "Concurrent Requests Number: ", options.con_req print "Total Time Cost(seconds): ", (end-start) print "Average Time Per Request: ", (end-start)/int(options.num_of_req) print "Average Requests Number Per Second: ", int(options.num_of_req)/(end-start) print "Total Error Number: ", ERROR_NUM if __name__ == '__main__': main()
{ "content_hash": "c839c08d2817734d7af7ca1c0416b971", "timestamp": "", "source": "github", "line_count": 108, "max_line_length": 132, "avg_line_length": 30.60185185185185, "alnum_prop": 0.5763993948562783, "repo_name": "hupantingxue/benchmark-scripts", "id": "d6971efff30917e2c255c96bee5fd96057f1cbee", "size": "3305", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "ab/ab.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "3305" }, { "name": "Shell", "bytes": "901" } ], "symlink_target": "" }
"""Plugin loader """ #------------------------------------------------------------------------------- __author__ = "Dmitry Dolzhenko" __email__ = "d.dolzhenko@gmail.com" #------------------------------------------------------------------------------- import os import inspect import unittest import glob import importlib import logging from jacis import utils #------------------------------------------------------------------------------- def __setup_root_logger(name): # create logger logger = logging.getLogger(name) # logger.setLevel(logging.WARNING) # create console handler and set level to debug ch = logging.StreamHandler() # ch.setLevel(logging.WARNING) # create formatter # formatter = logging.Formatter('%(name)s>>%(levelname)1s: %(message)s') formatter = logging.Formatter('%(message)s') ch.setFormatter(formatter) # add ch to logger logger.addHandler(ch) return logger __root_logger = __setup_root_logger('jacis') def verbosity_to_level(verbosity): if verbosity>= 2: return logging.DEBUG elif verbosity== 1: return logging.INFO elif verbosity== 0: return logging.WARNING assert verbosity>= 0, 'invalid logger verbosity' def get_logger(name): return logging.getLogger(name) def set_log_verbosity(verbosity): # print('level: ', verbosity_to_level(verbosity)) logging.getLogger().setLevel(verbosity_to_level(verbosity)) def get_self_path(): return os.path.dirname(__file__) ############################################################################### # dir def jacis_dir(): j = '.jacis' return j def jacis_global_dir(): j = '.jacis' return os.path.join(utils.home_dir(), j) ############################################################################### # plugins and tests def get_plugins(): from jacis import plugins members = (x for x in inspect.getmembers(plugins)) modules = (x for x in members if inspect.ismodule(x[1])) plugins = (x for x in modules if hasattr(x[1], "jacis_plugin")) return dict(plugins) def get_test_module_names(): mask = '**/*_test.py' for filename in 
glob.iglob(mask, recursive=True): name = os.path.splitext(filename)[0] module_name = name.replace('\\', '.').replace('/', '.') yield module_name def get_tests(): suite = unittest.TestSuite() loader = unittest.TestLoader() for module_name in get_test_module_names(): module = importlib.import_module(module_name) tests = loader.loadTestsFromModule(module) suite.addTests(tests) return suite
{ "content_hash": "0b2ea0e72040a6420df9eb7c777e1d97", "timestamp": "", "source": "github", "line_count": 101, "max_line_length": 80, "avg_line_length": 26.00990099009901, "alnum_prop": 0.5572896840502475, "repo_name": "ddolzhenko/jacis", "id": "d00bdcc0292ac0188a1adc41acea471f29f6ee89", "size": "3801", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "jacis/core.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "209" }, { "name": "Python", "bytes": "56290" } ], "symlink_target": "" }
import pyttsx

# Queue two demo phrases with the default TTS engine, then speak them
# synchronously (runAndWait blocks until the queue is empty).
engine = pyttsx.init()
for phrase in ('Sally sells seashells by the seashore.',
               'The quick brown fox jumped over the lazy dog.'):
    engine.say(phrase)
engine.runAndWait()
{ "content_hash": "119ee247d59920a3830fbefaa5a1380e", "timestamp": "", "source": "github", "line_count": 7, "max_line_length": 59, "avg_line_length": 24.571428571428573, "alnum_prop": 0.7558139534883721, "repo_name": "gbiggs/ros_book_sample_code", "id": "2e839a4f4ac2f626e3acceb062bd651d7ed08771", "size": "195", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "chapter19/scripts/pyttsx_example.py", "mode": "33261", "license": "apache-2.0", "language": [ { "name": "C++", "bytes": "2000" }, { "name": "CMake", "bytes": "122354" }, { "name": "EmberScript", "bytes": "2726" }, { "name": "Python", "bytes": "59105" }, { "name": "Shell", "bytes": "338" } ], "symlink_target": "" }
'''
Approximate the Unix find(1) command and return a list of paths that
meet the specified criteria.

The options include match criteria:
    name    = file-glob                 # case sensitive
    iname   = file-glob                 # case insensitive
    regex   = file-regex                # case sensitive
    iregex  = file-regex                # case insensitive
    type    = file-types                # match any listed type
    user    = users                     # match any listed user
    group   = groups                    # match any listed group
    size    = [+-]number[size-unit]     # default unit = byte
    mtime   = interval                  # modified since date
    grep    = regex                     # search file contents
and/or actions:
    delete [= file-types]               # default type = 'f'
    exec    = command [arg ...]         # where {} is replaced by pathname
    print  [= print-opts]
and/or depth criteria:
    maxdepth = maximum depth to transverse in path
    mindepth = minimum depth to transverse before checking files or directories

The default action is 'print=path'.

file-glob:
    *                = match zero or more chars
    ?                = match any char
    [abc]            = match a, b, or c
    [!abc] or [^abc] = match anything except a, b, and c
    [x-y]            = match chars x through y
    [!x-y] or [^x-y] = match anything except chars x through y
    {a,b,c}          = match a or b or c

file-regex:
    a Python re (regular expression) pattern

file-types: a string of one or more of the following:
    a: all file types
    b: block device
    c: character device
    d: directory
    p: FIFO (named pipe)
    f: plain file
    l: symlink
    s: socket

users:
    a space and/or comma separated list of user names and/or uids

groups:
    a space and/or comma separated list of group names and/or gids

size-unit:
    b: bytes
    k: kilobytes
    m: megabytes
    g: gigabytes
    t: terabytes

interval:
    [<num>w] [<num>[d]] [<num>h] [<num>m] [<num>s]

    where:
        w: week
        d: day
        h: hour
        m: minute
        s: second

print-opts: a comma and/or space separated list of one or more of the
following:
    group: group name
    md5: MD5 digest of file contents
    mode: file permissions (as integer)
    mtime: last modification time (as time_t)
    name: file basename
    path: file absolute path
    size: file size in bytes
    type: file type
    user: user name
'''

# Import python libs
from __future__ import absolute_import, print_function
import logging
import os
import re
import stat
import shutil
import sys
import time
from subprocess import Popen, PIPE
try:
    import grp
    import pwd
    # TODO: grp and pwd are both used in the code, we better make sure that
    # that code never gets run if importing them does not succeed
except ImportError:
    pass

# Import 3rd-party libs
import salt.ext.six as six

# Import salt libs
import salt.utils
import salt.defaults.exitcodes
from salt.utils.filebuffer import BufferedReader

# Set up logger
log = logging.getLogger(__name__)

# Bit flags describing what information an option needs in order to match:
# the bare path, a stat() result, or the file contents.
_REQUIRES_PATH = 1
_REQUIRES_STAT = 2
_REQUIRES_CONTENTS = 4

# Bidirectional mapping between the single-letter type codes used in the
# 'type'/'delete' options and the stat file-type constants.
_FILE_TYPES = {'b': stat.S_IFBLK,
               'c': stat.S_IFCHR,
               'd': stat.S_IFDIR,
               'f': stat.S_IFREG,
               'l': stat.S_IFLNK,
               'p': stat.S_IFIFO,
               's': stat.S_IFSOCK,
               stat.S_IFBLK: 'b',
               stat.S_IFCHR: 'c',
               stat.S_IFDIR: 'd',
               stat.S_IFREG: 'f',
               stat.S_IFLNK: 'l',
               stat.S_IFIFO: 'p',
               stat.S_IFSOCK: 's'}

_INTERVAL_REGEX = re.compile(r'''
                             ^\s*
                             (?P<modifier>[+-]?)
                             (?: (?P<week>   \d+ (?:\.\d*)? ) \s* [wW]  )? \s*
                             (?: (?P<day>    \d+ (?:\.\d*)? ) \s* [dD]  )? \s*
                             (?: (?P<hour>   \d+ (?:\.\d*)? ) \s* [hH]  )? \s*
                             (?: (?P<minute> \d+ (?:\.\d*)? ) \s* [mM]  )? \s*
                             (?: (?P<second> \d+ (?:\.\d*)? ) \s* [sS]  )? \s*
                             $
                             ''',
                             flags=re.VERBOSE)


def _parse_interval(value):
    '''
    Convert an interval string like 1w3d6h into the number of seconds, time
    resolution (1 unit of the smallest specified time unit) and the modifier
    ('+', '-', or '').
        w = week
        d = day
        h = hour
        m = minute
        s = second
    '''
    match = _INTERVAL_REGEX.match(str(value))
    if match is None:
        raise ValueError('invalid time interval: \'{0}\''.format(value))

    result = 0
    resolution = None
    for name, multiplier in [('second', 1),
                             ('minute', 60),
                             ('hour', 60 * 60),
                             ('day', 60 * 60 * 24),
                             ('week', 60 * 60 * 24 * 7)]:
        if match.group(name) is not None:
            result += float(match.group(name)) * multiplier
            # The first matched group (scanning smallest to largest unit)
            # determines the resolution of the interval.
            if resolution is None:
                resolution = multiplier

    return result, resolution, match.group('modifier')


def _parse_size(value):
    '''
    Parse a size specification like '+1G' or '-512k' into an inclusive
    (min_size, max_size) byte range.
    '''
    scalar = value.strip()

    if scalar.startswith(('-', '+')):
        style = scalar[0]
        scalar = scalar[1:]
    else:
        style = '='

    if len(scalar) > 0:
        multiplier = {'b': 2 ** 0,
                      'k': 2 ** 10,
                      'm': 2 ** 20,
                      'g': 2 ** 30,
                      't': 2 ** 40}.get(scalar[-1].lower())
        if multiplier:
            scalar = scalar[:-1].strip()
        else:
            multiplier = 1
    else:
        multiplier = 1

    try:
        num = int(scalar) * multiplier
    except ValueError:
        try:
            num = int(float(scalar) * multiplier)
        except ValueError:
            raise ValueError('invalid size: "{0}"'.format(value))

    if style == '-':
        min_size = 0
        max_size = num
    elif style == '+':
        min_size = num
        max_size = six.MAXSIZE
    else:
        # Exact size: the whole range covered by the given unit, e.g.
        # size=1k matches anything from 1024 through 2047 bytes.
        min_size = num
        max_size = num + multiplier - 1

    return min_size, max_size


class Option(object):
    '''
    Abstract base class for all find options.
    '''
    def requires(self):
        return _REQUIRES_PATH


class NameOption(Option):
    '''
    Match files with a case-sensitive glob filename pattern.
    Note: this is the 'basename' portion of a pathname.
    The option name is 'name', e.g. {'name' : '*.txt'}.
    '''
    def __init__(self, key, value):
        self.regex = re.compile(value.replace('.', '\\.')
                                     .replace('?', '.?')
                                     .replace('*', '.*') + '$')

    def match(self, dirname, filename, fstat):
        return self.regex.match(filename)


class InameOption(Option):
    '''
    Match files with a case-insensitive glob filename pattern.
    Note: this is the 'basename' portion of a pathname.
    The option name is 'iname', e.g. {'iname' : '*.TXT'}.
    '''
    def __init__(self, key, value):
        self.regex = re.compile(value.replace('.', '\\.')
                                     .replace('?', '.?')
                                     .replace('*', '.*') + '$',
                                re.IGNORECASE)

    def match(self, dirname, filename, fstat):
        return self.regex.match(filename)


class RegexOption(Option):
    '''
    Match files with a case-sensitive regular expression.
    Note: this is the 'basename' portion of a pathname.
    The option name is 'regex', e.g. {'regex' : '.*\\.txt'}.
    '''
    def __init__(self, key, value):
        try:
            self.regex = re.compile(value)
        except re.error:
            raise ValueError('invalid regular expression: "{0}"'.format(value))

    def match(self, dirname, filename, fstat):
        return self.regex.match(filename)


class IregexOption(Option):
    '''
    Match files with a case-insensitive regular expression.
    Note: this is the 'basename' portion of a pathname.
    The option name is 'iregex', e.g. {'iregex' : '.*\\.txt'}.
    '''
    def __init__(self, key, value):
        try:
            self.regex = re.compile(value, re.IGNORECASE)
        except re.error:
            raise ValueError('invalid regular expression: "{0}"'.format(value))

    def match(self, dirname, filename, fstat):
        return self.regex.match(filename)


class TypeOption(Option):
    '''
    Match files by their file type(s).
    The file type(s) are specified as an optionally comma and/or space
    separated list of letters.
        b = block device
        c = character device
        d = directory
        f = regular (plain) file
        l = symbolic link
        p = FIFO (named pipe)
        s = socket
    The option name is 'type', e.g. {'type' : 'd'} or {'type' : 'bc'}.
    '''
    def __init__(self, key, value):
        # remove whitespace and commas
        value = "".join(value.strip().replace(',', '').split())
        self.ftypes = set()
        for ftype in value:
            try:
                self.ftypes.add(_FILE_TYPES[ftype])
            except KeyError:
                raise ValueError('invalid file type "{0}"'.format(ftype))

    def requires(self):
        return _REQUIRES_STAT

    def match(self, dirname, filename, fstat):
        return stat.S_IFMT(fstat[stat.ST_MODE]) in self.ftypes


class OwnerOption(Option):
    '''
    Match files by their owner name(s) and/or uid(s), e.g. 'root'.
    The names are a space and/or comma separated list of names and/or
    integers.  A match occurs when the file's uid matches any user specified.
    The option name is 'owner', e.g. {'owner' : 'root'}.
    '''
    def __init__(self, key, value):
        self.uids = set()
        for name in value.replace(',', ' ').split():
            if name.isdigit():
                self.uids.add(int(name))
            else:
                try:
                    # BUG FIX: look up each individual name, not the whole
                    # option string (this previously passed ``value``, which
                    # broke multi-user lists like 'root, daemon').
                    self.uids.add(pwd.getpwnam(name).pw_uid)
                except KeyError:
                    raise ValueError('no such user "{0}"'.format(name))

    def requires(self):
        return _REQUIRES_STAT

    def match(self, dirname, filename, fstat):
        return fstat[stat.ST_UID] in self.uids


class GroupOption(Option):
    '''
    Match files by their group name(s) and/or gid(s), e.g. 'admin'.
    The names are a space and/or comma separated list of names and/or
    integers.  A match occurs when the file's gid matches any group specified.
    The option name is 'group', e.g. {'group' : 'admin'}.
    '''
    def __init__(self, key, value):
        self.gids = set()
        for name in value.replace(',', ' ').split():
            if name.isdigit():
                self.gids.add(int(name))
            else:
                try:
                    # BUG FIX: look up each individual name, not the whole
                    # option string (this previously passed ``value``, which
                    # broke multi-group lists like 'wheel, staff').
                    self.gids.add(grp.getgrnam(name).gr_gid)
                except KeyError:
                    raise ValueError('no such group "{0}"'.format(name))

    def requires(self):
        return _REQUIRES_STAT

    def match(self, dirname, filename, fstat):
        return fstat[stat.ST_GID] in self.gids


class SizeOption(Option):
    '''
    Match files by their size.
    Prefix the size with '-' to find files the specified size and smaller.
    Prefix the size with '+' to find files the specified size and larger.
    Without the +/- prefix, match the exact file size.
    The size can be suffixed with (case-insensitive) suffixes:
        b = bytes
        k = kilobytes
        m = megabytes
        g = gigabytes
        t = terabytes
    The option name is 'size', e.g. {'size' : '+1G'}.
    '''
    def __init__(self, key, value):
        self.min_size, self.max_size = _parse_size(value)

    def requires(self):
        return _REQUIRES_STAT

    def match(self, dirname, filename, fstat):
        return self.min_size <= fstat[stat.ST_SIZE] <= self.max_size


class MtimeOption(Option):
    '''
    Match files modified since the specified time.
    The option name is 'mtime', e.g. {'mtime' : '3d'}.
    The value format is [<num>w] [<num>[d]] [<num>h] [<num>m] [<num>s]
    where num is an integer or float and the case-insensitive suffixes are:
        w = week
        d = day
        h = hour
        m = minute
        s = second
    Whitespace is ignored in the value.
    '''
    def __init__(self, key, value):
        secs, resolution, modifier = _parse_interval(value)
        # Truncate "now - interval" to the interval's resolution so that,
        # e.g., '1d' compares against whole days.
        self.mtime = time.time() - int(secs / resolution) * resolution
        self.modifier = modifier

    def requires(self):
        return _REQUIRES_STAT

    def match(self, dirname, filename, fstat):
        if self.modifier == '-':
            return fstat[stat.ST_MTIME] >= self.mtime
        else:
            return fstat[stat.ST_MTIME] <= self.mtime


class GrepOption(Option):
    '''Match files when a pattern occurs within the file.
    The option name is 'grep', e.g. {'grep' : '(foo)|(bar)'}.
    '''
    def __init__(self, key, value):
        try:
            self.regex = re.compile(value)
        except re.error:
            raise ValueError('invalid regular expression: "{0}"'.format(value))

    def requires(self):
        return _REQUIRES_CONTENTS | _REQUIRES_STAT

    def match(self, dirname, filename, fstat):
        # Only regular files have grep-able contents.
        if not stat.S_ISREG(fstat[stat.ST_MODE]):
            return None
        dfilename = os.path.join(dirname, filename)
        with BufferedReader(dfilename, mode='rb') as bread:
            for chunk in bread:
                if self.regex.search(chunk):
                    return dfilename
        return None


class PrintOption(Option):
    '''
    Return information about a matched file.
    Print options are specified as a comma and/or space separated list of
    one or more of the following:
        group  = group name
        md5    = MD5 digest of file contents
        mode   = file mode (as integer)
        mtime  = last modification time (as time_t)
        name   = file basename
        path   = file absolute path
        size   = file size in bytes
        type   = file type
        user   = user name
    '''
    def __init__(self, key, value):
        self.need_stat = False
        self.print_title = False
        self.fmt = []
        for arg in value.replace(',', ' ').split():
            self.fmt.append(arg)
            # Everything except 'name' and 'path' needs a stat() result.
            if arg not in ['name', 'path']:
                self.need_stat = True
        if len(self.fmt) == 0:
            self.fmt.append('path')

    def requires(self):
        return _REQUIRES_STAT if self.need_stat else _REQUIRES_PATH

    def execute(self, fullpath, fstat, test=False):
        result = []
        for arg in self.fmt:
            if arg == 'path':
                result.append(fullpath)
            elif arg == 'name':
                result.append(os.path.basename(fullpath))
            elif arg == 'size':
                result.append(fstat[stat.ST_SIZE])
            elif arg == 'type':
                result.append(
                    _FILE_TYPES.get(stat.S_IFMT(fstat[stat.ST_MODE]), '?')
                )
            elif arg == 'mode':
                # Last three octal digits, i.e. the permission bits.
                result.append(int(oct(fstat[stat.ST_MODE])[-3:]))
            elif arg == 'mtime':
                result.append(fstat[stat.ST_MTIME])
            elif arg == 'user':
                uid = fstat[stat.ST_UID]
                try:
                    result.append(pwd.getpwuid(uid).pw_name)
                except KeyError:
                    # No passwd entry: fall back to the numeric uid.
                    result.append(uid)
            elif arg == 'group':
                gid = fstat[stat.ST_GID]
                try:
                    result.append(grp.getgrgid(gid).gr_name)
                except KeyError:
                    # No group entry: fall back to the numeric gid.
                    result.append(gid)
            elif arg == 'md5':
                if stat.S_ISREG(fstat[stat.ST_MODE]):
                    md5digest = salt.utils.get_hash(fullpath, 'md5')
                    result.append(md5digest)
                else:
                    result.append('')

        if len(result) == 1:
            return result[0]
        else:
            return result


class DeleteOption(TypeOption):
    '''
    Deletes matched file.
    Delete options are one or more of the following:
        a: all file types
        b: block device
        c: character device
        d: directory
        p: FIFO (named pipe)
        f: plain file
        l: symlink
        s: socket
    '''
    def __init__(self, key, value):
        if 'a' in value:
            value = 'bcdpfls'
        # BUG FIX: use an explicit class in super() instead of
        # ``super(self.__class__, self)``, which recurses infinitely if
        # this class is ever subclassed.
        super(DeleteOption, self).__init__(key, value)

    def execute(self, fullpath, fstat, test=False):
        if test:
            return fullpath
        try:
            if os.path.isfile(fullpath) or os.path.islink(fullpath):
                os.remove(fullpath)
            elif os.path.isdir(fullpath):
                shutil.rmtree(fullpath)
        except (OSError, IOError) as exc:
            # Best effort: log the failure instead of silently swallowing
            # it, but keep returning None as before.
            log.error('Failed to delete %s: %s', fullpath, exc)
            return None
        return fullpath


class ExecOption(Option):
    '''
    Execute the given command, {} replaced by filename.
    Quote the {} if commands might include whitespace.
    '''
    def __init__(self, key, value):
        self.command = value

    def execute(self, fullpath, fstat, test=False):
        try:
            command = self.command.replace('{}', fullpath)
            # (A stray debug print of the tokenized command was removed
            # here; it polluted the action's stdout.)
            p = Popen(salt.utils.shlex_split(command),
                      stdout=PIPE,
                      stderr=PIPE)
            (out, err) = p.communicate()
            if err:
                log.error(
                    'Error running command: {0}\n\n{1}'.format(
                        command,
                        salt.utils.to_str(err)))
            return "{0}:\n{1}\n".format(command, salt.utils.to_str(out))
        except Exception as e:
            log.error(
                'Exception while executing command "{0}":\n\n{1}'.format(
                    command, e))
            return '{0}: Failed'.format(fullpath)


class Finder(object):
    '''
    Compile a set of find options into match criteria and actions, and
    walk a directory tree applying them.
    '''
    def __init__(self, options):
        self.actions = []
        self.maxdepth = None
        self.mindepth = 0
        self.test = False
        criteria = {_REQUIRES_PATH: list(),
                    _REQUIRES_STAT: list(),
                    _REQUIRES_CONTENTS: list()}
        if 'mindepth' in options:
            self.mindepth = options['mindepth']
            del options['mindepth']
        if 'maxdepth' in options:
            self.maxdepth = options['maxdepth']
            del options['maxdepth']
        if 'test' in options:
            self.test = options['test']
            del options['test']
        for key, value in six.iteritems(options):
            if key.startswith('_'):
                # this is a passthrough object, continue
                continue
            if value is None or len(str(value)) == 0:
                raise ValueError('missing value for "{0}" option'.format(key))
            try:
                # Dispatch each option to its *Option class by name, e.g.
                # 'name' -> NameOption.
                obj = globals()[key.title() + "Option"](key, value)
            except KeyError:
                raise ValueError('invalid option "{0}"'.format(key))
            if hasattr(obj, 'match'):
                requires = obj.requires()
                if requires & _REQUIRES_CONTENTS:
                    criteria[_REQUIRES_CONTENTS].append(obj)
                elif requires & _REQUIRES_STAT:
                    criteria[_REQUIRES_STAT].append(obj)
                else:
                    criteria[_REQUIRES_PATH].append(obj)
            if hasattr(obj, 'execute'):
                self.actions.append(obj)
        if len(self.actions) == 0:
            self.actions.append(PrintOption('print', ''))
        # order criteria so that least expensive checks are done first
        self.criteria = criteria[_REQUIRES_PATH] + \
            criteria[_REQUIRES_STAT] + \
            criteria[_REQUIRES_CONTENTS]

    def find(self, path):
        '''
        Generate filenames in path that satisfy criteria specified in the
        constructor. This method is a generator and should be repeatedly called
        until there are no more results.
        '''
        for dirpath, dirs, files in os.walk(path):
            depth = dirpath[len(path) + len(os.path.sep):].count(os.path.sep)
            if depth >= self.mindepth:
                for name in dirs + files:
                    fstat = None
                    matches = True
                    fullpath = None
                    for criterion in self.criteria:
                        # stat() lazily, only when a criterion needs it;
                        # fall back to lstat() for broken symlinks.
                        if fstat is None and criterion.requires() & _REQUIRES_STAT:
                            fullpath = os.path.join(dirpath, name)
                            try:
                                fstat = os.stat(fullpath)
                            except OSError:
                                fstat = os.lstat(fullpath)
                        if not criterion.match(dirpath, name, fstat):
                            matches = False
                            break
                    if matches:
                        if fullpath is None:
                            fullpath = os.path.join(dirpath, name)
                        for action in self.actions:
                            if (fstat is None and
                                    action.requires() & _REQUIRES_STAT):
                                try:
                                    fstat = os.stat(fullpath)
                                except OSError:
                                    fstat = os.lstat(fullpath)
                            result = action.execute(fullpath, fstat,
                                                    test=self.test)
                            if result is not None:
                                yield result
            # Prune the walk once the maximum depth has been reached.
            if depth == self.maxdepth:
                dirs[:] = []


def find(path, options):
    '''
    Walk ``path`` applying the match criteria and actions described by the
    ``options`` dict (see the module docstring for the full option list).
    Yields one result per matched file per action; with the default
    'print=path' action this is the matching pathnames.
    '''
    finder = Finder(options)
    for path in finder.find(path):
        yield path


def _main():
    if len(sys.argv) < 2:
        sys.stderr.write('usage: {0} path [options]\n'.format(sys.argv[0]))
        sys.exit(salt.defaults.exitcodes.EX_USAGE)

    path = sys.argv[1]
    criteria = {}

    for arg in sys.argv[2:]:
        key, value = arg.split('=')
        criteria[key] = value
    try:
        finder = Finder(criteria)
    except ValueError as ex:
        sys.stderr.write('error: {0}\n'.format(ex))
        sys.exit(salt.defaults.exitcodes.EX_GENERIC)

    for result in finder.find(path):
        print(result)


if __name__ == '__main__':
    _main()
{ "content_hash": "84893bcb8173286772a7ca1f5effa45c", "timestamp": "", "source": "github", "line_count": 698, "max_line_length": 84, "avg_line_length": 32.336676217765046, "alnum_prop": 0.5154401665854415, "repo_name": "stephane-martin/salt-debian-packaging", "id": "2c0bc43466470348ba94da97022b909fedd9111a", "size": "22595", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "salt-2016.3.3/salt/utils/find.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Batchfile", "bytes": "13798" }, { "name": "C", "bytes": "986" }, { "name": "Groff", "bytes": "13634346" }, { "name": "HTML", "bytes": "39558" }, { "name": "Makefile", "bytes": "20902" }, { "name": "NSIS", "bytes": "22316" }, { "name": "PowerShell", "bytes": "38719" }, { "name": "Python", "bytes": "40857506" }, { "name": "SaltStack", "bytes": "58278" }, { "name": "Scheme", "bytes": "1790" }, { "name": "Shell", "bytes": "829927" }, { "name": "Tcl", "bytes": "6532" }, { "name": "TeX", "bytes": "11632" } ], "symlink_target": "" }
"""Models for pointing visitors at country-specific FOI portals.

The module-level ``advisor`` singleton maps a client IP to a matching
``FoiSite`` via GeoIP2, and degrades to a no-op ``DummyAdvisor`` when
GeoIP2 or the ``GEOIP_PATH`` setting is unavailable.
"""
import logging

from django.db import models
from django.conf import settings
from django.dispatch import receiver
from django.utils.translation import gettext_lazy as _

logger = logging.getLogger(__name__)


class FoiSite(models.Model):
    """A national FOI request site that can be suggested to visitors."""

    # country_code is normalised to upper case in save() so it matches the
    # upper-case ISO codes returned by GeoIP lookups.
    country_code = models.CharField(_("Country Code"), max_length=5)
    country_name = models.CharField(_("Country Name"), max_length=255)
    name = models.CharField(_("Name"), max_length=255)
    url = models.CharField(_("URL"), max_length=255)
    text = models.TextField(_("Text"), blank=True)
    enabled = models.BooleanField(_("Enabled"), default=True)

    class Meta:
        verbose_name = _("FOI Site")
        verbose_name_plural = _("FOI Sites")

    def __str__(self):
        return "%s (%s)" % (self.name, self.country_name)

    def save(self, *args, **kwargs):
        # Normalise before storing so lookups by GeoIP country code match.
        self.country_code = self.country_code.upper()
        super(FoiSite, self).save(*args, **kwargs)


# GeoIP2 support is optional; fall back gracefully when the GIS stack is
# not installed.
try:
    from django.contrib.gis.geoip2 import GeoIP2
except ImportError:
    GeoIP2 = None  # noqa


class SiteAdivsor(object):
    """Resolve a client IP to an enabled ``FoiSite`` via GeoIP2.

    (Note: class name typo is long-standing; kept for compatibility.)
    """

    def __init__(self):
        # self.sites is a lazily-built {country_code: FoiSite} cache;
        # None means "needs (re)loading from the database".
        self.geoip = self.get_geoip()
        self.sites = None

    def get_geoip(self):
        """Return a GeoIP2 instance, or None if unavailable.

        Returns None implicitly when GeoIP2() raises (e.g. missing or
        unreadable database files); the error is logged.
        """
        if GeoIP2 is None:
            return None
        try:
            return GeoIP2()
        except Exception as e:
            logger.exception(e)

    def update(self):
        """(Re)build the country_code -> FoiSite cache from the database."""
        sites = FoiSite.objects.filter(enabled=True)
        self.sites = dict([(f.country_code, f) for f in sites])

    def refresh(self):
        """Invalidate the cache; it is rebuilt on the next get_site() call."""
        self.sites = None

    def get_site(self, ip):
        """Return the FoiSite for the IP's country, or None.

        Localhost is ignored. On GeoIP lookup failure the GeoIP2 object is
        recreated (it may have gone stale) and None is returned for this
        request.
        """
        if self.sites is None:
            self.update()
        if ip == "127.0.0.1":
            return None
        try:
            if self.geoip is None:
                # GeoIP2 construction may have failed earlier; retry once.
                self.geoip = self.get_geoip()
                if self.geoip is None:
                    return
            result = self.geoip.country(ip)
        except Exception as e:
            logger.warning(e)
            # try recreating the geoIP2 object
            self.geoip = self.get_geoip()
            return None
        return self.sites.get(result["country_code"], None)


class DummyAdvisor(object):
    """No-op stand-in used when GeoIP2 is not configured."""

    def refresh(self):
        pass

    def get_site(self, ip):
        pass


# Choose the advisor implementation once at import time, based on whether
# GeoIP2 is importable and GEOIP_PATH is configured.
if GeoIP2 and getattr(settings, "GEOIP_PATH", False):
    advisor = SiteAdivsor()
else:
    advisor = DummyAdvisor()


@receiver(models.signals.post_save, sender=FoiSite,
          dispatch_uid="foisite_saved")
def foisite_saved(instance=None, created=False, **kwargs):
    """Invalidate the advisor cache whenever a FoiSite is saved."""
    advisor.refresh()
{ "content_hash": "30f02e10c6abac6a099a6611c83c5982", "timestamp": "", "source": "github", "line_count": 92, "max_line_length": 81, "avg_line_length": 26.91304347826087, "alnum_prop": 0.6042003231017771, "repo_name": "fin/froide", "id": "5549c425a9536adf1f650d66bde511e740d927c4", "size": "2476", "binary": false, "copies": "1", "ref": "refs/heads/main", "path": "froide/foisite/models.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "302838" }, { "name": "JavaScript", "bytes": "47357" }, { "name": "Makefile", "bytes": "535" }, { "name": "Python", "bytes": "1706123" }, { "name": "SCSS", "bytes": "39397" }, { "name": "TypeScript", "bytes": "57910" }, { "name": "Vue", "bytes": "218866" } ], "symlink_target": "" }
""" Convergence loop for a node-specific dataset agent. In practice most of the code is generic, but a few bits assume this agent is node-specific. The convergence agent runs a loop that attempts to converge the local state with the desired configuration as transmitted by the control service. This involves two state machines: ClusterStatus and ConvergenceLoop. The ClusterStatus state machine receives inputs from the connection to the control service, and sends inputs to the ConvergenceLoop state machine. """ from zope.interface import implementer from eliot import ActionType, Field, writeFailure, MessageType from eliot.twisted import DeferredContext from characteristic import attributes from machinist import ( trivialInput, TransitionTable, constructFiniteStateMachine, MethodSuffixOutputer, ) from twisted.application.service import MultiService from twisted.python.constants import Names, NamedConstant from twisted.internet.protocol import ReconnectingClientFactory from twisted.protocols.tls import TLSMemoryBIOFactory from . import run_state_change from ..control import ( NodeStateCommand, IConvergenceAgent, AgentAMP, ) class ClusterStatusInputs(Names): """ Inputs to the cluster status state machine. """ # The client has connected to the control service: CONNECTED_TO_CONTROL_SERVICE = NamedConstant() # A status update has been received from the control service: STATUS_UPDATE = NamedConstant() # THe client has disconnected from the control service: DISCONNECTED_FROM_CONTROL_SERVICE = NamedConstant() # The system is shutting down: SHUTDOWN = NamedConstant() @attributes(["client"]) class _ConnectedToControlService( trivialInput(ClusterStatusInputs.CONNECTED_TO_CONTROL_SERVICE)): """ A rich input indicating the client has connected. :ivar AMP client: An AMP client connected to the control service. 
""" @attributes(["configuration", "state"]) class _StatusUpdate(trivialInput(ClusterStatusInputs.STATUS_UPDATE)): """ A rich input indicating the cluster status has been received from the control service. :ivar Deployment configuration: Desired cluster configuration. :ivar Deployment state: Actual cluster state. """ class ClusterStatusStates(Names): """ States of the cluster status state machine. """ # The client is currently disconnected: DISCONNECTED = NamedConstant() # The client is connected, we don't know cluster status: IGNORANT = NamedConstant() # The client is connected and we know the cluster status: KNOWLEDGEABLE = NamedConstant() # The system is shut down: SHUTDOWN = NamedConstant() class ClusterStatusOutputs(Names): """ Outputs of the cluster status state machine. """ # Store the AMP protocol instance connected to the server: STORE_CLIENT = NamedConstant() # Notify the convergence loop state machine of new cluster status: UPDATE_STATUS = NamedConstant() # Stop the convergence loop state machine: STOP = NamedConstant() # Disconnect the AMP client: DISCONNECT = NamedConstant() class ClusterStatus(object): """ World object for cluster state machine, executing the actions indicated by the outputs. :ivar AMP client: The latest AMP protocol instance to connect to the control service. Initially ``None``. """ def __init__(self, convergence_loop_fsm): """ :param convergence_loop_fsm: An convergence loop FSM as output by ``build_convergence_loop_fsm``. 
""" self.convergence_loop_fsm = convergence_loop_fsm self.client = None def output_STORE_CLIENT(self, context): self.client = context.client def output_UPDATE_STATUS(self, context): self.convergence_loop_fsm.receive( _ClientStatusUpdate(client=self.client, configuration=context.configuration, state=context.state)) def output_STOP(self, context): self.convergence_loop_fsm.receive(ConvergenceLoopInputs.STOP) def output_DISCONNECT(self, context): self.client.transport.loseConnection() self.client = None def build_cluster_status_fsm(convergence_loop_fsm): """ Create a new cluster status FSM. The automatic reconnection logic is handled by the ``AgentLoopService``; the world object here just gets notified of disconnects, it need schedule the reconnect itself. :param convergence_loop_fsm: A convergence loop FSM as output by ``build_convergence_loop_fsm``. """ S = ClusterStatusStates I = ClusterStatusInputs O = ClusterStatusOutputs table = TransitionTable() # We may be shut down in any state, in which case we disconnect if # necessary. 
table = table.addTransitions( S.DISCONNECTED, { # Store the client, then wait for cluster status to be sent # over AMP: I.CONNECTED_TO_CONTROL_SERVICE: ([O.STORE_CLIENT], S.IGNORANT), I.SHUTDOWN: ([], S.SHUTDOWN), }) table = table.addTransitions( S.IGNORANT, { # We never told agent to start, so no need to tell it to stop: I.DISCONNECTED_FROM_CONTROL_SERVICE: ([], S.DISCONNECTED), # Tell agent latest cluster status, implicitly starting it: I.STATUS_UPDATE: ([O.UPDATE_STATUS], S.KNOWLEDGEABLE), I.SHUTDOWN: ([O.DISCONNECT], S.SHUTDOWN), }) table = table.addTransitions( S.KNOWLEDGEABLE, { # Tell agent latest cluster status: I.STATUS_UPDATE: ([O.UPDATE_STATUS], S.KNOWLEDGEABLE), I.DISCONNECTED_FROM_CONTROL_SERVICE: ([O.STOP], S.DISCONNECTED), I.SHUTDOWN: ([O.STOP, O.DISCONNECT], S.SHUTDOWN), }) table = table.addTransitions( S.SHUTDOWN, { I.DISCONNECTED_FROM_CONTROL_SERVICE: ([], S.SHUTDOWN), I.STATUS_UPDATE: ([], S.SHUTDOWN), }) return constructFiniteStateMachine( inputs=I, outputs=O, states=S, initial=S.DISCONNECTED, table=table, richInputs=[_ConnectedToControlService, _StatusUpdate], inputContext={}, world=MethodSuffixOutputer(ClusterStatus(convergence_loop_fsm))) class ConvergenceLoopInputs(Names): """ Inputs for convergence loop FSM. """ # Updated references to latest AMP client, desired configuration and # cluster state: STATUS_UPDATE = NamedConstant() # Stop the convergence loop: STOP = NamedConstant() # Finished applying necessary changes to local state, a single # iteration of the convergence loop: ITERATION_DONE = NamedConstant() @attributes(["client", "configuration", "state"]) class _ClientStatusUpdate(trivialInput(ConvergenceLoopInputs.STATUS_UPDATE)): """ A rich input with a cluster status update - we are currently connected to the control service, and know latest desired configuration and cluster state. :ivar AMP client: An AMP client connected to the control service. :ivar Deployment configuration: Desired cluster configuration. 
:ivar Deployment state: Actual cluster state. """ class ConvergenceLoopStates(Names): """ Convergence loop FSM states. """ # The loop is stopped: STOPPED = NamedConstant() # Local state is being discovered and changes applied: CONVERGING = NamedConstant() # Local state is being converged, and once that is done we will # immediately stop: CONVERGING_STOPPING = NamedConstant() class ConvergenceLoopOutputs(Names): """ Converence loop FSM outputs. """ # Store AMP client, desired configuration and cluster state for later # use: STORE_INFO = NamedConstant() # Start an iteration of the covergence loop: CONVERGE = NamedConstant() _FIELD_CONNECTION = Field( u"connection", lambda client: repr(client), "The AMP connection to control service") _FIELD_LOCAL_CHANGES = Field( u"local_changes", repr, "Changes discovered in local state.") LOG_SEND_TO_CONTROL_SERVICE = ActionType( u"flocker:agent:send_to_control_service", [_FIELD_CONNECTION, _FIELD_LOCAL_CHANGES], [], "Send the local state to the control service.") _FIELD_CLUSTERSTATE = Field( u"cluster_state", repr, "The state of the cluster, according to control service.") _FIELD_CONFIGURATION = Field( u"desired_configuration", repr, "The configuration of the cluster according to the control service.") _FIELD_ACTIONS = Field( u"calculated_actions", repr, "The actions we decided to take to converge with configuration.") LOG_CONVERGE = ActionType( u"flocker:agent:converge", [_FIELD_CLUSTERSTATE, _FIELD_CONFIGURATION], [], "The convergence action within the loop.") LOG_CALCULATED_ACTIONS = MessageType( u"flocker:agent:converge:actions", [_FIELD_ACTIONS], "The actions we're going to attempt.") class ConvergenceLoop(object): """ World object for the convergence loop state machine, executing the actions indicated by the outputs from the state machine. :ivar AMP client: An AMP client connected to the control service. Initially ``None``. :ivar Deployment configuration: Desired cluster configuration. Initially ``None``. 
:ivar DeploymentState cluster_state: Actual cluster state. Initially ``None``. :ivar fsm: The finite state machine this is part of. """ def __init__(self, reactor, deployer): """ :param IReactorTime reactor: Used to schedule delays in the loop. :param IDeployer deployer: Used to discover local state and calculate necessary changes to match desired configuration. """ self.reactor = reactor self.deployer = deployer self.cluster_state = None def output_STORE_INFO(self, context): self.client, self.configuration, self.cluster_state = ( context.client, context.configuration, context.state) def output_CONVERGE(self, context): known_local_state = self.cluster_state.get_node( self.deployer.node_uuid, hostname=self.deployer.hostname) with LOG_CONVERGE(self.fsm.logger, cluster_state=self.cluster_state, desired_configuration=self.configuration).context(): d = DeferredContext( self.deployer.discover_state(known_local_state)) def got_local_state(state_changes): # Current cluster state is likely out of date as regards the local # state, so update it accordingly. for state in state_changes: self.cluster_state = state.update_cluster_state( self.cluster_state ) with LOG_SEND_TO_CONTROL_SERVICE( self.fsm.logger, connection=self.client, local_changes=list(state_changes)) as context: self.client.callRemote(NodeStateCommand, state_changes=state_changes, eliot_context=context) action = self.deployer.calculate_changes( self.configuration, self.cluster_state ) LOG_CALCULATED_ACTIONS(calculated_actions=action).write( self.fsm.logger) return run_state_change(action, self.deployer) d.addCallback(got_local_state) # If an error occurred we just want to log it and then try # converging again; hopefully next time we'll have more success. d.addErrback(writeFailure, self.fsm.logger, u"") # It would be better to have a "quiet time" state in the FSM and # transition to that next, then have a timeout input kick the machine # back around to the beginning of the loop in the FSM. 
However, we're # not going to keep this sleep-for-a-bit solution in the long term. # Instead, we'll be more event driven. So just going with the simple # solution and inserting a side-effect-y delay directly here. d.addCallback( lambda _: self.reactor.callLater( 1.0, self.fsm.receive, ConvergenceLoopInputs.ITERATION_DONE ) ) d.addActionFinish() def build_convergence_loop_fsm(reactor, deployer): """ Create a convergence loop FSM. :param IReactorTime reactor: Used to schedule delays in the loop. :param IDeployer deployer: Used to discover local state and calcualte necessary changes to match desired configuration. """ I = ConvergenceLoopInputs O = ConvergenceLoopOutputs S = ConvergenceLoopStates table = TransitionTable() table = table.addTransition( S.STOPPED, I.STATUS_UPDATE, [O.STORE_INFO, O.CONVERGE], S.CONVERGING) table = table.addTransitions( S.CONVERGING, { I.STATUS_UPDATE: ([O.STORE_INFO], S.CONVERGING), I.STOP: ([], S.CONVERGING_STOPPING), I.ITERATION_DONE: ([O.CONVERGE], S.CONVERGING), }) table = table.addTransitions( S.CONVERGING_STOPPING, { I.STATUS_UPDATE: ([O.STORE_INFO], S.CONVERGING), I.ITERATION_DONE: ([], S.STOPPED), }) loop = ConvergenceLoop(reactor, deployer) fsm = constructFiniteStateMachine( inputs=I, outputs=O, states=S, initial=S.STOPPED, table=table, richInputs=[_ClientStatusUpdate], inputContext={}, world=MethodSuffixOutputer(loop)) loop.fsm = fsm return fsm @implementer(IConvergenceAgent) @attributes(["reactor", "deployer", "host", "port"]) class AgentLoopService(object, MultiService): """ Service in charge of running the convergence loop. :ivar reactor: The reactor. :ivar IDeployer deployer: Deployer for discovering local state and then changing it. :ivar host: Host to connect to. :ivar port: Port to connect to. :ivar cluster_status: A cluster status FSM. :ivar factory: The factory used to connect to the control service. :ivar reconnecting_factory: The underlying factory used to connect to the control service, without the TLS wrapper. 
""" def __init__(self, context_factory): """ :param context_factory: TLS context factory for the AMP client. """ MultiService.__init__(self) convergence_loop = build_convergence_loop_fsm( self.reactor, self.deployer ) self.logger = convergence_loop.logger self.cluster_status = build_cluster_status_fsm(convergence_loop) self.reconnecting_factory = ReconnectingClientFactory.forProtocol( lambda: AgentAMP(self.reactor, self) ) self.factory = TLSMemoryBIOFactory(context_factory, True, self.reconnecting_factory) def startService(self): MultiService.startService(self) self.reactor.connectTCP(self.host, self.port, self.factory) def stopService(self): MultiService.stopService(self) self.reconnecting_factory.stopTrying() self.cluster_status.receive(ClusterStatusInputs.SHUTDOWN) # IConvergenceAgent methods: def connected(self, client): self.cluster_status.receive(_ConnectedToControlService(client=client)) def disconnected(self): self.cluster_status.receive( ClusterStatusInputs.DISCONNECTED_FROM_CONTROL_SERVICE) def cluster_updated(self, configuration, cluster_state): self.cluster_status.receive(_StatusUpdate(configuration=configuration, state=cluster_state))
{ "content_hash": "30d0e480c71d7c3fef9e26b5f706ed12", "timestamp": "", "source": "github", "line_count": 444, "max_line_length": 79, "avg_line_length": 35.583333333333336, "alnum_prop": 0.6668776504842079, "repo_name": "lukemarsden/flocker", "id": "209ad57f9c74dfb0408469e4e0bdcb7209741c0b", "size": "15861", "binary": false, "copies": "6", "ref": "refs/heads/master", "path": "flocker/node/_loop.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "2366120" }, { "name": "Ruby", "bytes": "6229" }, { "name": "Shell", "bytes": "3418" } ], "symlink_target": "" }
""" FlaskBB ======= FlaskBB is a forum software written in Python using the microframework Flask. And Easy to Setup ----------------- .. code:: bash $ python manage.py createall $ python manage.py runserver * Running on http://localhost:8080/ Resources --------- * `website <http://flaskbb.org>`_ * `source <https://github.com/sh4nks/flaskbb>`_ * `issues <https://github.com/sh4nks/flaskbb/issues>`_ """ from setuptools import setup setup( name='FlaskBB', version='0.1-dev', url='http://github.com/sh4nks/flaskbb/', license='BSD', author='sh4nks', author_email='sh4nks7@gmail.com', description='A forum software written with flask', long_description=__doc__, packages=['flaskbb'], include_package_data=True, zip_safe=False, platforms='any', install_requires=[ 'Babel', 'Flask', 'Flask-Cache', 'Flask-DebugToolbar', 'Flask-Login', 'Flask-Mail', 'Flask-Migrate', 'Flask-Plugins', 'Flask-Redis', 'Flask-SQLAlchemy', 'Flask-Script', 'Flask-Themes2', 'Flask-WTF', 'Flask-WhooshAlchemy', 'Flask-BabelEx', 'Jinja2', 'Mako', 'MarkupSafe', 'Pygments', 'SQLAlchemy', 'Unidecode', 'WTForms', 'Werkzeug', 'Whoosh', 'alembic', 'blinker', 'cov-core', 'coverage', 'itsdangerous', 'mistune', 'py', 'pytest', 'pytest-cov', 'pytest-random', 'pytz', 'redis', 'requests', 'simplejson', 'speaklater', 'sqlalchemy-utils' ], dependency_links=[ 'https://github.com/jshipley/Flask-WhooshAlchemy/archive/master.zip#egg=Flask-WhooshAlchemy', 'https://github.com/sh4nks/flask-babelex/tarball/master#egg=Flask-BabelEx' ], classifiers=[ 'Development Status :: 4 - Beta', 'Environment :: Web Environment', 'Intended Audience :: Developers, Users', 'License :: OSI Approved :: BSD License', 'Operating System :: OS Independent', 'Programming Language :: Python', 'Programming Language :: Python :: 3', 'Topic :: Internet :: WWW/HTTP :: Dynamic Content', 'Topic :: Software Development :: Libraries :: Python Modules' ] )
{ "content_hash": "b08b1e46cd515d4a410a57819f748a72", "timestamp": "", "source": "github", "line_count": 98, "max_line_length": 101, "avg_line_length": 24.408163265306122, "alnum_prop": 0.5535117056856187, "repo_name": "SeanChen0617/flaskbb-1", "id": "b6c4e08c50f182933c9d0d0657f94816f3525669", "size": "2392", "binary": false, "copies": "3", "ref": "refs/heads/master", "path": "setup.py", "mode": "33188", "license": "bsd-3-clause", "language": [ { "name": "CSS", "bytes": "17314" }, { "name": "HTML", "bytes": "140999" }, { "name": "JavaScript", "bytes": "96631" }, { "name": "Makefile", "bytes": "537" }, { "name": "Mako", "bytes": "412" }, { "name": "Python", "bytes": "291551" } ], "symlink_target": "" }
"""Set up common fixtures and helpers for pytest.""" from unittest.mock import Mock import pytest import voluptuous as vol from camacq.const import IMAGE_EVENT from camacq.control import Center from camacq.plugins import api as api_mod, sample as sample_mod SET_SAMPLE_SCHEMA = vol.Schema({vol.Required("name"): str}, extra=vol.ALLOW_EXTRA) @pytest.fixture(name="center") async def center_fixture(event_loop): """Give access to center via fixture.""" _center = Center(loop=event_loop) _center._track_tasks = True # pylint: disable=protected-access return _center @pytest.fixture(name="config") def config_fixture(): """Return a config.""" return {"test_api": None, "sample": {}} @pytest.fixture(name="api") async def api_fixture(center, config): """Set up a mock api.""" mock_api = MockApi() await api_mod.setup_module(center, config) api_mod.register_api(center, mock_api) return mock_api @pytest.fixture(name="sample") async def sample_fixture(center, config): """Set up a mock sample.""" mock_sample = MockSample() await sample_mod.setup_module(center, config) sample_mod.register_sample(center, mock_sample) return mock_sample class TestSampleEvent(sample_mod.SampleEvent): """Represent a test sample event.""" event_type = "test_sample_event" @property def feature(self): """Return a sample feature.""" return "test_feature" @property def sample(self): """Return the sample instance of the event.""" return self.data.get("container") class MockApi(api_mod.Api): """Represent a mock microscope API.""" def __init__(self): """Set up instance.""" self.calls = [] @property def name(self): """Return the name of the API.""" return "test_api" async def send(self, command, **kwargs): """Send a command to the microscope API. Parameters ---------- command : str The command to send. 
""" self.calls.append((self.send.__name__, command)) async def start_imaging(self): """Send a command to the microscope to start the imaging.""" self.calls.append((self.start_imaging.__name__,)) async def stop_imaging(self): """Send a command to the microscope to stop the imaging.""" self.calls.append((self.stop_imaging.__name__,)) class MockSample(sample_mod.Sample): """Represent a mock sample.""" def __init__(self): """Set up instance.""" self.image_events = [] self._images = {} self._values = {} self.mock_set_sample = Mock() @property def change_event(self): """:str: Return the image event type to listen to for the sample.""" return TestSampleEvent @property def images(self): """:dict: Return a dict with all images for the container.""" return self._images @property def image_event_type(self): """:str: Return the image event type to listen to for the sample.""" return IMAGE_EVENT @property def name(self): """Return the name of the sample.""" return "test" @property def set_sample_schema(self): """Return the validation schema of the set_sample method.""" return SET_SAMPLE_SCHEMA @property def values(self): """:dict: Return a dict with the values set for the container.""" return self._values async def on_image(self, center, event): """Handle image event for this sample.""" self.image_events.append(event) field_args = { "plate_name": event.plate_name, "well_x": event.well_x, "well_y": event.well_y, "field_x": event.field_x, "field_y": event.field_y, } await self.set_sample( "image", path=event.path, channel_id=event.channel_id, z_slice_id=event.z_slice_id, **field_args ) await self.set_sample("field", **field_args) async def _set_sample(self, name, values, **kwargs): """Set an image container of the sample. Returns ------- ImageContainer instance Return the ImageContainer instance that was updated. 
""" self.mock_set_sample(name, **values, **kwargs) if name == "image": sample = sample_mod.Image(values=values, **kwargs) else: sample = MockContainer(name, values, kwargs) return sample class MockContainer(sample_mod.ImageContainer): """A mock container for images.""" def __init__(self, name, values, attrs): """Set up instance.""" self._images = {} self._values = values self._name = name for attr, val in attrs.items(): setattr(self, attr, val) @property def change_event(self): """:Event: Return an event class to fire on container change.""" return TestSampleEvent @property def images(self): """:dict: Return a dict with all images for the container.""" return self._images @property def name(self): """:str: Return an identifying name for the container.""" return self._name @property def values(self): """:dict: Return a dict with the values set for the container.""" return self._values
{ "content_hash": "dc28a7e9710887a92e995247b02dc017", "timestamp": "", "source": "github", "line_count": 197, "max_line_length": 82, "avg_line_length": 27.741116751269036, "alnum_prop": 0.5961573650503202, "repo_name": "CellProfiling/cam_acq", "id": "24b8a0741ac4acd0d9b99c44bca5abc1c410b3e7", "size": "5465", "binary": false, "copies": "2", "ref": "refs/heads/master", "path": "tests/conftest.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Makefile", "bytes": "1767" }, { "name": "Python", "bytes": "150329" } ], "symlink_target": "" }
from solution import Solution n = 4 sol = Solution() res = sol.totalNQueens(n) print(res)
{ "content_hash": "2fe24967d24859d79232b466f3c330f0", "timestamp": "", "source": "github", "line_count": 6, "max_line_length": 29, "avg_line_length": 15.166666666666666, "alnum_prop": 0.7252747252747253, "repo_name": "zhlinh/leetcode", "id": "1ee36a8caa280600a305a91cacb4997cd5348350", "size": "138", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "0052.N-Queens II/test.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "Python", "bytes": "478111" } ], "symlink_target": "" }
from swgpy.object import * def create(kernel): result = Creature() result.template = "object/mobile/shared_dressed_feral_marauder.iff" result.attribute_template_id = 9 result.stfName("npc_name","marauder_base_female") #### BEGIN MODIFICATIONS #### #### END MODIFICATIONS #### return result
{ "content_hash": "07207dc6f9b3a8a4c531d080eabd5dcc", "timestamp": "", "source": "github", "line_count": 13, "max_line_length": 68, "avg_line_length": 23.615384615384617, "alnum_prop": 0.7003257328990228, "repo_name": "anhstudios/swganh", "id": "773f29680505699c3d58b14f8bc84b86d3a8f18e", "size": "452", "binary": false, "copies": "2", "ref": "refs/heads/develop", "path": "data/scripts/templates/object/mobile/shared_dressed_feral_marauder.py", "mode": "33188", "license": "mit", "language": [ { "name": "Batchfile", "bytes": "11887" }, { "name": "C", "bytes": "7699" }, { "name": "C++", "bytes": "2357839" }, { "name": "CMake", "bytes": "41264" }, { "name": "PLSQL", "bytes": "42065" }, { "name": "Python", "bytes": "7503510" }, { "name": "SQLPL", "bytes": "42770" } ], "symlink_target": "" }
import sublime import sublime_plugin import subprocess # Change this to the full path if clang-format is not on the path. binary = 'clang-format' # Change this to format according to other formatting styles # (see clang-format -help). style = 'LLVM' class ClangFormatCommand(sublime_plugin.TextCommand): def run(self, edit): encoding = self.view.encoding() if encoding == 'Undefined': encoding = 'utf-8' regions = [] command = [binary, '-style', style] for region in self.view.sel(): regions.append(region) region_offset = min(region.a, region.b) region_length = abs(region.b - region.a) command.extend(['-offset', str(region_offset), '-length', str(region_length)]) old_viewport_position = self.view.viewport_position() buf = self.view.substr(sublime.Region(0, self.view.size())) p = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE) output, error = p.communicate(buf.encode(encoding)) if not error: self.view.replace( edit, sublime.Region(0, self.view.size()), output.decode(encoding)) self.view.sel().clear() for region in regions: self.view.sel().add(region) # FIXME: Without the 10ms delay, the viewport sometimes jumps. sublime.set_timeout(lambda: self.view.set_viewport_position( old_viewport_position, False), 10) else: print error
{ "content_hash": "b7b1d0a1b9226511013aeefcca6750b9", "timestamp": "", "source": "github", "line_count": 41, "max_line_length": 71, "avg_line_length": 36.21951219512195, "alnum_prop": 0.6518518518518519, "repo_name": "indashnet/InDashNet.Open.UN2000", "id": "d41404ed0f5c993b65cf349da0ce48693be8a7d9", "size": "2205", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "android/external/clang/tools/clang-format/clang-format-sublime.py", "mode": "33188", "license": "apache-2.0", "language": [], "symlink_target": "" }
from collections import deque class RingBuffer(deque): """ inherits deque, pops the oldest data to make room for the newest data when size is reached """ def __init__(self, size): deque.__init__(self) self.size = size def full_append(self, item): deque.append(self, item) # full, pop the oldest item, left most item self.popleft() def append(self, item): deque.append(self, item) # max size reached, append becomes full_append if len(self) == self.size: self.append = self.full_append def get(self): """returns a list of size items (newest items)""" return list(self)
{ "content_hash": "ef5c15c00c5ddf1010bf6a6710901770", "timestamp": "", "source": "github", "line_count": 27, "max_line_length": 57, "avg_line_length": 25.85185185185185, "alnum_prop": 0.5931232091690545, "repo_name": "mahiso/poloniexlendingbot", "id": "889ce12e3d430a66d702385d409ff5edbab77425", "size": "916", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "coinlendingbot/RingBuffer.py", "mode": "33188", "license": "mit", "language": [ { "name": "HTML", "bytes": "14604" }, { "name": "JavaScript", "bytes": "16985" }, { "name": "Python", "bytes": "149711" }, { "name": "Shell", "bytes": "344" } ], "symlink_target": "" }
import sys import os import pybind11 import mpi4py from setuptools import setup, Extension import setuptools.command.build_py import sysconfig import subprocess #TODO: Maybe produce a meta-package which download the library, mui4py branch and produce the binary package with some # extra configuration like MPI compiler and flags. # os.environ["CC"] = "mpic++" # os.environ["LD"] = "mpic++" # extra_compile_args = sysconfig.get_config_var('CFLAGS').split() # extra_compile_args += ["-Wall", "-std=c++11", "-O3"] # extra_link_args = sysconfig.get_config_var('LDFLAGS').split() # extra_link_args += ["-Wl,-undefined dynamic_lookup"] # includedir_mpi4py = os.path.dirname(sys.modules['mpi4py'].__file__) # includedir_mpi4py = os.path.join(includedir_mpi4py, "include") # includedir_pybind = pybind11.get_include() # mui4py_mod = Extension('mui4py_mod', # # Do this for MAC/LINUX/compiiler # define_macros = [('MAJOR_VERSION', '1'), # ('MINOR_VERSION', '0')], # include_dirs = [includedir_mpi4py, includedir_pybind], # extra_compile_args = extra_compile_args, # extra_link_args = extra_link_args, # sources = ['mui4py/mui4py.cpp']) # # class Build(setuptools.command.build_py.build_py): # """Customized setuptools build command - builds mui_mod on build.""" # def run(self): # protoc_command = ["make", "mui4py_mod"] # if subprocess.call(protoc_command) != 0: # sys.exit(-1) # setuptools.command.build_py.build_py.run(self) # setup( # cmdclass={ # 'build_py': Build, # }, # ext_modules = [mui4py_mod], name='mui4py', version='0.1', description='Python bindings for MUI coupling library.', url='', author='Eduardo Ramos Fernandez', author_email='eduardo.rf159@gmail.com', license='Apache v2', packages=['mui4py'], install_requires=[ 'mpi4py', 'numpy', ], include_package_data=True, zip_safe=False)
{ "content_hash": "b67de821bc981fef1365a0038e6ca0b1", "timestamp": "", "source": "github", "line_count": 57, "max_line_length": 118, "avg_line_length": 37.01754385964912, "alnum_prop": 0.6037914691943128, "repo_name": "yhtang/MUI", "id": "c8bf25ccf6176354166fd796209618440341fe9c", "size": "2110", "binary": false, "copies": "1", "ref": "refs/heads/master", "path": "wrappers/Python/setup.py", "mode": "33188", "license": "apache-2.0", "language": [ { "name": "C", "bytes": "6102" }, { "name": "C++", "bytes": "250232" }, { "name": "Fortran", "bytes": "4871" }, { "name": "Makefile", "bytes": "1209" } ], "symlink_target": "" }