repo_name | path | language | license | size | score
|---|---|---|---|---|---|
ahtn/keyplus | host-software/keyplus/keycodes/lang_map/French0.py | Python | mit | 2,191 | 0.00046
# Copyright 2018 jem@seethis.link
# Licensed under the MIT license (http://opensource.org/licenses/MIT)
from hid_keycodes import *
lang = 'French'
country = 'Belgium, Luxembourg'
scancode_map = {
KC_0: ('à', '0', '}', '', '', ''),
KC_1: ('&', '1', '|', '', '', ''),
KC_2: ('é', '2', '@', '', '', ''),
KC_3: ('"', '3', '#', '', '', ''),
KC_4: ("'", '4', '', '', '', ''),
KC_5: ('(', '5', '', '', '', ''),
KC_6: ('§', '6', '^', '', '', ''),
KC_7: ('è', '7', '', '', '', ''),
KC_8: ('!', '8', '', '', '', ''),
KC_9: ('ç', '9', '{', '', '', ''),
KC_A: ('q', 'Q', '', '', '', ''),
KC_B: ('b', 'B', '', '', '', ''),
KC_C: ('c', 'C', '', '', '', ''),
    KC_D: ('d', 'D', '', '', '', ''),
KC_E: ('e', 'E', '€', '', '', ''),
KC_F: ('f', 'F', '', '', '', ''),
KC_G: ('g', 'G', '', '', '', ''),
    KC_H: ('h', 'H', '', '', '', ''),
KC_I: ('i', 'I', '', '', '', ''),
KC_J: ('j', 'J', '', '', '', ''),
KC_K: ('k', 'K', '', '', '', ''),
KC_L: ('l', 'L', '', '', '', ''),
KC_M: (',', '?', '', '', '', ''),
KC_N: ('n', 'N', '', '', '', ''),
KC_O: ('o', 'O', '', '', '', ''),
KC_P: ('p', 'P', '', '', '', ''),
KC_Q: ('a', 'A', '', '', '', ''),
KC_R: ('r', 'R', '', '', '', ''),
KC_S: ('s', 'S', '', '', '', ''),
KC_T: ('t', 'T', '', '', '', ''),
KC_U: ('u', 'U', '', '', '', ''),
KC_V: ('v', 'V', '', '', '', ''),
KC_W: ('z', 'Z', '', '', '', ''),
KC_X: ('x', 'X', '', '', '', ''),
KC_Y: ('y', 'Y', '', '', '', ''),
KC_Z: ('w', 'W', '', '', '', ''),
KC_APOSTROPHE: ('ù', '%', '́', '', '', ''),
KC_COMMA: (';', '.', '', '', '', ''),
KC_EQUAL: ('-', '_', '', '', '', ''),
KC_FORWARD_SLASH: ('=', '+', '̃', '', '', ''),
KC_GRAVE: ('²', '³', '', '', '', ''),
KC_ISO_BACK_SLASH: ('<', '>', '\\', '', '', ''),
KC_ISO_HASH: ('µ', '£', '̀', '', '', ''),
KC_LEFT_BRACKET: ('̂', '̈', '[', '', '', ''),
KC_MINUS: (')', '°', '', '', '', ''),
KC_PERIOD: (':', '/', '', '', '', ''),
KC_RIGHT_BRACKET: ('$', '*', ']', '', '', ''),
KC_SEMICOLON: ('m', 'M', '', '', '', ''),
KC_SPACEBAR: (' ', ' ', '', '', '', ''),
}
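A minimal sketch of how a scancode map like this can be queried. The tuple layout (plain, shifted, AltGr, plus three unused slots) is inferred from the entries above; KC_A below is a hypothetical stand-in for the real constant defined in hid_keycodes.

# Hedged usage sketch -- KC_A is an assumed value, not the real hid_keycodes constant.
KC_A = 0x04  # assumed HID usage id for the physical 'A' key
demo_map = {KC_A: ('q', 'Q', '', '', '', '')}  # Belgian French: physical A produces 'q'

def legend(keycode, layer=0):
    """Return the legend for a keycode on a layer (0=plain, 1=shift, 2=AltGr)."""
    return demo_map.get(keycode, ('',) * 6)[layer]

assert legend(KC_A) == 'q'
assert legend(KC_A, layer=1) == 'Q'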
Microvellum/Fluid-Designer | win64-vc/2.78/Python/bin/2.78/scripts/addons/object_edit_linked.py | Python | gpl-3.0 | 10,814 | 0.002312
# ***** BEGIN GPL LICENSE BLOCK *****
#
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ***** END GPL LICENCE BLOCK *****
bl_info = {
"name": "Edit Linked Library",
"author": "Jason van Gumster (Fweeb), Bassam Kurdali, Pablo Vazquez",
"version": (0, 8, 1),
"blender": (2, 74, 0),
"location": "View3D > Toolshelf > Edit Linked Library",
"description": "Allows editing of objects linked from a .blend library.",
"wiki_url": "http://wiki.blender.org/index.php/Extensions:2.6/Py/"
"Scripts/Object/Edit_Linked_Library",
"category": "Object",
}
import bpy
from bpy.app.handlers import persistent
import os
settings = {
"original_file": "",
"linked_file": "",
"linked_objects": [],
}
@persistent
def linked_file_check(context):
if settings["linked_file"] != "":
if os.path.samefile(settings["linked_file"], bpy.data.filepath):
print("Editing a linked library.")
bpy.ops.object.select_all(action='DESELECT')
for ob_name in settings["linked_objects"]:
bpy.data.objects[ob_name].select = True # XXX Assumes selected object is in the active scene
if len(settings["linked_objects"]) == 1:
bpy.context.scene.objects.active = bpy.data.objects[settings["linked_objects"][0]]
else:
# For some reason, the linked editing session ended
# (failed to find a file or opened a different file
# before returning to the originating .blend)
settings["original_file"] = ""
settings["linked_file"] = ""
class EditLinked(bpy.types.Operator):
"""Edit Linked Library"""
bl_idname = "object.edit_linked"
bl_label = "Edit Linked Library"
use_autosave = bpy.props.BoolProperty(
name="Autosave",
description="Save the current file before opening the linked library",
default=True)
use_instance = bpy.props.BoolProperty(
name="New Blender Instance",
description="Open in a new Blender instance",
default=False)
@classmethod
def poll(cls, context):
return settings["original_file"] == "" and context.active_object is not None and (
(context.active_object.dupli_group and
context.active_object.dupli_group.library is not None) or
(context.active_object.proxy and
context.active_object.proxy.library is not None) or
context.active_object.library is not None)
#return context.active_object is not None
def execute(self, context):
#print(bpy.context.active_object.library)
target = context.active_object
if target.dupli_group and target.dupli_group.library:
targetpath = target.dupli_group.library.filepath
settings["linked_objects"].extend({ob.name for ob in target.dupli_group.objects})
elif target.library:
targetpath = target.library.filepath
settings["linked_objects"].append(target.name)
elif target.proxy:
target = target.proxy
targetpath = target.library.filepath
settings["linked_objects"].append(target.name)
if targetpath:
print(target.name + " is linked to " + targetpath)
if self.use_autosave:
if not bpy.data.filepath:
# File is not saved on disk, better to abort!
self.report({'ERROR'}, "Current file does not exist on disk, we cannot autosave it, aborting")
return {'CANCELLED'}
bpy.ops.wm.save_mainfile()
settings["original_file"] = bpy.data.filepath
settings["linked_file"] = bpy.path.abspath(targetpath)
if self.use_instance:
import subprocess
try:
subprocess.Popen([bpy.app.binary_path, settings["linked_file"]])
except:
print("Error on the new Blender instance")
import traceback
traceback.print_exc()
else:
bpy.ops.wm.open_mainfile(filepath=settings["linked_file"])
print("Opened linked file!")
else:
self.report({'WARNING'}, target.name + " is not linked")
print(target.name + " is not linked")
return {'FINISHED'}
class ReturnToOriginal(bpy.types.Operator):
"""Load the original file"""
bl_idname = "wm.return_to_original"
bl_label = "Return to Original File"
use_autosave = bpy.props.BoolProperty(
name="Autosave",
description="Save the current file before opening original file",
default=True)
@classmethod
def poll(cls, context):
return (settings["original_file"] != "")
def execute(self, context):
if self.use_autosave:
bpy.ops.wm.save_mainfile()
bpy.ops.wm.open_mainfile(filepath=settings["original_file"])
settings["original_file"] = ""
settings["linked_objects"] = []
print("Back to the original!")
return {'FINISHED'}
# UI
# TODO:Add operators to the File menu?
# Hide the entire panel for non-linked objects?
class PanelLinkedEdit(bpy.types.Panel):
bl_label = "Edit Linked Library"
bl_space_type = "VIEW_3D"
bl_region_type = "TOOLS"
bl_category = "Relations"
bl_context = "objectmode"
@classmethod
def poll(cls, context):
return (context.active_object is not None) or (settings["original_file"] != "")
def draw(self, context):
layout = self.layout
scene = context.scene
icon = "OUTLINER_DATA_" + context.active_object.type
target = None
if context.active_object.proxy:
target = context.active_object.proxy
else:
target = context.active_object.dupli_group
if settings["original_file"] == "" and (
(target and
target.library is not None) or
context.active_object.library is not None):
if (target is not None):
props = layout.operator("object.edit_linked", icon="LINK_BLEND",
text="Edit Library: %s" % target.name)
else:
props = layout.operator("object.edit_linked", icon="LINK_BLEND",
text="Edit Library: %s" % context.active_object.name)
props.use_autosave = scene.use_autosave
props.use_instance = scene.use_instance
layout.prop(scene, "use_autosave")
layout.prop(scene, "use_instance")
if (target is not None):
layout.label(text="Path: %s" %
target.library.filepath)
else:
layout.label(text="Path: %s" %
context.active_object.library.filepath)
elif settings["original_file"] != "":
if scene.use_instance:
layout.operator("wm.return_to_original",
text="Reload Current File",
icon="FILE_REFRESH").use_autosave = False
layout.separator()
#XXX - This is for nested linked assets... but it only works
# when launching a new Blender instance. Nested links don't
#      currently work when using a single instance.
edmorley/django | django/core/files/locks.py | Python | bsd-3-clause | 3,512 | 0.000285
"""
Portable file locking utilities.
Based partially on an example by Jonathan Feinberg in the Python
Cookbook [1] (licensed under the Python Software License) and a ctypes port by
Anatoly Techtonik for Roundup [2] (license [3]).
[1] http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/65203
[2] http://sourceforge.net/p/roundup/code/ci/default/tree/roundup/backends/portalocker.py
[3] http://sourceforge.net/p/roundup/code/ci/default/tree/COPYING.txt
Example Usage::
>>> from django.core.files import locks
>>> with open('./file', 'wb') as f:
... locks.lock(f, locks.LOCK_EX)
...     f.write(b'Django')
"""
import os
__all__ = ('LOCK_EX', 'LOCK_SH', 'LOCK_NB', 'lock', 'unlock')
def _fd(f):
"""Get a filedescriptor from something which could be a file or an fd."""
return f.fileno() if hasattr(f, 'fileno') else f
if os.name == 'nt':
import msvcrt
from ctypes import (sizeof, c_ulong, c_void_p, c_int64,
Structure, Union, POINTER, windll, byref)
from ctypes.wintypes import BOOL, DWORD, HANDLE
LOCK_SH = 0 # the default
LOCK_NB = 0x1 # LOCKFILE_FAIL_IMMEDIATELY
LOCK_EX = 0x2 # LOCKFILE_EXCLUSIVE_LOCK
# --- Adapted from the pyserial project ---
# detect size of ULONG_PTR
if sizeof(c_ulong) != sizeof(c_void_p):
ULONG_PTR = c_int64
else:
ULONG_PTR = c_ulong
PVOID = c_void_p
# --- Union inside Structure by stackoverflow:3480240 ---
class _OFFSET(Structure):
_fields_ = [
('Offset', DWORD),
('OffsetHigh', DWORD)]
class _OFFSET_UNION(Union):
_anonymous_ = ['_offset']
_fields_ = [
('_offset', _OFFSET),
('Pointer', PVOID)]
class OVERLAPPED(Structure):
_anonymous_ = ['_offset_union']
_fields_ = [
('Internal', ULONG_PTR),
('InternalHigh', ULONG_PTR),
('_offset_union', _OFFSET_UNION),
('hEvent', HANDLE)]
LPOVERLAPPED = POINTER(OVERLAPPED)
# --- Define function prototypes for extra safety ---
LockFileEx = windll.kernel32.LockFileEx
LockFileEx.restype = BOOL
    LockFileEx.argtypes = [HANDLE, DWORD, DWORD, DWORD, DWORD, LPOVERLAPPED]
UnlockFileEx = windll.kernel32.UnlockFileEx
UnlockFileEx.restype = BOOL
UnlockFileEx.argtypes = [HANDLE, DWORD, DWORD, DWORD, LPOVERLAPPED]
def lock(f, flags):
hfile = msvcrt.get_osfhandle(_fd(f))
overlapped = OVERLAPPED()
ret = LockFileEx(hfile, flags, 0, 0, 0xFFFF0000, byref(overlapped))
return bool(ret)
def unlock(f):
hfile = msvcrt.get_osfhandle(_fd(f))
overlapped = OVERLAPPED()
        ret = UnlockFileEx(hfile, 0, 0, 0xFFFF0000, byref(overlapped))
return bool(ret)
else:
try:
import fcntl
LOCK_SH = fcntl.LOCK_SH # shared lock
LOCK_NB = fcntl.LOCK_NB # non-blocking
LOCK_EX = fcntl.LOCK_EX
except (ImportError, AttributeError):
# File locking is not supported.
LOCK_EX = LOCK_SH = LOCK_NB = 0
# Dummy functions that don't do anything.
def lock(f, flags):
# File is not locked
return False
def unlock(f):
# File is unlocked
return True
else:
def lock(f, flags):
ret = fcntl.flock(_fd(f), flags)
return ret == 0
def unlock(f):
ret = fcntl.flock(_fd(f), fcntl.LOCK_UN)
return ret == 0
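A short usage sketch for the API above, assuming the module is importable as django.core.files.locks: take a non-blocking exclusive lock and back off if another process holds it. On POSIX, fcntl.flock() raises OSError when LOCK_NB is set and the lock is contended, hence the try/except; the lock-file path is illustrative.

# Hedged sketch, not part of the original module.
from django.core.files import locks

with open('/tmp/demo.lock', 'wb') as f:
    try:
        locks.lock(f, locks.LOCK_EX | locks.LOCK_NB)
        f.write(b'owned by this process')
        locks.unlock(f)
    except OSError:
        print('lock is held elsewhere; skipping')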
endavis/bastproxy | plugins/core/events.py | Python | gpl-2.0 | 13,653 | 0.007691
"""
This plugin handles events.
You can register/unregister with events, raise events
## Using
### Registering an event from a plugin
* ```self.api('events.register')(eventname, function)```
### Unregistering an event
* ```self.api('events.unregister')(eventname, function)```
### Raising an event
* ```self.api('events.eraise')(eventname, argdictionary)```
"""
from __future__ import print_function
import libs.argp as argp
from plugins._baseplugin import BasePlugin
NAME = 'Event Handler'
SNAME = 'events'
PURPOSE = 'Handle events'
AUTHOR = 'Bast'
VERSION = 1
PRIORITY = 3
AUTOLOAD = True
class EFunc(object): # pylint: disable=too-few-public-methods
"""
a basic event class
"""
def __init__(self, func, funcplugin):
"""
init the class
"""
self.funcplugin = funcplugin
self.timesexecuted = 0
self.func = func
self.name = func.__name__
def execute(self, args):
"""
execute the event
"""
self.timesexecuted = self.timesexecuted + 1
return self.func(args)
def __str__(self):
"""
return a string representation of the function
"""
return '%-10s : %-15s' % (self.name, self.funcplugin)
def __eq__(self, other):
"""
check equality between two event functions
"""
if callable(other):
if other == self.func:
return True
try:
if self.func == other.func:
return True
except AttributeError:
return False
return False
class EventContainer(object):
"""
a container of functions for an event
"""
def __init__(self, plugin, name):
"""
init the class
"""
self.name = name
self.priod = {}
self.plugin = plugin
self.api = self.plugin.api
self.numraised = 0
def isregistered(self, func):
"""
check if a function is registered to this event
"""
for prio in self.priod:
if func in self.priod[prio]:
return True
return False
def isempty(self):
"""
check if an event has no functions registered
"""
for prio in self.priod:
if self.priod[prio]:
return False
return True
def register(self, func, funcplugin, prio=50):
"""
register a function to this event container
"""
if not prio:
prio = 50
if prio not in self.priod:
self.priod[prio] = []
eventfunc = EFunc(func, funcplugin)
if eventfunc not in self.priod[prio]:
self.priod[prio].append(eventfunc)
self.api('send.msg')('%s - register function %s with prio %s' \
% (self.name, eventfunc, prio), secondary=eventfunc.funcplugin)
return True
return False
def unregister(self, func):
"""
unregister a function from this event container
"""
for prio in self.priod:
if func in self.priod[prio]:
eventfunc = self.priod[prio][self.priod[prio].index(func)]
self.api('send.msg')('%s - unregister function %s with prio %s' \
% (self.name, eventfunc, prio), secondary=eventfunc.funcplugin)
self.priod[prio].remove(eventfunc)
return True
self.api('send.error')('Could not find function %s in event %s' % \
(func.__name__, self.name))
return False
def removeplugin(self, plugin):
"""
remove all functions related to a plugin
"""
removel = []
for prio in self.priod:
for eventfunc in self.priod[prio]:
if eventfunc.funcplugin == plugin:
removel.append(eventfunc)
for eventf in removel:
self.api('events.unregister')(self.name, eventf.func)
def detail(self):
"""
format a detail of the event
"""
tmsg = []
tmsg.append('%-13s : %s' % ('Event', self.name))
tmsg.append('%-13s : %s' % ('Raised', self.numraised))
tmsg.append('@B' + self.api('utils.center')('Registrations', '-', 60))
tmsg.append('%-4s : %-15s - %-s' % ('prio',
'plugin',
'function name'))
tmsg.append('@B' + '-' * 60)
funcmsg = []
tkeys = self.priod.keys()
tkeys.sort()
for prio in tkeys:
for eventfunc in self.priod[prio]:
funcmsg.append('%-4s : %-15s - %-s' % (prio,
eventfunc.funcplugin,
eventfunc.name))
if not funcmsg:
tmsg.append('None')
else:
tmsg.extend(funcmsg)
tmsg.append('')
return tmsg
def eraise(self, nargs, calledfrom):
"""
raise this event
"""
self.numraised = self.numraised + 1
if self.name != 'global_timer':
self.api('send.msg')('event %s raised by %s with args %s' % \
(self.name, calledfrom, nargs),
secondary=calledfrom)
keys = self.priod.keys()
if keys:
keys.sort()
for prio in keys:
for eventfunc in self.priod[prio][:]:
try:
tnargs = eventfunc.execute(nargs)
if tnargs:
nargs = tnargs
except Exception: # pylint: disable=broad-except
self.api('send.traceback')(
"error when calling function for event %s" % self.name)
return nargs
class Plugin(BasePlugin):
"""
  a class to manage events
"""
def __init__(self, *args, **kwargs):
BasePlugin.__init__(self, *args, **kwargs)
self.canreload = False
self.numglobalraised = 0
self.eventstats = {}
self.events = {}
self.pluginlookup = {}
self.api('api.add')('register', self.api_register)
self.api('api.add')('unregister', self.api_unregister)
self.api('api.add')('eraise', self.api_eraise)
self.api('api.add')('isregistered', self.api_isregistered)
self.api('api.add')('removeplugin', self.api_removeplugin)
self.api('api.add')('gete', self.api_getevent)
self.api('api.add')('detail', self.api_detail)
def load(self):
"""
load the module
"""
BasePlugin.load(self)
self.api('events.register')('log_plugin_loaded', self.logloaded)
self.api('events.eraise')('event_plugin_loaded', {})
parser = argp.ArgumentParser(add_help=False,
description='get details of an event')
parser.add_argument('event',
help='the event name to get details for',
default=[],
nargs='*')
self.api('commands.add')('detail',
self.cmd_detail,
parser=parser)
parser = argp.ArgumentParser(add_help=False,
description='list events and the ' \
'plugins registered with them')
parser.add_argument('match',
help='list only events that have this argument in their name',
default='',
nargs='?')
self.api('commands.add')('list',
self.cmd_list,
                             parser=parser)
parser = argp.ArgumentParser(add_help=False,
description='raise an event')
parser.add_argument('event',
                        help='the event to raise',
default='',
nargs='?')
self.api('commands.add')('raise',
self.cmd_raise,
parser=parser)
self.api('events.register')('plugin_unloaded', self.pluginunloaded, prio=10)
def pluginunloaded(self, args):
"""
a plugin was unloaded
"""
self.api('send.msg')('removing events for plugin %s' % args['name'],
secondary=args['name'])
self.api('%s.removeplugin' % self.sname)(args['name'])
# return the event, will have registered functions
def api_getevent(self, eventname):
""" return an event
@Yeventname@w = the event to return
this function returns an EventContainer object
"""
if eventname in self.events:
return self.events[eventname]
return None
def api_isregistered(self, eventname, func):
        """
        check if a function is registered to an event
        """
        if eventname in self.events:
            return self.events[eventname].isregistered(func)
        return False
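The priority scheme in EventContainer.eraise() reduces to a small, reusable pattern: handlers grouped by numeric priority, run in ascending order, each allowed to replace the argument dict seen by later handlers. A stand-alone sketch of just that idea (hypothetical handler names, independent of the plugin framework):

# Minimal priority-ordered dispatch, mirroring EventContainer.eraise().
def dispatch(handlers_by_prio, args):
    for prio in sorted(handlers_by_prio):
        for handler in handlers_by_prio[prio]:
            result = handler(args)
            if result:
                args = result  # a handler may rewrite args for later handlers
    return args

def enrich(args):
    args['enriched'] = True
    return args

def report(args):
    print('final handler saw: %s' % args)

dispatch({10: [enrich], 50: [report]}, {'line': 'hello'})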
lamdnhan/osf.io | website/project/views/comment.py | Python | apache-2.0 | 8,811 | 0.000454
# -*- coding: utf-8 -*-
import collections
import httplib as http
import pytz
from flask import request
from modularodm import Q
from framework.exceptions import HTTPError
from framework.auth.decorators import must_be_logged_in
from framework.auth.utils import privacy_info_handle
from framework.forms.utils import sanitize
from website import settings
from website.notifications.emails import notify
from website.filters import gravatar
from website.models import Guid, Comment
from website.project.decorators import must_be_contributor_or_public
from datetime import datetime
from website.project.model import has_anonymous_link
def resolve_target(node, guid):
if not guid:
return node
target = Guid.load(guid)
if target is None:
raise HTTPError(http.BAD_REQUEST)
return target.referent
def collect_discussion(target, users=None):
users = users or collections.defaultdict(list)
for comment in getattr(target, 'commented', []):
if not comment.is_deleted:
users[comment.user].append(comment)
collect_discussion(comment, users=users)
return users
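# Illustration (hypothetical objects -- the real ones are Comment model
# instances): if node.commented == [c1] with c1.user == alice, and
# c1.commented == [c2] with c2.user == bob, then collect_discussion(node)
# returns {alice: [c1], bob: [c2]}. Deleted comments are skipped (together
# with any replies beneath them), and replies are gathered by the
# recursive call.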
@must_be_contributor_or_public
def comment_discussion(**kwargs):
node = kwargs['node'] or kwargs['project']
auth = kwargs['auth']
users = collect_discussion(node)
anonymous = has_anonymous_link(node, auth)
# Sort users by comment frequency
# TODO: Allow sorting by recency, combination of frequency and recency
sorted_users = sorted(
users.keys(),
key=lambda item: len(users[item]),
reverse=True,
)
return {
'discussion': [
{
'id': privacy_info_handle(user._id, anonymous),
'url': privacy_info_handle(user.url, anonymous),
'fullname': privacy_info_handle(user.fullname, anonymous, name=True),
'isContributor': node.is_contributor(user),
'gravatarUrl': privacy_info_handle(
gravatar(
user, use_ssl=True, size=settings.GRAVATAR_SIZE_DISCUSSION,
),
anonymous
),
}
for user in sorted_users
]
}
def serialize_comment(comment, auth, anonymous=False):
return {
'id': comment._id,
'author': {
'id': privacy_info_handle(comment.user._id, anonymous),
'url': privacy_info_handle(comment.user.url, anonymous),
'name': privacy_info_handle(
comment.user.fullname, anonymous, name=True
),
'gravatarUrl': privacy_info_handle(
gravatar(
comment.user, use_ssl=True,
size=settings.GRAVATAR_SIZE_DISCUSSION
),
anonymous
),
},
'dateCreated': comment.date_created.isoformat(),
'dateModified': comment.date_modified.isoformat(),
'content': comment.content,
'hasChildren': bool(getattr(comment, 'commented', [])),
'canEdit': comment.user == auth.user,
'modified': comment.modified,
'isDeleted': comment.is_deleted,
'isAbuse': auth.user and auth.user._id in comment.reports,
}
def serialize_comments(record, auth, anonymous=False):
return [
serialize_comment(comment, auth, anonymous)
for comment in getattr(record, 'commented', [])
]
def kwargs_to_comment(kwargs, owner=False):
comment = Comment.load(kwargs.get('cid'))
if comment is None:
raise HTTPError(http.BAD_REQUEST)
if owner:
auth = kwargs['auth']
if auth.user != comment.user:
raise HTTPError(http.FORBIDDEN)
return comment
@must_be_logged_in
@must_be_contributor_or_public
def add_comment(**kwargs):
auth = kwargs['auth']
node = kwargs['node'] or kwargs['project']
if not node.comment_level:
raise HTTPError(http.BAD_REQUEST)
if not node.can_comment(auth):
raise HTTPError(http.FORBIDDEN)
guid = request.json.get('target')
target = resolve_target(node, guid)
content = request.json.get('content').strip()
content = sanitize(content)
if not content:
raise HTTPError(http.BAD_REQUEST)
if len(content) > settings.COMMENT_MAXLENGTH:
raise HTTPError(http.BAD_REQUEST)
comment = Comment.create(
auth=auth,
node=node,
target=target,
user=auth.user,
content=content,
)
comment.save()
context = dict(
gravatar_url=auth.user.gravatar_url,
content=content,
target_user=target.user if is_reply(target) else None,
parent_comment=target.content if is_reply(target) else "",
url=node.absolute_url
)
time_now = datetime.utcnow().replace(tzinfo=pytz.utc)
sent_subscribers = notify(
uid=node._id,
event="comments",
user=auth.user,
node=node,
timestamp=time_now,
**context
)
if is_reply(target):
if target.user and target.user not in sent_subscribers:
notify(
uid=target.user._id,
event='comment_replies',
user=auth.user,
node=node,
timestamp=time_now,
**context
)
return {
'comment': serialize_comment(comment, auth)
}, http.CREATED
def is_reply(target):
return isinstance(target, Comment)
@must_be_contributor_or_public
def list_comments(auth, **kwargs):
node = kwargs['node'] or kwargs['project']
anonymous = has_anonymous_link(node, auth)
guid = request.args.get('target')
target = resolve_target(node, guid)
serialized_comments = serialize_comments(target, auth, anonymous)
n_unread = 0
if node.is_contributor(auth.user):
if auth.user.comments_viewed_timestamp is None:
auth.user.comments_viewed_timestamp = {}
auth.user.save()
n_unread = n_unread_comments(target, auth.user)
return {
'comments': serialized_comments,
'nUnread': n_unread
}
def n_unread_comments(node, user):
"""Return the number of unread comments on a node for a user."""
default_timestamp = datetime(1970, 1, 1, 12, 0, 0)
view_timestamp = user.comments_viewed_timestamp.get(node._id, default_timestamp)
return Comment.find(Q('node', 'eq', node) &
Q('user', 'ne', user) &
Q('date_created', 'gt', view_timestamp) &
Q('date_modified', 'gt', view_timestamp)).count()
@must_be_logged_in
@must_be_contributor_or_public
def edit_comment(**kwargs):
auth = kwargs['auth']
comment = kwargs_to_comment(kwargs, owner=True)
content = request.json.get('content').strip()
content = sanitize(content)
if not content:
raise HTTPError(http.BAD_REQUEST)
if len(content) > settings.COMMENT_MAXLENGTH:
raise HTTPError(http.BAD_REQUEST)
comment.edit(
content=content,
auth=auth,
save=True
)
    return serialize_comment(comment, auth)
@must_be_logged_in
@must_be_contributor_or_public
def delete_comment(**kwargs):
auth = kwargs['auth']
comment = kwargs_to_comment(kwargs, owner=True)
    comment.delete(auth=auth, save=True)
return {}
@must_be_logged_in
@must_be_contributor_or_public
def undelete_comment(**kwargs):
auth = kwargs['auth']
comment = kwargs_to_comment(kwargs, owner=True)
comment.undelete(auth=auth, save=True)
return {}
@must_be_logged_in
@must_be_contributor_or_public
def update_comments_timestamp(auth, **kwargs):
node = kwargs['node'] or kwargs['project']
if node.is_contributor(auth.user):
auth.user.comments_viewed_timestamp[node._id] = datetime.utcnow()
auth.user.save()
list_comments(**kwargs)
return {node._id: auth.user.comments_viewed_timestamp[node._id].isoformat()}
else:
return {}
@must_be_logged_in
@must_be_contributor_or_public
def report_abuse(**kwargs):
auth = kwargs['auth']
user = auth.user
    comment = kwargs_to_comment(kwargs)
egrigg9000/taskbuster_boilerplate | taskbuster/wsgi.py | Python | mit | 397 | 0
"""
WSGI config for taskbuster project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJ | ANGO_SETTINGS_MODULE", "taskbuster.settings")
application = get_wsgi_application()
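# A quick local smoke test for this callable (not part of the original file)
# could use the standard library's wsgiref server, assuming the taskbuster
# settings module is importable:
#
#     from wsgiref.simple_server import make_server
#     make_server('127.0.0.1', 8000, application).serve_forever()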
prophile/jacquard | setup.py | Python | mit | 4,910 | 0.001018
from setuptools import setup, find_packages
import os
import sys
# This monstrous hack is to support /etc generation for the Debian package
# with fpm.
if sys.argv[1] == 'install' and os.environ.get('JACQUARD_DEBIAN_HACK'):
def debian_etc_hack(root):
import pathlib
root_path = pathlib.Path(root)
config_dir = root_path / 'etc' / 'jacquard'
try:
config_dir.mkdir(parents=True)
except FileExistsError:
pass
try:
(config_dir / 'plugins').mkdir()
except FileExistsError:
pass
with (config_dir / 'config.cfg').open('wb') as f_out:
with open('debian.cfg', 'rb') as f_in:
config_file = f_in.read()
f_out.write(config_file)
try:
(root_path / 'var' / 'jacquard').mkdir(parents=True)
except FileExistsError:
pass
debian_etc_hack(sys.argv[3])
del debian_etc_hack
with open('README.rst', 'r', encoding='utf-8') as f:
long_description = f.read()
setup(
name='jacquard-split',
version='0.7.0',
url='https://github.com/prophile/jacquard',
description="Split testing server",
long_description=long_description,
author="Alistair Lynn",
author_email="alistair@alynn.co.uk",
keywords = (
'ab-testing',
'e-commerce',
'experiments',
'jacquard',
'metrics',
'redis',
'science',
'split-testing',
'testing',
'zucchini',
),
license='MIT',
zip_safe=False,
packages=find_packages(),
classifiers=(
'Development Status :: 2 - Pre-Alpha',
'Environment :: Console',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: POSIX',
'Programming Language :: Python',
'Programming Language :: Python :: 3 :: Only',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Office/Business',
),
install_requires=(
'redis',
'werkzeug',
'python-dateutil',
'pyyaml',
'sqlalchemy',
),
setup_requires=(
'pytest-runner',
),
tests_require=(
'pytest',
'redis==2.10.6',
'fakeredis==0.16.0',
'hypothesis<4',
),
entry_points={
'console_scripts': (
'jacquard = jacquard.cli:main',
),
'jacquard.storage_engines': (
'dummy = jacquard.storage.dummy:DummyStore',
'redis = jacquard.storage.redis:RedisStore',
'redis-cloned = jacquard.storage.cloned_redis:ClonedRedisStore',
'file = jacquard.storage.file:FileStore',
),
'jacquard.commands': (
'storage-dump = jacquard.storage.commands:StorageDump',
'storage-flush = jacquard.storage.commands:StorageFlush',
'storage-import = jacquard.storage.commands:StorageImport',
'storage-export = jacquard.storage.commands:StorageExport',
'set-default = jacquard.users.commands:SetDefault',
'override = jacquard.users.commands:Override',
            'clear-overrides = jacquard.users.commands:OverrideClear',
'runserver = jacquard.service.commands:RunServer',
'launch = jacquard.experiments.commands:Launch',
'conclude = jacquard.experiments.commands:Conclude',
'load-experiment = jacquard.experiments.commands:Load',
'rollout = jacquard.buckets.commands:Rollout',
            'settings-under-experiment = jacquard.experiments.commands:SettingsUnderActiveExperiments',
'bugpoint = jacquard.commands_dev:Bugpoint',
),
'jacquard.commands.list': (
'experiments = jacquard.experiments.commands:ListExperiments',
),
'jacquard.commands.show': (
'user = jacquard.users.commands:Show',
'defaults = jacquard.users.commands:Show',
'directory-entry = jacquard.directory.commands:ShowDirectoryEntry',
'experiment = jacquard.experiments.commands:Show',
),
'jacquard.directory_engines': (
'dummy = jacquard.directory.dummy:DummyDirectory',
'django = jacquard.directory.django:DjangoDirectory',
'union = jacquard.directory.union:UnionDirectory',
),
'jacquard.http_endpoints': (
'root = jacquard.service.endpoints:Root',
'user = jacquard.service.endpoints:User',
'experiments-overview = jacquard.service.endpoints:ExperimentsOverview',
'experiment = jacquard.service.endpoints:ExperimentDetail',
'experiment-partition = jacquard.service.endpoints:ExperimentPartition',
'defaults = jacquard.service.endpoints:Defaults',
),
},
)
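The entry-point groups declared above are what make jacquard extensible; here is a sketch of how a host process might discover and load the registered commands. It uses importlib.metadata's keyword form, which assumes Python 3.10+ (on 3.8/3.9, filter the mapping returned by entry_points() instead).

# Hedged sketch: enumerate the 'jacquard.commands' plugins declared in setup().
from importlib.metadata import entry_points

for ep in entry_points(group='jacquard.commands'):
    command = ep.load()  # e.g. jacquard.service.commands:RunServer
    print(ep.name, '->', command)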
procangroup/edx-platform | lms/djangoapps/ccx/utils.py | Python | agpl-3.0 | 16,422 | 0.001705
"""
CCX Enrollment operations for use by Coach APIs.
Does not include any access control, be sure to check access before calling.
"""
import datetime
import logging
from contextlib import contextmanager
from smtplib import SMTPException
import pytz
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.core.validators import validate_email
from django.utils.translation import ugettext as _
from courseware.courses import get_course_by_id
from lms.djangoapps.ccx.custom_exception import CCXUserValidationException
from lms.djangoapps.ccx.models import CustomCourseForEdX
from lms.djangoapps.ccx.overrides import get_override_for_ccx
from lms.djangoapps.instructor.access import allow_access, list_with_level, revoke_access
from lms.djangoapps.instructor.enrollment import enroll_email, get_email_params, unenroll_email
from lms.djangoapps.instructor.views.api import _split_input_list
from lms.djangoapps.instructor.views.tools import get_student_from_identifier
from openedx.core.djangoapps.content.course_overviews.models import CourseOverview
from openedx.core.djangoapps.content.course_structures.models import CourseStructure
from student.models import CourseEnrollment, CourseEnrollmentException
from student.roles import CourseCcxCoachRole, CourseInstructorRole, CourseStaffRole
log = logging.getLogger("edx.ccx")
def get_ccx_creation_dict(course):
"""
Return dict of rendering create ccx form.
Arguments:
course (CourseDescriptorWithMixins): An edx course
Returns:
dict: A attribute dict for view rendering
"""
context = {
'course': course,
'create_ccx_url': reverse('create_ccx', kwargs={'course_id': course.id}),
'has_ccx_connector': "true" if hasattr(course, 'ccx_connector') and course.ccx_connector else "false",
'use_ccx_con_error_message': _(
"A CCX can only be created on this course through an external service."
" Contact a course admin to give you access."
)
}
return context
def get_ccx_from_ccx_locator(course_id):
""" helper function to allow querying ccx fields from templates """
ccx_id = getattr(course_id, 'ccx', None)
ccx = None
if ccx_id:
ccx = CustomCourseForEdX.objects.filter(id=ccx_id)
if not ccx:
log.warning(
"CCX does not exist for course with id %s",
course_id
)
return None
return ccx[0]
def get_date(ccx, node, date_type=None, parent_node=None):
"""
This returns override or master date for section, subsection or a unit.
:param ccx: ccx instance
:param node: chapter, subsection or unit
:param date_type: start or due
:param parent_node: parent of node
:return: start or due date
"""
date = get_override_for_ccx(ccx, node, date_type, None)
if date_type == "start":
master_date = node.start
else:
master_date = node.due
if date is not None:
# Setting override date [start or due]
date = date.strftime('%Y-%m-%d %H:%M')
elif not parent_node and master_date is not None:
# Setting date from master course
date = master_date.strftime('%Y-%m-%d %H:%M')
elif parent_node is not None:
# Set parent date (vertical has same dates as subsections)
date = get_date(ccx, node=parent_node, date_type=date_type)
return date
def get_enrollment_action_and_identifiers(request):
"""
Extracts action type and student identifiers from the request
on Enrollment tab of CCX Dashboard.
"""
action, identifiers = None, None
student_action = request.POST.get('student-action', None)
batch_action = request.POST.get('enrollment-button', None)
if student_action:
action = student_action
student_id = request.POST.get('student-id', '')
identifiers = [student_id]
elif batch_action:
action = batch_action
identifiers_raw = request.POST.get('student-ids')
identifiers = _split_input_list(identifiers_raw)
return action, identifiers
def validate_date(year, month, day, hour, minute):
"""
avoid corrupting db if bad dates come in
"""
valid = True
if year < 0:
valid = False
if month < 1 or month > 12:
valid = False
if day < 1 or day > 31:
valid = False
if hour < 0 or hour > 23:
valid = False
if minute < 0 or minute > 59:
valid = False
return valid
def parse_date(datestring):
"""
Generate a UTC datetime.datetime object from a string of the form
'YYYY-MM-DD HH:MM'. If string is empty or `None`, returns `None`.
"""
if datestring:
date, time = datestring.split(' ')
year, month, day = map(int, date.split('-'))
hour, minute = map(int, time.split(':'))
if validate_date(year, month, day, hour, minute):
return datetime.datetime(
year, month, day, hour, minute, tzinfo=pytz.UTC)
return None
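# Round-trip examples for parse_date (illustrative values):
#   parse_date('2017-03-22 14:30') -> datetime.datetime(2017, 3, 22, 14, 30, tzinfo=pytz.UTC)
#   parse_date('2017-13-01 00:00') -> None  (month 13 fails validate_date)
#   parse_date('')                 -> None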
def get_ccx_for_coach(course, coach):
"""
Looks to see if user is coach of a CCX for this course. Returns the CCX or
None.
"""
ccxs = CustomCourseForEdX.objects.filter(
course_id=course.id,
coach=coach
)
# XXX: In the future, it would be nice to support more than one ccx per
    # coach per course. This is a place where that might happen.
if ccxs.exists():
return ccxs[0]
return None
def get_ccx_by_ccx_id(course, coach, ccx_id):
"""
Finds a CCX of given coach on given master course.
Arguments:
course (CourseDescriptor): Master course
coach (User): Coach to ccx
ccx_id (long): Id of ccx
Returns:
        ccx (CustomCourseForEdX): Instance of CCX.
"""
try:
ccx = CustomCourseForEdX.objects.get(
id=ccx_id,
course_id=course.id,
coach=coach
)
except CustomCourseForEdX.DoesNotExist:
return None
return ccx
def get_valid_student_with_email(identifier):
"""
Helper function to get an user email from an identifier and validate it.
In the UI a Coach can enroll users using both an email and an username.
This function takes care of:
- in case the identifier is an username, extracting the user object from
the DB and then the associated email
- validating the email
Arguments:
identifier (str): Username or email of the user to enroll
Returns:
(tuple): tuple containing:
email (str): A validated email for the user to enroll.
user (User): A valid User object or None.
Raises:
CCXUserValidationException: if the username is not found or the email
is not valid.
"""
user = email = None
try:
user = get_student_from_identifier(identifier)
except User.DoesNotExist:
email = identifier
else:
email = user.email
try:
validate_email(email)
except ValidationError:
raise CCXUserValidationException('Could not find a user with name or email "{0}" '.format(identifier))
return email, user
def ccx_students_enrolling_center(action, identifiers, email_students, course_key, email_params, coach):
"""
Function to enroll or unenroll/revoke students.
Arguments:
action (str): type of action to perform (Enroll, Unenroll/revoke)
identifiers (list): list of students username/email
email_students (bool): Flag to send an email to students
course_key (CCXLocator): a CCX course key
email_params (dict): dictionary of settings for the email to be sent
coach (User): ccx coach
Returns:
list: list of error
"""
errors = []
if action == 'Enroll':
ccx_course_overview = CourseOverview.get_from_id(course_key)
course_locator = course_key.to_course_locator()
staff = CourseStaffRole(course_locator).users_with_role()
admins = CourseInstructorRole(course_locator).users_with_role()
for identifier in identifiers:
jorisvandenbossche/2015-EuroScipy-pandas-tutorial | snippets/07 - Case study - air quality data64.py | Python | bsd-2-clause | 110 | 0.009091
df2011 = data['2011'].dropna()
df2011.groupby(df2011.index.week)[['BETN029', 'BETR801']].quantile(0.95).plot()
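The snippet assumes a DataFrame named data with a DatetimeIndex and per-station columns; below is a hedged, self-contained variant with fabricated readings (pandas-0.x idioms such as data['2011'] partial-string indexing and .index.week are kept to match the original notebook's era).

# Runnable variant of the snippet above with synthetic data.
import numpy as np
import pandas as pd

idx = pd.date_range('2011-01-01', '2011-12-31 23:00', freq='H')
data = pd.DataFrame(np.random.rand(len(idx), 2) * 100,
                    index=idx, columns=['BETN029', 'BETR801'])

df2011 = data['2011'].dropna()
df2011.groupby(df2011.index.week)[['BETN029', 'BETR801']].quantile(0.95).plot()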
pybursa/homeworks | a_berezovsky/hw2/task08.py | Python | gpl-2.0 | 745 | 0.001653
# coding=utf-8
"""
Task 8: Two-level tuple. (bonus)
PROBLEM:
A code fragment that takes a tuple of arbitrary numbers and reshapes it
into a tuple of two-element tuples (pairs).
Example:
(1,4,8,6,3,7,1) >> ((1,4),(8,6),(3,7),(1,))
"""
def task08(input_data):
return_data = []
for index in xrange(0, len(input_data), 2):
try:
return_data.append((input_data[index], input_data[index + 1]))
except IndexError:
return_data.append((input_data[index],))
return tuple(return_data)
if __name__ == '__main__':
    print task08((1, 4, 8, 6, 3, 7, 1))
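# An equivalent pairing one-liner (same behaviour, no exception handling
# needed, since slices past the end simply come back short):
#   tuple(input_data[i:i + 2] for i in xrange(0, len(input_data), 2))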
googleapis/python-bigquery-sqlalchemy | sqlalchemy_bigquery/base.py | Python | mit | 38,084 | 0.000814
# Copyright (c) 2017 The sqlalchemy-bigquery Authors
#
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
# the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""Integration between SQLAlchemy and BigQuery."""
from __future__ import absolute_import
from __future__ import unicode_literals
from decimal import Decimal
import random
import operator
import uuid
from google import auth
import google.api_core.exceptions
from google.cloud.bigquery import dbapi
from google.cloud.bigquery.table import TableReference
from google.api_core.exceptions import NotFound
import sqlalchemy
import sqlalchemy.sql.expression
import sqlalchemy.sql.functions
import sqlalchemy.sql.sqltypes
import sqlalchemy.sql.type_api
from sqlalchemy.exc import NoSuchTableError
from sqlalchemy import util
from sqlalchemy.sql.compiler import (
SQLCompiler,
GenericTypeCompiler,
DDLCompiler,
IdentifierPreparer,
)
from sqlalchemy.sql.sqltypes import Integer, String, NullType, Numeric
from sqlalchemy.engine.default import DefaultDialect, DefaultExecutionContext
from sqlalchemy.engine.base import Engine
from sqlalchemy.sql.schema import Column
from sqlalchemy.sql.schema import Table
from sqlalchemy.sql.selectable import CTE
from sqlalchemy.sql import elements, selectable
import re
from .parse_url import parse_url
from . import _helpers, _struct, _types
FIELD_ILLEGAL_CHARACTERS = re.compile(r"[^\w]+")
TABLE_VALUED_ALIAS_ALIASES = "bigquery_table_valued_alias_aliases"
def assert_(cond, message="Assertion failed"): # pragma: NO COVER
if not cond:
raise AssertionError(message)
class BigQueryIdentifierPreparer(IdentifierPreparer):
"""
Set containing everything
https://github.com/dropbox/PyHive/blob/master/pyhive/sqlalchemy_presto.py
"""
def __init__(self, dialect):
super(BigQueryIdentifierPreparer, self).__init__(
dialect, initial_quote="`",
)
def quote_column(self, value):
"""
Quote a column.
Fields are quoted separately from the record name.
"""
parts = value.split(".")
return ".".join(self.quote_identifier(x) for x in parts)
def quote(self, ident, force=None, column=False):
"""
Conditionally quote an identifier.
"""
force = getattr(ident, "quote", None)
if force is None or force:
return self.quote_column(ident) if column else self.quote_identifier(ident)
else:
return ident
def format_label(self, label, name=None):
name = name or label.name
# Fields must start with a letter or underscore
if not name[0].isalpha() and name[0] != "_":
name = "_" + name
# Fields must contain only letters, numbers, and underscores
name = FIELD_ILLEGAL_CHARACTERS.sub("_", name)
result = self.quote(name)
return result
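    # Illustration (hypothetical label): format_label() on a label named
    # '1 count' yields `_1_count` -- an underscore is prefixed because fields
    # must start with a letter or underscore, and the space is collapsed to
    # '_' by FIELD_ILLEGAL_CHARACTERS before quoting.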
class BigQueryExecutionContext(DefaultExecutionContext):
def create_cursor(self):
# Set arraysize
c = super(BigQueryExecutionContext, self).create_cursor()
if self.dialect.arraysize:
c.arraysize = self.dialect.arraysize
return c
def get_insert_default(self, column): # pragma: NO COVER
# Only used by compliance tests
if isinstance(column.type, Integer):
return random.randint(-9223372036854775808, 9223372036854775808) # 1<<63
elif isinstance(column.type, String):
return str(uuid.uuid4())
__remove_type_from_empty_in = _helpers.substitute_string_re_method(
r"""
\sIN\sUNNEST\(\[\s # ' IN UNNEST([ '
(
(?:NULL|\(NULL(?:,\sNULL)+\))\) # '(NULL)' or '((NULL, NULL, ...))'
\s(?:AND|OR)\s\(1\s!?=\s1 # ' and 1 != 1' or ' or 1 = 1'
)
(?:[:][A-Z0-9]+)? # Maybe ':TYPE' (e.g. ':INT64')
\s\]\) # Close: ' ])'
""",
flags=re.IGNORECASE | re.VERBOSE,
repl=r" IN(\1)",
)
@_helpers.substitute_re_method(
r"""
\sIN\sUNNEST\(\[\s # ' IN UNNEST([ '
( # Placeholders. See below.
%\([^)]+_\d+\)s # Placeholder '%(foo_1)s'
(?:,\s # 0 or more placeholders
%\([^)]+_\d+\)s
)*
)?
:([A-Z0-9]+) # Type ':TYPE' (e.g. ':INT64')
\s\]\) # Close: ' ])'
""",
flags=re.IGNORECASE | re.VERBOSE,
)
def __distribute_types_to_expanded_placeholders(self, m):
        # If we have an IN parameter, it sometimes gets expanded to 0 or more
# parameters and we need to move the type marker to each
# parameter.
# (The way SQLAlchemy handles this is a bit awkward for our
# purposes.)
        # In the placeholder part of the regex above, the `_\d+`
        # suffixes reflect that when an array parameter is expanded,
        # numeric suffixes are added. For example, a placeholder like
        # `%(foo)s` gets expanded to `%(foo_0)s`, `%(foo_1)s`, ...
placeholders, type_ = m.groups()
if placeholders:
placeholders = placeholders.replace(")", f":{type_})")
else:
placeholders = ""
return f" IN UNNEST([ {placeholders} ])"
def pre_exec(self):
self.statement = self.__distribute_types_to_expanded_placeholders(
self.__remove_type_from_empty_in(self.statement)
)
class BigQueryCompiler(_struct.SQLCompiler, SQLCompiler):
compound_keywords = SQLCompiler.compound_keywords.copy()
compound_keywords[selectable.CompoundSelect.UNION] = "UNION DISTINCT"
compound_keywords[selectable.CompoundSelect.UNION_ALL] = "UNION ALL"
def __init__(self, dialect, statement, *args, **kwargs):
if isinstance(statement, Column):
kwargs["compile_kwargs"] = util.immutabledict({"include_table": False})
super(BigQueryCompiler, self).__init__(dialect, statement, *args, **kwargs)
def visit_insert(self, insert_stmt, asfrom=False, **kw):
# The (internal) documentation for `inline` is confusing, but
# having `inline` be true prevents us from generating default
# primary-key values when we're doing executemany, which seem broken.
# We can probably do this in the constructor, but I want to
# make sure this only affects insert, because I'm paranoid. :)
self.inline = False
return super(BigQueryCompiler, self).visit_insert(
insert_stmt, asfrom=False, **kw
)
def visit_table_valued_alias(self, element, **kw):
# When using table-valued functions, like UNNEST, BigQuery requires a
# FROM for any table referenced in the function, including expressions
# in function arguments.
#
# For example, given SQLAlchemy code:
#
# print(
# select([func.unnest(foo.c.objects).alias('foo_objects').column])
# .compile(engine))
#
        # Left to its own devices, SQLAlchemy would output:
#
# SELECT `foo_objects`
# FROM unnest(`foo`.`objects`) AS `foo_objects`
#
amcat/amcat | api/rest/viewsets/sentence.py | Python | agpl-3.0 | 2,276 | 0.002197
###########################################################################
# (C) Vrije Universiteit, Amsterdam (the Netherlands) #
# #
# This file is part of AmCAT - The Amsterdam Content Analysis Toolkit #
# #
# AmCAT is free software: you can redistribute it and/or modify it under #
# the terms of the GNU Affero General Public License as published by the #
# Free Software Foundation, either version 3 of the License, or (at your #
# option) any later version. #
# #
# AmCAT is distributed in the hope that it will be useful, but WITHOUT #
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or #
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public #
# License for more details. #
# #
# You should have received a copy of the GNU Affero General Public #
# License along with AmCAT. If not, see <http://www.gnu.org/licenses/>. #
###########################################################################
from rest_framework.viewsets import ReadOnlyModelViewSet
from amcat.models import Sentence
from amcat.tools import sbd
from api.rest.mixins import DatatablesMixin
from api.rest.serializer import AmCATModelSerializer
from api.rest.viewset import AmCATViewSetMixin
__all__ = ("SentenceSerializer", "SentenceViewSetMixin", "SentenceViewSet")
class SentenceSerializer(AmCATModelSerializer):
class Meta:
model = Sentence
fields = '__all__'
class SentenceViewSetMixin(AmCATViewSetMixin):
serializer_class = SentenceSerializer
model_key = "sentence"
model = Sentence
class SentenceViewSet(SentenceViewSetMixin, DatatablesMixin, ReadOnlyModelViewSet):
model = Sentence
def filter_queryset(self, queryset):
qs = super(SentenceViewSet, self).filter_queryset(queryset)
return qs.filter(article=self.article, id__in=sbd.get_or_create_sentences(self.article))
joshzarrabi/e-mission-server | emission/core/wrapper/tour_model.py | Python | bsd-3-clause | 318 | 0.018868
import logging
import emission.core.wrapper.wrapperbase as ecwb
class TourModel(ecwb.WrapperBase):
  props = {"user_id" : ecwb.WrapperBase.Access.WORM, # user_id of the E-Missions user the graph represents
}
geojson = []
enums = {}
nullable = []
def _populateDependencies(self):
pass
gloaec/bamboo | tests/__init__.py | Python | gpl-3.0 | 2,708 | 0
# -*- coding: utf-8 -*-
"""
Unit Tests
~~~~~~~~~~
Define TestCase as base class for unit tests.
Ref: http://packages.python.org/Flask-Testing/
"""
from flask.ext.testing import TestCase as Base, Twill
from bambooapp import create_app
from bambooapp.user import User, UserDetail, ADMIN, USER, ACTIVE
from bambooapp.config import TestConfig
from bambooapp.extensions import db, assets
from bambooapp.utils import MALE
class TestCase(Base):
"""Base TestClass for your application."""
def create_app(self):
"""Create and return a testing flask app."""
assets._named_bundles = {}
app = create_app(TestConfig)
self.twill = Twill(app, port=3000)
return app
def init_data(self):
demo = User(
name=u'demo',
email=u'demo@example.com',
password=u'123456',
role_code=USER,
status_code=ACTIVE,
user_detail=UserDetail(
sex_code=MALE,
                age=10,
url=u'http://demo.example.com',
deposit=100.00,
location=u'Hangzhou',
bio=u'admin Guy is ... hmm ... just a demo guy.'))
admin = User(
name=u'admin',
email=u'admin@example.com',
            password=u'123456',
role_code=ADMIN,
status_code=ACTIVE,
user_detail=UserDetail(
sex_code=MALE,
age=10,
url=u'http://admin.example.com',
deposit=100.00,
location=u'Hangzhou',
bio=u'admin Guy is ... hmm ... just a admin guy.'))
db.session.add(demo)
db.session.add(admin)
db.session.commit()
def setUp(self):
"""Reset all tables before testing."""
db.create_all()
self.init_data()
def tearDown(self):
"""Clean db session and drop all tables."""
db.drop_all()
def login(self, username, password):
data = {
'login': username,
'password': password,
}
response = self.client.post('/login', data=data, follow_redirects=True)
assert "alert-success" in response.data
return response
def _logout(self):
response = self.client.get('/logout')
self.assertRedirects(response, location='/')
def _test_get_request(self, endpoint, template=None):
response = self.client.get(endpoint)
self.assert_200(response)
if template:
self.assertTemplateUsed(name=template)
return response
PeterPetrik/QGIS | tests/src/python/test_qgscheckablecombobox.py | Python | gpl-2.0 | 1,824 | 0
# -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsCheckableComboBox
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Alexander Bruy'
__date__ = '22/03/2017'
__copyright__ = 'Copyright 2017, The QGIS Project'
import qgis # NOQA
from qgis.PyQt.QtCore import Qt
from qgis.PyQt.QtTest import QSignalSpy
from qgis.gui import QgsCheckableComboBox
from qgis.testing import start_app, unittest
start_app()
class TestQgsCheckableComboBox(unittest.TestCase):
def testGettersSetters(self):
""" test widget getters/setters """
w = qgis.gui.QgsCheckableComboBox()
        w.setSeparator('|')
self.assertEqual(w.separator(), '|')
w.setDefaultText('Select items...')
self.assertEqual(w.defaultText(), 'Select items...')
w.addItems(['One', 'Two', 'Three'])
        w.setCheckedItems(['Two'])
self.assertEqual(len(w.checkedItems()), 1)
self.assertEqual(w.checkedItems(), ['Two'])
w.setCheckedItems(['Three'])
self.assertEqual(len(w.checkedItems()), 2)
self.assertEqual(w.checkedItems(), ['Two', 'Three'])
w.setItemCheckState(2, Qt.Unchecked)
self.assertEqual(w.itemCheckState(2), Qt.Unchecked)
def test_ChangedSignals(self):
""" test that signals are correctly emitted when clearing"""
w = qgis.gui.QgsCheckableComboBox()
w.addItems(['One', 'Two', 'Three'])
checkedItemsChanged_spy = QSignalSpy(w.checkedItemsChanged)
w.setCheckedItems(['Two'])
self.assertEqual(len(checkedItemsChanged_spy), 1)
if __name__ == '__main__':
unittest.main()
slavpetroff/sweetshop | backend/django/apps/testimonials/apps.py | Python | mit | 99 | 0
from django.apps import AppConfig
class TestimonialsConfig(AppConfig):
name = 'testimonials'
privateLittleVaginalTeasingMogicSchool/Fall-of-Cinnamon-Twig | Demo-Kazeshima-Python/card.py | Python | gpl-3.0 | 2,152 | 0.014405 | # KazeshimaAya@gmail.com
# 03.13.2017 UTC
# A simple relization of idea.
from math import pow as power
class Hero:
'Class of Hero car | d. Dinstinct hero has fixed Chuang, Zhi and Fu properties'
Name = ''
Chuang = 0
Zhi = 0
Fu = 0
MaxFanNum = 1 # The number of fans is HP
BaseAtk = 1
BaseDef = 1
BaseRep = 1 # Reputation is Mana. If a hero wants to use magic attack, typically an evil and dispicable behavior, it will consume his reputation. Of course this will also influence FanNum in most situations.
CyberAge | = 0 # unit of CyberAge is 3 months
def __init__(self, initCyberAge = 0, genChuang, genZhi, genFu, name = ''):
self.Chuang = genChuang
self.Zhi = genZhi
self.Fu = genFu
self.Name = name
CA = self.CyberAge
self.MaxFanNum = CA*genFu*1000 + CA*genZhi*750 + CA*genChuang*550# Simple growth curve. Linear dependence on CyberAge(hero's level)
self.BaseAtk = CA*genFu*80 + CA*genZhi*120 + CA*genChuang*200 # unbalanced values. Linear growth
self.BaseDef = CA*genFu*160 + CA*genZhi*200 + CA*genChuang*100 # unbalanced. Linear
self.BaseRep = CA*genFu*80 + CA*genZhi*60 + CA*genChuang*160 # unbalanced. Linear
self.BattleStatus = Status(self)
class Status:
'Class of the temporary status of a hero card. Used in computation during battle'
CurrentFanNum = 1
CurrentAtk = 1
CurrentDef = 1
CurrentRep = 1
ChuangBar = 0
ZhiBar = 0
FuBar = 0
MAX_BAR =
SkillReady1 = False
SkillReady2 = False
SkillReady3 = False
BarNormalization = 1
def __init__(self, card):
self.CurrentFanNum = card.BaseFanNum
self.CurrentAtk = card.BaseAtk
self.CurrentDef = card.BaseDef
self.CurrentRep = card.BaseRep
self.BarNormalization = power(card.Zhi*card.Chuang*card.Fu,1.0/3)
def barUpdate(self, card):
if ChuangBar <= MAX_BAR:
ChuangBar += int(MAX_BAR*card.Chuang/(5.0*BarNormalization))
if ChuangBar > MAX_BAR:
ChuangBar = MAX_BAR
if ZhiBar <= MAX_BAR:
|
kinow-io/kinow-python-sdk | test/test_customer_group_video_stats_list_response.py | Python | apache-2.0 | 939 | 0.003195 | # coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
    OpenAPI spec version: 2.0.6
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import os
import sys
import unittest
import kinow_client
from kinow_client.rest import ApiException
from kinow_client.models.customer_group_video_stats_list_response import CustomerGroupVideoStatsListResponse
class TestCustomerGroupVideoStatsListResponse(unittest.TestCase):
""" CustomerGroupVideoStatsListResponse unit test stubs """
def setUp(self):
pass
def tearDown(self):
pass
def testCustomerGroupVideoStatsListResponse(self):
"""
Test CustomerGroupVideoStatsListResponse
"""
model = kinow_client.models.customer_group_video_stats_list_response.CustomerGroupVideoStatsListResponse()
if __name__ == '__main__':
unittest.main()
Mirantis/pumphouse | pumphouse/tasks/user.py | Python | apache-2.0 | 4,857 | 0
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and#
# limitations under the License.
import logging
from taskflow.patterns import linear_flow
from pumphouse import exceptions
from pumphouse import events
from pumphouse import task
LOG = logging.getLogger(__name__)
class RetrieveUser(task.BaseCloudTask):
def execute(self, user_id):
user = self.cloud.keystone.users.get(user_id)
self.cloud.identity.fetch(user.id)
return user.to_dict()
class EnsureUser(task.BaseCloudTask):
def execute(self, user_info, tenant_info):
try:
user = self.cloud.keystone.users.find(name=user_info["name"])
# TODO(akscram): Current password should be replaced by temporary.
except exceptions.keystone_excs.NotFound:
user = self.cloud.keystone.users.create(
name=user_info["name"],
# TODO(akscram): Here we should generate a temporary
# password for the user and use them
# along the migration process.
# The RepairUserPasswords should repair
# original after all operations.
password="default",
email=user_info["email"],
tenant_id=tenant_info["id"] if tenant_info else None,
enabled=user_info["enabled"],
)
self.created_event(user)
return user.to_dict()
def created_event(self, user):
LOG.info("Created user: %s", user)
events.emit("create", {
"id": user.id,
"type": "user",
"cloud": self.cloud.name,
"data": user.to_dict(),
}, namespace="/events")
class EnsureOrphanUser(EnsureUser):
def execute(self, user_info):
super(EnsureOrphanUser, self).execute(user_info, None)
class EnsureUserRole(task.BaseCloudTask):
def execute(self, user_info, role_info, tenant_info):
try:
self.cloud.keystone.tenants.add_user(tenant_info["id"],
user_info["id"],
role_info["id"])
except exceptions.keystone_excs.Conflict:
pass
else:
self.role_assigned_event(role_info, user_info, tenant_info)
return user_info
def role_assigned_event(self, role_info, user_info, tenant_info):
LOG.info("Created role %s assignment for user %s in tenant %s",
role_info["id"], user_info["id"], tenant_info["id"])
def migrate_membership(context, user_id, role_id, tenant_id):
user_ensure = "user-{}-ensure".format(user_id)
role_ensure = "role-{}-ensure".format(role_id)
tenant_ensure = "tenant-{}-ensure".format(tenant_id)
user_role_ensure = "user-role-{}-{}-{}-ensure".format(user_id, role_id,
tenant_id)
task = EnsureUserRole(context.dst_cloud,
name=user_role_ensure,
provides=user_role_ensure,
rebind=[user_ensure, role_ensure,
tenant_ensure])
context.store[user_role_ensure] = user_role_ensure
return task
def migrate_user(context, user_id, tenant_id=None):
user_binding = "user-{}".for | mat(user_id)
user_retrieve = "{}-retrieve".format(user_binding)
user_ensure = "{}-ensure".format(user_binding)
fl | ow = linear_flow.Flow("migrate-user-{}".format(user_id))
flow.add(RetrieveUser(context.src_cloud,
name=user_binding,
provides=user_binding,
rebind=[user_retrieve]))
if tenant_id is not None:
tenant_ensure = "tenant-{}-ensure".format(tenant_id)
flow.add(EnsureUser(context.dst_cloud,
name=user_ensure,
provides=user_ensure,
rebind=[user_binding, tenant_ensure]))
else:
flow.add(EnsureUser(context.dst_cloud,
name=user_ensure,
provides=user_ensure,
rebind=[user_binding],
inject={"tenant_info": None}))
context.store[user_retrieve] = user_id
return flow
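# Illustrative helper (a sketch, not part of the pumphouse API): composes the
# factories above into a single flow. Assumes the same `context` object used
# throughout, i.e. src/dst cloud clients plus the `store` dict for bindings.
def migrate_users(context, user_ids, tenant_id=None):
    flow = linear_flow.Flow("migrate-users")
    for user_id in user_ids:
        flow.add(migrate_user(context, user_id, tenant_id=tenant_id))
    return flow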
|
spatialfrog/soil_tools | qgis/qgis_progressing_framework_scripts/src/2_calculate_cmp_soil_table_column.py | Python | gpl-3.0 | 3,985 | 0.011292 | """
purpose:
process the cmp table for single column calculation and csv output
notes:
    user must select shapefile column that defines the slc ids. these are used for processing
    if no polygons selected then all polygons processed
input:
slc shapefile field defining the slc ids
cmp table column for processing
output:
csv of single user selected column for calculation
license:
- gpl3
by:
richard burcher
richardburcher@gmail.com
2014
"""
#==========
# sets up gui in qgis processing framework
##[AAFC Soil Tools]=group
##cmp_soil_table=table
##cmp_soil_column_to_calculate=field cmp_soil_table
##slc_shapefile=vector
##slc_shapefile_polygon_id_column=field slc_shapefile
##option_soil_cmp_table_slc_id_column=string sl
##option_soil_cmp_table_percent_column=string percent
##option_csv_load_file_into_qgis=boolean True
##option_csv_output_directory=folder
##option_csv_file_prefix=string calculation
#===========
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from qgis.core import *
from qgis.gui import *
from qgis.utils import *
import os
import sys
import sqlite3
# aafc module name containing submodules for soil db work
aafcModuleName = "aafc_modules"
# add aafc module name directory to python path. found as .qgis2/processing/scripts/name_of_aafc_module
scriptDirectory = os.path.join(QFileInfo(QgsApplication.qgisUserDbFilePath()).path(), "processing/scripts",aafcModuleName)
# add to aafc module directory to python path
sys.path.append(scriptDirectory)
# import aafc_modules
import aafc_io as inout
import aafc_utilities as utilities
import aafc_database as database
# ========== create class instances
# create utility class instance. pass qgis supplied iface
utils = utilities.Utils(iface)
# get path to temp directory
tempSystemDirectoryPath = utils.determineSystemTempDirectory()
# io instance
io = inout.Io(tempSystemDirectoryPath=tempSystemDirectoryPath)
# get db path from cmp layer in qgis
inSoilDbPath = utils.getQgisTableLayerFilePathInfo(cmp_soil_table)
# db instance
db = database.Db(inSoilDbPath, tempSystemDirectoryPath)
# db performance tuning
db.sqliteLoadingPerformanceTuning(enable=True)
#========== get spatial selection of polygon slc units to process
# if no sub-selection, assume all polygons to be processed
msg,slcIds,status = utils.getVectorLayerFieldValues(slc_shapefile, slc_shapefile_polygon_id_column)
if not status:
# problem with getting values from vector layer for slc ids
utils.communicateWithUserInQgis("No values for field in given vector layer for slc ids. Stopping.",level="CRITICAL", messageExistanceDuration=15)
raise Exception(msg)
#===== process soil field
# column field must be quoted as '"field_to_calculate"'
calculationColumnName = '"%s"' %(cmp_soil_column_to_calculate)
# convert full connection path of user selected table in qgis toc to | actual table name
tableName = utils.getQgisTableLayerFilePathInfo(cmp_soil_table, pathKey="table")
# warn user process may take several minutes
message = "Calculating column %s may take several minutes" % ( | calculationColumnName)
utils.communicateWithUserInQgis(message,messageExistanceDuration=10)
headers, results = db.calculateField(slcIds, dbSlcKey=option_soil_cmp_table_slc_id_column, tableName=tableName, columnName=calculationColumnName, dbPercentKey=option_soil_cmp_table_percent_column)
outCsvFilePath = io.writeCsvFile(calculationColumnName, headers, results, option_csv_output_directory, csvFilePrefixName=option_csv_file_prefix)
# inform user processing finished
msg = "Finished processing column %s. Find output CSV in directory %s" %(calculationColumnName, option_csv_output_directory)
utils.communicateWithUserInQgis(msg, messageExistanceDuration=10)
# ========== load calculated csv into qgis
if option_csv_load_file_into_qgis:
# load csv into qgis csv into toc as vector layer
utils.loadVectorLayerIntoQgis(outCsvFilePath)
#========== clean up
# remove added aafc soil module from python path
sys.path.pop() |
OCA/stock-logistics-warehouse | account_move_line_product/__manifest__.py | Python | agpl-3.0 | 560 | 0 | # Copyright 2019 ForgeFlow | S.L. (https://www.forgeflow.com)
# License AGPL-3.0 or later (https://www.gnu.org/licenses/agpl.html).
{
"name": "Account Move Line Product",
"version": "14.0.1.0.0",
"summary": "Displays the product in the journal entries and items",
"author": "ForgeFlow, Odoo Community Association (OCA)",
"website": "https://github.com/OCA/stock-logistics-warehouse",
"category": "Generic",
"depends": ["account"],
"license": "AGPL-3",
| "data": ["views/account_move_line_view.xml"],
"installable": True,
}
|
ChileanVirtualObservatory/acalib | setup.py | Python | gpl-3.0 | 2,129 | 0.014091 | from setuptools import setup, find_packages
from setuptools.command.install import install
from shutil import copyfile
import os
import glob
import sys
import subprocess
def check_build():
good_commands = ('develop', 'sdist', 'build', 'build_ext', 'build_py',
'build_clib', 'build_scripts', 'bdist_wheel', 'bdist_rpm',
'bdist_wininst', 'bdist_msi', 'bdist_mpkg')
    for command in good_commands:
        if command in sys.argv[1:]:
            return True
    return False
def build_and_move(path):
print("Building: {}".format(path))
cwd = os.getcwd()
rel_module = path
module_dir = os.path.join(cwd, rel_module)
os.chdir(module_dir)
ret = subprocess.call(["python","setup.py", "build"])
if ret != 0:
exit(ret)
for fbuilded in glob.glob("build/lib*/*.so"):
dest_directory = os.getcwd() + '/' + os.path.basename(fbuilded)
copyfile(fbuilded, dest_directory)
os.chdir(cwd)
def setup_package():
if "--force" in sys.argv:
run_build = True
else:
run_build = False
#Building packages
if check_build() or run_build:
build_and_move('acalib/core/_morph')
setup(
name = "acalib",
version = "0.1.3",
| description = "Advanced Computing for Astronomy Library",
url = "https://github.com/ChileanVirtualObservatory/ACALIB",
author = "CSRG",
author_email = 'contact@lirae.cl',
| classifiers = [
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering :: Astronomy',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.6'
],
zip_safe = False,
packages = find_packages(),
include_package_data = True,
setup_requires = ['numpy>=1.8', 'cython>=0.18'],
install_requires = ['numpy>=1.11.2', 'astropy>=1.2', 'cython>=0.18',
'matplotlib>=1.5', 'scipy>=0.18',
'scikit-image>=0.13', 'urllib3', 'pycupid', 'dask', 'distributed']
)
setup_package()
|
log2timeline/plaso | tests/parsers/winevt.py | Python | apache-2.0 | 2,807 | 0.002494 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the Windows EventLog (EVT) parser."""
import unittest
from plaso.lib import definitions
from plaso.parsers import winevt
from tests.parsers import test_lib
class WinEvtParserTest(test_lib.ParserTestCase):
"""Tests for the Windows EventLog (EVT) parser."""
def testParse(self):
"""Tests the Parse function."""
parser = winevt.WinEvtParser()
storage_writer = self._ParseFile(['SysEvent.Evt'], parser)
# Windows Event Log (EVT) information:
# Version : 1.1
# Number of records : 6063
# Number of recovered records : 438
# Log type : System
number_of_events = storage_writer.GetNumberOfAttributeContainers('event')
self.assertEqual(number_of_events, (6063 + 438) * 2)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'extraction_warning')
self.assertEqual(number_of_warnings, 0)
number_of_warnings = storage_writer.GetNumberOfAttributeContainers(
'recovery_warning')
self.assertEqual(number_of_warnings, 0)
events = list(storage_writer.GetEvents())
expected_event_values = {
'date_time': '2011-07-27 06:41:47',
'timestamp_desc': definitions.TIME_DESCRIPTION_CREATION}
self.CheckEventValues(storage_writer, events[0], expected_event_values)
# Event number : 1392
# Creation time : Jul 27, 2011 06:41:47 UTC
# Written time : Jul 27, 2011 06:41:47 UTC
# Event type : Warning event (2)
# Computer name : WKS-WINXP32BIT
# Source name : LSASRV
# Event category : 3
# Event identifier : 0x8000a001 (2147524609)
# Number of strings : 2
# String: 1 : cifs/CONTROLLER
# String: 2 : "The system detected a possible attempt to compromise
# security. Please ensure that you can contact the
# server that authenticated you.\r\n (0x | c0000388)"
expected_string2 = (
'"The system detected a possible attempt to compromise security. '
'Please ensure that you can contact the server that authenticated you.'
'\r\n (0xc0000388)"')
expected_event_values = {
'computer_name': 'WKS-WINXP32BIT',
'date_time': '2011-07-27 06:41:47',
'data_type': 'windows:evt:re | cord',
'event_category': 3,
'event_identifier': 40961,
'event_type': 2,
'record_number': 1392,
'severity': 2,
'source_name': 'LSASRV',
'strings': ['cifs/CONTROLLER', expected_string2],
'timestamp_desc': definitions.TIME_DESCRIPTION_WRITTEN}
self.CheckEventValues(storage_writer, events[1], expected_event_values)
if __name__ == '__main__':
unittest.main()
|
galihmelon/sendgrid-python | sendgrid/helpers/mail/subscription_tracking.py | Python | mit | 1,603 | 0.000624 | class SubscriptionTracking(object):
def __init__(self, enable=None, text=None, html=None, substitution_tag=None):
self._enable = None
self._text = None
self._html = None
self._substitution_tag = None
if enable is not None:
self.enable = enable
if text is not None:
self.text | = text
if html is not None:
self.html = html
if substitution_tag is not None:
self.substitution_tag = substitution_tag
@property
def ena | ble(self):
return self._enable
@enable.setter
def enable(self, value):
self._enable = value
@property
def text(self):
return self._text
@text.setter
def text(self, value):
self._text = value
@property
def html(self):
return self._html
@html.setter
def html(self, value):
self._html = value
@property
def substitution_tag(self):
return self._substitution_tag
@substitution_tag.setter
def substitution_tag(self, value):
self._substitution_tag = value
def get(self):
subscription_tracking = {}
if self.enable is not None:
subscription_tracking["enable"] = self.enable
if self.text is not None:
subscription_tracking["text"] = self.text
if self.html is not None:
subscription_tracking["html"] = self.html
if self.substitution_tag is not None:
subscription_tracking["substitution_tag"] = self.substitution_tag
return subscription_tracking
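# Minimal usage sketch: build the settings object and serialize it for the
# tracking_settings section of a mail payload. The "<% %>" markers are
# SendGrid's placeholders for the unsubscribe URL.
if __name__ == "__main__":
    tracking = SubscriptionTracking(
        enable=True,
        text="Unsubscribe here: <% %>",
        html='<p><a href="<% %>">Unsubscribe</a></p>',
        substitution_tag="[unsubscribe_url]",
    )
    print(tracking.get())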
|
bruxr/Sirius2 | sirius/services/sentry.py | Python | mit | 1,881 | 0.002127 | import os
import json
from google.appengine.api import urlfetch
from sirius.errors import APIError
API_BASE = 'https://app.getsentry.com/api/0'
class SentryError(APIError):
"""Raised when an error is received from Sentry."""
def create_project(name):
"""Creates a project in Sentry.
This assumes that sentry environment variables SENTRY_ORG,
SENTRY_TEAM and SENTRY_KEY is set.
Arguments:
name -- project name
Returns
project slug in Sentry
"""
org = os.environ['SENTRY_ORG']
team = os.environ['SENTRY_TEAM']
headers = {
'Authorization': 'Basic ' + os.environ['SENTRY_KEY'],
'Content-Type': 'application/json'
}
payload = json.dumps({'name': name})
url = '{0}/teams/{1}/{2}/projects/'.format(API_BASE, org, team)
result = urlfetch.fetch(
url=url,
payload=payload,
method=urlfetch.POST,
headers=headers
)
response = json.loads(result.content)
if result.status_code == 201:
return response['slug']
else:
raise SentryError(result.status_code, response['detail'])
def create_key(slug):
"""Creates a cl | ient key for a project.
Arguments:
slug -- project slug
Returns
client's secret DSN key
"""
org = os.environ['SENTRY_ORG']
headers = {
'Authorization': 'Basic ' + os.environ['SENTRY_KEY'],
'Con | tent-Type': 'application/json'
}
payload = json.dumps({'name': 'Default'})
url = '{0}/projects/{1}/{2}/keys/'.format(API_BASE, org, slug)
result = urlfetch.fetch(
url=url,
payload=payload,
method=urlfetch.POST,
headers=headers
)
response = json.loads(result.content)
if result.status_code == 201:
return response['dsn']['secret']
else:
raise SentryError(result.status_code, response['detail']) |
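if __name__ == "__main__":
    # Usage sketch only: requires SENTRY_ORG, SENTRY_TEAM and SENTRY_KEY in the
    # environment, and an App Engine runtime so that urlfetch is available.
    # The project name below is arbitrary.
    slug = create_project("demo-project")
    print("project:", slug, "dsn:", create_key(slug))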
openvstorage/openvstorage-flocker-driver | openvstorage_flocker_plugin/__init__.py | Python | apache-2.0 | 1,409 | 0.00071 | # Copyright 2015 iNuron NV
#
# Licensed under the Open vStorage Modified Apache License (the "License");
# you | may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.openvstorage.org/license
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the Li | cense for the specific language governing permissions and
# limitations under the License.
from flocker.node import BackendDescription, DeployerType
from openvstorage_flocker_plugin.openvstorage_blockdevice import (
openvstorage_from_configuration
)
__author__ = "Chrysostomos Nanakos"
__copyright__ = "Copyright 2015, iNuron NV"
__version__ = "0.1"
__maintainer__ = "Chrysostomos Nanakos"
__email__ = "cnanakos@openvstorage.com"
__status__ = "Development"
def api_factory(cluster_id, **kwargs):
if "vpool_conf_file" in kwargs:
vpool_conf_file = kwargs["vpool_conf_file"]
else:
raise Exception('No vPool configuration file')
return openvstorage_from_configuration(vpool_conf_file=vpool_conf_file)
FLOCKER_BACKEND = BackendDescription(
name=u"openvstorage_flocker_driver",
needs_reactor=False, needs_cluster_id=True,
api_factory=api_factory, deployer_type=DeployerType.block)
|
wevote/WebAppPublic | apis_v1/documentation_source/organizations_followed_retrieve_doc.py | Python | bsd-3-clause | 3,459 | 0.002024 | # apis_v1/documentation_source/organizations_followed_retrieve_doc.py
# Brought to you by We Vote. Be good.
# -*- coding: UTF-8 -*-
def organizations_followed_retrieve_doc_template_values(url_root):
"""
Show documentation about organizationsFollowedRetrieve
"""
required_query_parameter_list = [
{
'name': 'voter_device_id',
'value': 'string', # boolean, integer, long, string
'description': 'An 88 character unique identifier linked to a voter record on the server',
},
{
'name': 'api_key',
'value': 'string (from post, cookie, or get (in that order))', # boolean, integer, long, string
'description': 'The unique key provided to any organization using the WeVoteServer APIs',
},
]
optional_query_parameter_list = [
# {
# 'name': '',
# 'value': '', # boolean, integer, long, string
# 'description': '',
# },
]
potential_status_codes_list = [
{
'code': 'VALID_VOTER_DEVICE_ID_MISSING',
'description': 'Cannot proceed. A valid voter_device_id parameter was not included.',
},
{
'code': 'VALID_VOTER_ID_MISSING',
'description': 'Cannot proceed. A valid voter_id was not found.',
},
# {
# 'code': '',
# 'description': '',
# },
]
try_now_link_variables_dict = {
# 'organization_we_vote_id': 'wv85org1',
}
# Changes made here should also be made in organizations_retrieve
api_response = '{\n' \
' "status": string,\n' \
' "success": boolean,\n' \
' "voter_device_id": string (88 characters long),\n' \
' "organization_list": list\n' \
' [\n' \
' "organization_id": integer (the id of the organization found),\n' \
' "organization_we_vote_id": string (the organization ' \
' identifier that moves server-to-server),\n' \
' "organization_name": string (value from Google),\n' \
' "organization_websi | te": string (website address),\n' \
' "organization_twitter_handle": string (twitter address),\n' \
' "twitter_followers_count": integer,\n' \
' "organization_email": string,\n' \
' "organization_facebook": string,\n' \
' "organization_photo_url": string,\n' \
' ],\n' \
'}'
template_values = {
'api_n | ame': 'organizationsFollowedRetrieve',
'api_slug': 'organizationsFollowedRetrieve',
'api_introduction':
"",
'try_now_link': 'apis_v1:organizationsFollowedRetrieveView',
'try_now_link_variables_dict': try_now_link_variables_dict,
'url_root': url_root,
'get_or_post': 'GET',
'required_query_parameter_list': required_query_parameter_list,
'optional_query_parameter_list': optional_query_parameter_list,
'api_response': api_response,
'api_response_notes':
"",
'potential_status_codes_list': potential_status_codes_list,
}
return template_values
|
earlysaints/database | datasets/nauvoo_deeds/persons2quad.py | Python | gpl-2.0 | 892 | 0.005605 | from xml.dom import minidom
import codecs
inxml = minidom.parse(r'm_persons.xml')
outcsv = codecs.open(r'person_quads.csv', 'w', 'utf-8')
outcsv.write('id,assert_type,subject,predicate,object\r\n')
i=1
for person in inxml.getElementsByTagName('personk'):
pid = 'I'+person.getElementsByTagName('PERSON_ID')[0].childNodes[0].data
pname = person.getElementsByTagName('NAME')[0].childNodes[0].data
outcsv.write(str(i)+',entity, | '+pid+',common name,|'+pname+'|\r\n')
outcsv.write(str(i+1)+',property,'+pid+',entity type,person\r\n')
pcontentel = person.getElementsByTagName('CONTENT')
if pcontentel:
pcontent = pcontentel[0].childNodes[0].data
        pcontent = pcontent.replace('<', '&lt;')
pcontent = "<br />".join(pcontent.split("\n"))
outcsv.write(str(i+2)+',property,'+pid+',description,|'+pcontent+'|\r\n')
i=i+3
outcsv.flu | sh()
outcsv.close()
|
Simulmedia/pyembedpg | pyembedpg.py | Python | apache-2.0 | 10,244 | 0.003221 | #
# Copyright 2015 Simulmedia, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import re
import shutil
import socket
import tarfile
import tempfile
import time
from contextlib import closing
from distutils import spawn
from os.path import expanduser
from subprocess import Popen
import psycopg2
import requests
from psycopg2._psycopg import OperationalError
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
logger = logging.getLogger('pyembedpg')
class PyEmbedPg(object):
DOWNLOAD_BASE_URL = 'http://ftp.postgresql.org/pub/source'
DOWNLOAD_URL = DOWNLOAD_BASE_URL + '/v{version}/postgresql-{version}.tar.bz2'
LOCAL_VERSION = 'local'
CACHE_DIRECTORY = '.pyembedpg'
def __init__(self, version=None):
"""
Initialize a new Postgres object
:param version: version to use. If it is not set, use the latest version in .pyembedpg directory. If not present
use the latest version remotely. Use 'local' to use the local postgres version installed on the machine
:return:
"""
home_dir = expanduser("~")
self._cache_dir = os.path.join(home_dir, PyEmbedPg.CACHE_DIRECTORY)
# if version is not specified, check local last version otherwise get last remote version
self.version = version
if not self.version:
self.version = self.get_latest_local_version()
if not self.version:
self.version = self.get_latest_remote_version()
if version == PyEmbedPg.LOCAL_VERSION:
full_path = spawn.find_executable('postgres')
if not full_path:
raise PyEmbedPgException('Cannot find postgres executable. Make sure it is in your path')
self._version_path = os.path.dirname(full_path)
else:
self._version_path = os.path.join(self._cache_dir, self.version)
def get_latest_local_version(self):
"""
Return the latest version installed in the cache
:return: latest version installed locally in the cache and None if there is nothing downloaded
"""
if not os.path.exists(self._cache_dir):
return None
tags = os.listdir(self._cache_dir)
# we want to sort based on numbers so:
# v3.0.0-QA6
# v3.0.0-QA15
# v3.0.0-QA2
# are sorted according to the numbers so no lexigraphically
revs_to_tag = [(re.split(r"[^\d]+", tag), tag) for tag in tags]
return max(revs_to_tag)[1]
def get_latest_remote_versi | on(self):
"""
Return the latest version on the Postgres FTP server
:return: latest version installed locally on the Postgres FTP server
"""
response = requests.get(PyEmbedPg.DOWNLOAD_BASE_URL)
last_version_match = list(re.finditer('>v(?P<version>[^<]+)<', response | .content.decode()))[-1]
return last_version_match.group('version')
def check_version_present(self):
"""
Check if the version is present in the cache
:return: True if the version has already been downloaded and build, False otherwise
"""
return os.path.exists(self._version_path)
def download_and_unpack(self):
# if the version we want to download already exists, do not do anything
if self.check_version_present():
logger.debug('Version {version} already present in cache'.format(version=self.version))
return
url = PyEmbedPg.DOWNLOAD_URL.format(version=self.version)
response = requests.get(url, stream=True)
if not response.ok:
raise PyEmbedPgException('Cannot download file {url}. Error: {error}'.format(url=url, error=response.content))
with tempfile.NamedTemporaryFile() as fd:
logger.debug('Downloading {url}'.format(url=url))
for block in response.iter_content(chunk_size=4096):
fd.write(block)
fd.flush()
# Unpack the file into temporary dir
temp_dir = tempfile.mkdtemp()
source_dir = os.path.join(temp_dir, 'postgresql-{version}'.format(version=self.version))
try:
# Can't use with context directly because of python 2.6
with closing(tarfile.open(fd.name)) as tar:
tar.extractall(temp_dir)
os.system(
'sh -c "cd {path} && ./configure --prefix={target_dir} && make install && cd contrib && make install"'.format(
path=source_dir,
target_dir=self._version_path)
)
finally:
shutil.rmtree(temp_dir, ignore_errors=True)
def start(self, port=5432):
"""
Start a new Postgres server on the specified port
:param port: port to connect to, can be an int or a list of ports
:return:
"""
if not self.check_version_present():
self.download_and_unpack()
bin_dir = os.path.join(self._version_path, 'bin')
ports = [port] if isinstance(port, int) else port
return DatabaseRunner(bin_dir, ports)
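# Usage sketch: the first call downloads and builds the requested version into
# ~/.pyembedpg; later calls reuse the cached build. start() accepts a single
# port or a list of candidate ports and returns a DatabaseRunner (below):
#
#     pg = PyEmbedPg('9.6.5')          # or PyEmbedPg('local') / PyEmbedPg()
#     runner = pg.start([5432, 15432])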
class DatabaseRunner(object):
ADMIN_USER = 'root'
TIMEOUT = 10
def __init__(self, bin_dir, ports):
self._ports = ports
self._postgres_cmd = os.path.join(bin_dir, 'postgres')
# init db
init_db = os.path.join(bin_dir, 'initdb')
self._temp_dir = tempfile.mkdtemp()
command = init_db + ' -D ' + self._temp_dir + ' -U ' + DatabaseRunner.ADMIN_USER
logger.debug('Running command: {command}'.format(command=command))
os.system(command)
# overwrite pg_hba.conf to only allow local access with password authentication
with open(os.path.join(self._temp_dir, 'pg_hba.conf'), 'w') as fd:
fd.write(
'# TYPE DATABASE USER ADDRESS METHOD\n'
'# "local" is for Unix domain socket connections only\n'
'local all {admin} trust\n'
'local all all md5\n'
'host all {admin} 127.0.0.1/32 trust\n'
'host all all 127.0.0.1/32 md5\n'
'# IPv6 local connections:\n'
'host all {admin} ::1/128 trust\n'
'host all all ::1/128 md5\n'.format(admin=DatabaseRunner.ADMIN_USER)
)
def can_connect(port):
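            # connect_ex returns non-zero when nothing accepts the connection,
            # so True here means the port is free for our postgres instance.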
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
return sock.connect_ex(('127.0.0.1', port)) != 0
self.running_port = next((port for port in ports if can_connect(port)), None)
if self.running_port is None:
raise PyEmbedPgException('Cannot run postgres on any of these ports [{ports}]'.format(ports=', '.join((str(p) for p in ports))))
self.proc = Popen([self._postgres_cmd, '-D', self._temp_dir, '-p', str(self.running_port)])
logger.debug('Postgres started on port {port}...'.format(port=self.running_port))
# Loop until the server is started
logger.debug('Waiting for Postgres to start...')
start = time.time()
while time.time() - start < DatabaseRunner.TIMEOUT:
try:
with psycopg2.connect(database='postgres', user=DatabaseRunner.ADMIN_USER, host='localhost', port=self.running_port):
break
|
jsmesami/naovoce | src/gallery/migrations/0002_image_author.py | Python | bsd-3-clause | 598 | 0.001672 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('gallery', '0001_initial'),
]
operations = [
mi | grations.AddField(
model_name='image',
name='author',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL, blank=True, null=True, verbose_name='author', r | elated_name='images', on_delete=models.CASCADE),
),
]
|
vorburger/mcedit2 | src/mcedit2/editortools/brush/__init__.py | Python | bsd-3-clause | 8,331 | 0.00168 | """
brush
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import logging
from PySide import QtGui
from mcedit2.editortools import EditorTool
from mcedit2.command import SimplePerformCommand
from mcedit2.editortools.brush.masklevel import FakeBrushSection
from mcedit2.editortools.brush.modes import BrushModes
from mcedit2.rendering import worldscene
from mcedit2.rendering.depths import DepthOffset
from mcedit2.rendering.scenegraph.matrix import TranslateNode
from mcedit2.rendering.selection import SelectionBoxNode
from mcedit2.util.load_ui import load_ui, registerCustomWidget
from mcedit2.util.settings import Settings
from mcedit2.util.showprogress import showProgress
from mcedit2.util.worldloader import WorldLoader
from mceditlib.geometry import Vector
from mceditlib.util import exhaust
log = logging.getLogger(__name__)
BrushModeSetting = Settings().getOption("editortools/brush/mode", default="fill")
BrushShapeSetting = Settings().getOption("editortools/brush/shape")
BrushSizeSetting = Settings().getOption("editortools/brush/size")
class BrushCommand(SimplePerformCommand):
def __init__(self, editorSession, points, options):
"""
:type editorSession: mcedit2.editorsession.EditorSession
:type points: list
:type options: dict
:return:
:rtype:
"""
super(BrushCommand, self).__init__(editorSession)
# if options is None: options = {}
self.options = options
self.points = points
self.brushSize = options['brushSize']
self.brushShape = options['brushShape']
self.brushMode = options['brushMode']
self.setText("%s %s Brush" % (self.brushMode.name, self.brushShape.ID))
if max(self.brushSize) > BrushTool.maxBrushSize:
self.brushSize = (BrushTool.maxBrushSize,) * 3
if max(self.brushSize) < 1:
self.brushSize = (1, 1, 1)
@property
def noise(self):
return self.options.get('brushNoise', 100)
@property
def hollow(self):
return self.options.get('brushHollow', False)
def perform(self):
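        # Long strokes get a cancellable progress dialog; short strokes (ten
        # points or fewer) are applied synchronously via exhaust().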
if len(self.points) > 10:
showProgress("Performing brush...", self._perform(), cancel=True)
else:
exhaust(self._perform())
def _perform(self):
yield 0, len(self.points), "Applying {0} brush...".format(self.brushMode.name)
try:
#xxx combine selections
selections = [self.brushShape.createShapedSelection(self.brushMode.brushBoxForPoint(point, self.options),
self.editorSession.currentDimension)
for point in self.points]
self.brushMode.applyToSelections(self, selections)
except NotImplementedError:
for i, point in enumerate(self.points):
f = self.brushMode.applyToPoint(self, point)
if hasattr(f, "__iter__"):
for progress in f:
yield progress
else:
yield i, len(self.points), "Applying {0} brush...".format(self.brushMode.name)
self.performed = True
class BrushTool(EditorTool):
name = "Brush"
| iconName = "brush"
maxBrushSize = 512
def __init__(self, editorSession, *args, **kwargs):
super(BrushTool, self).__init__(editorSession, *args, **kwargs)
self.toolWidget = load_ui("editortools/brush.ui")
self.brushMode = None
self.brushLoader = None
BrushModeSetting.connectAndCall(self.modeSetting | Changed)
self.cursorWorldScene = None
self.cursorNode = TranslateNode()
self.toolWidget.xSpinSlider.setMinimum(1)
self.toolWidget.ySpinSlider.setMinimum(1)
self.toolWidget.zSpinSlider.setMinimum(1)
self.toolWidget.xSpinSlider.valueChanged.connect(self.setX)
self.toolWidget.ySpinSlider.valueChanged.connect(self.setY)
self.toolWidget.zSpinSlider.valueChanged.connect(self.setZ)
self.toolWidget.brushShapeInput.shapeChanged.connect(self.updateCursor)
self.toolWidget.brushShapeInput.shapeOptionsChanged.connect(self.updateCursor)
self.fillBlock = editorSession.worldEditor.blocktypes["stone"]
self.brushSize = BrushSizeSetting.value(QtGui.QVector3D(5, 5, 5)).toTuple() # calls updateCursor
self.toolWidget.xSpinSlider.setValue(self.brushSize[0])
self.toolWidget.ySpinSlider.setValue(self.brushSize[1])
self.toolWidget.zSpinSlider.setValue(self.brushSize[2])
@property
def hoverDistance(self):
return self.toolWidget.hoverSpinSlider.value()
_brushSize = (0, 0, 0)
@property
def brushSize(self):
return self._brushSize
@brushSize.setter
def brushSize(self, value):
self._brushSize = value
BrushSizeSetting.setValue(QtGui.QVector3D(*self.brushSize))
self.updateCursor()
def setX(self, val):
x, y, z = self.brushSize
x = float(val)
self.brushSize = x, y, z
def setY(self, val):
x, y, z = self.brushSize
y = float(val)
self.brushSize = x, y, z
def setZ(self, val):
x, y, z = self.brushSize
z = float(val)
self.brushSize = x, y, z
def setBlocktypes(self, types):
if len(types) == 0:
return
self.fillBlock = types[0]
self.updateCursor()
def mousePress(self, event):
pos = event.blockPosition
vector = (event.blockFace.vector * self.hoverDistance)
command = BrushCommand(self.editorSession, [pos + vector], self.options)
self.editorSession.pushCommand(command)
def mouseMove(self, event):
if event.blockPosition:
vector = (event.blockFace.vector * self.hoverDistance)
assert isinstance(vector, Vector), "vector isa %s" % type(vector)
self.cursorNode.translateOffset = event.blockPosition + vector
@property
def options(self):
options = {'brushSize': self.brushSize,
'brushShape': self.brushShape,
'brushMode': self.brushMode}
options.update(self.brushMode.getOptions())
return options
def modeSettingChanged(self, value):
self.brushMode = BrushModes.modesByName[value]
stack = self.toolWidget.modeOptionsStack
while stack.count():
stack.removeWidget(stack.widget(0))
widget = self.brushMode.createOptionsWidget(self)
if widget:
stack.addWidget(widget)
@property
def brushShape(self):
return self.toolWidget.brushShapeInput.currentShape
def updateCursor(self):
log.info("Updating brush cursor")
if self.cursorWorldScene:
self.brushLoader.timer.stop()
self.cursorNode.removeChild(self.cursorWorldScene)
self.cursorNode.removeChild(self.cursorBoxNode)
cursorLevel = self.brushMode.createCursorLevel(self)
cursorBox = self.brushMode.brushBoxForPoint((0, 0, 0), self.options)
self.cursorBoxNode = SelectionBoxNode()
self.cursorBoxNode.selectionBox = cursorBox
self.cursorBoxNode.filled = False
self.cursorWorldScene = worldscene.WorldScene(cursorLevel, self.editorSession.textureAtlas)
self.cursorWorldScene.depthOffsetNode.depthOffset = DepthOffset.PreviewRenderer
self.cursorNode.addChild(self.cursorWorldScene)
self.cursorNode.addChild(self.cursorBoxNode)
self.brushLoader = WorldLoader(self.cursorWorldScene)
self.brushLoader.timer.start()
# xxx button palette?
@registerCustomWidget
class BrushModeWidget(QtGui.QComboBox):
def __init__(self, *args, **kwargs):
super(BrushModeWidget, self).__init__(*args, **kwargs)
for mode in BrushModes.allModes:
self.addItem(mode.displayName, mode.name)
currentID = BrushModeSetting.value()
currentIndex = self.findData(currentID)
if currentIndex == -1:
currentIndex = 0
self.setCurrentIndex(currentIndex)
self |
moraleslazaro/cockpit | pkg/storaged/luksmeta-monitor-hack.py | Python | lgpl-2.1 | 5,420 | 0.010148 | #! /usr/bin/python3
# This simulates the org.freedesktop.UDisks.Encrypted.Slots property
# et al for versions of UDisks that don't have them yet.
import sys
import json
import subprocess
import re
import base64
import signal
import atexit
import os
def b64_decode(data):
# The data we get doesn't seem to have any padding, but the base64
# module requires it. So we add it back. Can't anyone agree on
# anything? Not even base64?
return base64.urlsafe_b64decode((data + '=' * ((4 - len(data) % 4) % 4)).encode('ascii', 'ignore'))
def get_clevis_config_from_protected_header(protected_header):
header = b64_decode(protected_header).decode("utf-8")
header_object = json.loads(header)
clevis = header_object.get("clevis", None)
if clevis:
pin = clevis.get("pin", None)
if pin == "tang":
return clevis
elif pin == "sss":
subpins = { }
jwes = clevis["sss"]["jwe"]
for jwe in jwes:
subconf = get_clevis_config_from_jwe(jwe)
subpin = subconf["pin"]
if subpin not in subpins:
subpins[subpin] = [ subconf[subpin] ]
else:
subpins[subpin].append(subconf[subpin])
return { "pin": "sss", "sss": { "t": clevis["sss"]["t"], "pins": subpins } }
else:
return { "pin": pin, pin: { } }
def get_clevis_config_from_jwe(jwe):
return get_clevis_config_from_protected_header(jwe.split(".")[0])
def info(dev):
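    # Parse `cryptsetup luksDump` output (plus `luksmeta` metadata for LUKS1)
    # into the shape of the UDisks Encrypted.Slots property this script
    # simulates: the LUKS version, the populated keyslots and the per-version
    # slot limit.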
slots = { }
version = 1
max_slots = 8
try:
result = subprocess.check_output([ "cryptsetup", "luksDump", dev ], stderr=subprocess.PIPE)
except subprocess.CalledProcessError:
return { "version": version, "slots": [ ], "max_slots": max_slots }
in_luks2_slot_section = False
in_luks2_token_section = False
for line in result.splitlines():
if not (line.startswith(b" ") or line.startswith(b"\t")):
in_luks2_slot_section = False
in_luks2_token_section = False
if line == b"Keyslots:":
in_luks2_slot_section = True
version = 2
max_slots = 32
elif line == b"Tokens:":
in_luks2_token_section = True
if in_luks2_slot_section:
match = re.match(b" ([0-9]+): luks2$", line)
else:
match = re.match(b"Key Slot ([0-9]+): ENABLED$", line)
if match:
slot = int(match.group(1))
entry = { "Index": { "v": slot } }
if version == 1:
try:
luksmeta = subprocess.check_output([ "luksmeta", "load", "-d", dev, "-s", str(slot),
"-u", "cb6e8904-81ff-40da-a84a-07ab9ab5715e" ],
stderr=subprocess.PIPE)
entry["ClevisConfig"] = {
"v": json.dumps(get_clevis_config_from_jwe(luksmeta.decode("utf-8")))
}
except subprocess.CalledProcessError:
pass
| if slot not in slots:
s | lots[slot] = entry
if in_luks2_token_section:
match = re.match(b" ([0-9]+): clevis$", line)
if match:
try:
token = subprocess.check_output([ "cryptsetup", "token", "export", dev, "--token-id", match.group(1) ],
stderr=subprocess.PIPE)
token_object = json.loads(token.decode("utf-8"))
if token_object.get("type") == "clevis":
config = json.dumps(get_clevis_config_from_protected_header(token_object["jwe"]["protected"]))
for slot_str in token_object.get("keyslots", [ ]):
slot = int(slot_str)
slots[slot] = { "Index": { "v": slot },
"ClevisConfig": { "v": config } }
except subprocess.CalledProcessError:
pass
return { "version": version, "slots": list(slots.values()), "max_slots": max_slots }
def monitor(dev):
mon = None
# We have to kill the udevadm process explicitly when Cockpit
# kills us. It will eventually exit on its own since its stdout
# will be closed when we exit, but that will only happen when it
# actually writes something.
def killmon():
if mon:
mon.terminate()
def sigexit(signo, stack):
killmon()
os._exit(0)
atexit.register(killmon)
signal.signal(signal.SIGTERM, sigexit)
signal.signal(signal.SIGINT, sigexit)
signal.signal(signal.SIGHUP, sigexit)
path = subprocess.check_output([ "udevadm", "info", "-q", "path", dev ]).rstrip(b"\n")
mon = subprocess.Popen([ "stdbuf", "-o", "L", "udevadm", "monitor", "-u", "-s", "block"],
stdout=subprocess.PIPE)
old_infos = info(dev)
sys.stdout.write(json.dumps(old_infos) + "\n")
sys.stdout.flush()
while True:
line = mon.stdout.readline()
if path in line:
new_infos = info(dev)
if new_infos != old_infos:
sys.stdout.write(json.dumps(new_infos) + "\n")
sys.stdout.flush()
old_infos = new_infos
monitor(sys.argv[1])
|
lekshmideepu/nest-simulator | testsuite/pytests/test_connect_all_to_all.py | Python | gpl-2.0 | 6,019 | 0 | # -*- coding: utf-8 -*-
#
# test_connect_all_to_all.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import unittest
import numpy as np
import scipy.stats
import test_connect_helpers as hf
from test_connect_parameters import TestParams
@hf.nest.ll_api.check_stack
class TestAllToAll(TestParams):
# specify connection pattern
rule = 'all_to_all'
conn_dict = {'rule': rule}
# sizes of populations
N1 = 6
N2 = 7
N1_array = 500
N2_array = 10
def testConnectivity(self):
self.setUpNetwork(self.conn_dict)
# make sure all connections do exist
M = hf.get_connectivity_matrix(self.pop1, self.pop2)
M_all = np.ones((len(self.pop2), len(self.pop1)))
hf.mpi_assert(M, M_all, self)
# make sure no connections were drawn from the target to the source
# population
M = hf.get_connectivity_matrix(self.pop2, self.pop1)
M_none = np.zeros((len(self.pop1), len(self.pop2)))
hf.mpi_assert(M, M_none, self)
def testInputArray(self):
for label in ['weight', 'delay']:
syn_params = {}
if label == 'weight':
self.param_array = np.arange(
self.N1_array * self.N2_array, dtype=float
).reshape(self.N2_array, self.N1_array)
elif label == 'delay':
self.param_array = np.arange(
1, self.N1_array * self.N2_array + 1
).reshape(self.N2_array, self.N1_array) * 0.1
syn_params[label] = self.param_array
hf.nest.ResetKernel()
self.setUpNetwork(self.conn_dict, syn_params,
N1=self.N1_array, N2=self.N2_array)
M_nest = hf.get_weighted_connectivity_matrix(
self.pop1, self.pop2, label)
hf.mpi_assert(M_nest, self.param_array, self)
def testInputArrayWithoutAutapses(self):
self.conn_dict['allow_autapses'] = False
for label in ['weight', 'delay']:
syn_params = {}
if label == 'weight':
self.param_array = np.arange(
self.N1 * self.N1, dtype=float).reshape(self.N1, self.N1)
elif label == 'delay':
self.param_array = np.arange(
1, self.N1 * self.N1 + 1).reshape(self.N1, self.N1) * 0.1
syn_params[label] = self.param_array
self.setUpNetworkOnePop(self.conn_dict, syn_params)
M_nest = hf.get_weighted_connectivity_matrix(
self.pop, self.pop, label)
np.fill_diagonal(self.param_array, 0)
hf.mpi_assert(M_nest, self.param_array, self)
def testInputArrayRPort(self):
syn_params = {}
neuron_model = 'iaf_psc_exp_multisynapse'
neuron_dict = {'tau_syn': [0.1 + i for i in range(self.N2)]}
self.pop1 = hf.nest.Create(neuron_model, self.N1)
self.pop2 = hf.nest.Create(neuron_model, self.N2, neuron_dict)
self.param_array = np.transpose(np.asarray(
[np.arange(1, self.N2 + 1) for i in range(self.N1)]))
syn_params['receptor_type'] = self.param_array
hf.nest.Connect(self.pop1, self.pop2, self.conn_dict, syn_params)
M = hf.get_weighted_connectivity_matrix(
self.pop1, self.pop2, 'receptor')
hf.mpi_assert(M, self.param_array, self)
def testInputArrayToStdpSynapse(self):
params = ['Wmax', 'alpha', 'lambda', 'mu_minus', 'mu_plus', 'tau_plus']
syn_params = {'synapse_model': 'stdp_synapse'}
values = [
np.arange(self.N1 * self.N2, dtype=float).reshape(self.N2, self.N1)
for i in range(6)
]
for i, param in enumerate(params):
syn_params[param] = values[i]
self.setUpNetwork(self.conn_dict, syn_params)
for i, param in enumerate(params):
a = hf.get_weighted_connectivity_matrix(
self.pop1, self.pop2, param)
hf.mpi_assert(a, values[i], self)
# test single threaded for now
def testRPortDistribution(self):
n_rport = 10
nr_neurons = 100
hf.nest.ResetKernel() # To reset local_num_threads
neuron_model = 'iaf_psc_exp_multisynapse'
neuron_dict = {'tau_syn': [0.1 + i for i in range(n_rport)]}
self.pop1 = hf.nest.Create(neuron_model, nr_neurons, neuron_dict)
self.pop2 = hf.nest.Create(neuron_model, nr_neurons, neuron_dict)
syn_params = {'synapse_model': 'static_synapse'}
syn_params['receptor_type'] = 1 + hf.nest.random.uniform_int(n_rport)
hf.nest.Connect(self.pop1, self.pop2, self.conn_dict, syn_params)
| M = hf.get_weighted_connectivity_matrix(
self.pop1, self.pop2, 'receptor')
M = hf.gather_data(M)
if M is not None:
M = M.flatten()
frequencies = scipy.stats.itemfreq(M)
self.assertTrue(np.array_equal(frequencies[:, 0], np.arange(
1, n_rport + 1)), 'Missing or invalid rports')
chi, p = scipy.stats.chisquare(frequencies[:, 1])
| self.assertGreater(p, self.pval)
def suite():
suite = unittest.TestLoader().loadTestsFromTestCase(TestAllToAll)
return suite
def run():
runner = unittest.TextTestRunner(verbosity=2)
runner.run(suite())
if __name__ == '__main__':
run()
|
sclc/NAEF | src/chebyshev_basis_cacg.py | Python | gpl-3.0 | 13,294 | 0.010832 | """CBCG method """
from chebyshev_polynomial import ChebyshevPolynomial
from gerschgorin_circle_theorem import GerschgorinCircleTheoremEigenvalueEstimator
import numpy as np
from scipy.sparse import linalg
class CBCG():
""" """
def | __init__(self):
pass
def cbcg_solver(self, mat, rhs, init_x, step_val, tol, maxiter):
gerschgorin_estimator = GerschgorinCircleTheoremEigenvalueEstimator()
max_eigenvalue, min_eigenvalue = gerschgorin_estimator.csr_mat_extreme_eigenvalue_estimation(mat)
chebyshev_basis_generator = Cheb | yshevPolynomial()
op_A = linalg.aslinearoperator(mat)
v_r = rhs - op_A(init_x)
v_x = init_x.copy()
s_normb = np.linalg.norm(rhs)
residual_ratio_hist = [np.linalg.norm(v_r)/s_normb]
for itercounter in range(1, maxiter+1):
m_chebyshev_basis = \
chebyshev_basis_generator.basis_generation_with_eigenvalue_shifting_and_scaling_single_vec(\
mat, v_r, step_val, max_eigenvalue, min_eigenvalue)
if itercounter == 1:
m_Q = m_chebyshev_basis
else:
#op_AQ_trans = linalg.aslinearoperator(m_AQ.transpose())
#AQ_trans_mul_chebyshev_basis = op_AQ_trans.matmat(m_chebyshev_basis)
#m_B = linalg.aslinearoperator(Q_trans_AQ_inverse).matmat (AQ_trans_mul_chebyshev_basis)
m_AQ_trans_mul_chebyshev_basis = np.matmul(m_AQ.T , m_chebyshev_basis)
m_B = np.matmul(m_Q_trans_AQ_inverse , m_AQ_trans_mul_chebyshev_basis)
m_Q = m_chebyshev_basis - np.matmul(m_Q, m_B)
m_AQ = op_A.matmat(m_Q)
#m_Q_trans_AQ = linalg.aslinearoperator(m_Q.transpose())(m_AQ)
m_Q_trans_AQ = np.matmul(m_Q.T, m_AQ)
m_Q_trans_AQ_inverse = np.linalg.inv(m_Q_trans_AQ)
v_alpha = np.matmul( m_Q_trans_AQ_inverse, np.matmul(m_Q.T, v_r) )
v_x += np.matmul(m_Q,v_alpha)
v_r -= np.matmul(m_AQ, v_alpha)
residual_ratio_hist.append( np.linalg.norm(v_r)/s_normb)
print(itercounter, ": ", np.linalg.norm(v_r)/s_normb)
if residual_ratio_hist[-1] <= tol:
return v_x, v_r, residual_ratio_hist
return v_x, v_r, residual_ratio_hist
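    # Usage sketch (assumed shapes): for a symmetric positive definite
    # scipy.sparse.csr_matrix A and right-hand side b,
    #
    #     x, r, hist = CBCG().cbcg_solver(A, b, np.zeros_like(b), 4, 1e-8, 100)
    #
    # where step_val (here 4) is the Chebyshev basis dimension built per outer
    # iteration.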
def cbcg_solver_least_square(self, mat, rhs, init_x, step_val, tol, maxiter):
"""get the inverse matrix by least square method"""
gerschgorin_estimator = GerschgorinCircleTheoremEigenvalueEstimator()
max_eigenvalue, min_eigenvalue = gerschgorin_estimator.csr_mat_extreme_eigenvalue_estimation(mat)
chebyshev_basis_generator = ChebyshevPolynomial()
op_A = linalg.aslinearoperator(mat)
v_r = rhs - op_A(init_x)
v_x = init_x.copy()
s_normb = np.linalg.norm(rhs)
residual_ratio_hist = [np.linalg.norm(v_r)/s_normb]
for itercounter in range(1, maxiter+1):
m_chebyshev_basis = \
chebyshev_basis_generator.basis_generation_with_eigenvalue_shifting_and_scaling_single_vec(\
mat, v_r, step_val, max_eigenvalue, min_eigenvalue)
if itercounter == 1:
m_Q = m_chebyshev_basis
else:
#op_AQ_trans = linalg.aslinearoperator(m_AQ.transpose())
#AQ_trans_mul_chebyshev_basis = op_AQ_trans.matmat(m_chebyshev_basis)
#m_B = linalg.aslinearoperator(Q_trans_AQ_inverse).matmat (AQ_trans_mul_chebyshev_basis)
m_AQ_trans_mul_chebyshev_basis = np.matmul(m_AQ.T , m_chebyshev_basis)
#m_B = np.matmul(m_Q_trans_AQ_inverse , m_AQ_trans_mul_chebyshev_basis)
m_B = np.linalg.lstsq(m_Q_trans_AQ, m_AQ_trans_mul_chebyshev_basis)[0]
m_Q = m_chebyshev_basis - np.matmul(m_Q, m_B)
m_AQ = op_A.matmat(m_Q)
#m_Q_trans_AQ = linalg.aslinearoperator(m_Q.transpose())(m_AQ)
m_Q_trans_AQ = np.matmul(m_Q.T, m_AQ)
#m_Q_trans_AQ_inverse = np.linalg.inv(m_Q_trans_AQ)
#v_alpha = np.matmul( m_Q_trans_AQ_inverse, np.matmul(m_Q.T, v_r) )
v_alpha = np.linalg.lstsq(m_Q_trans_AQ, np.matmul(m_Q.T, v_r))[0]
v_x += np.matmul(m_Q,v_alpha)
v_r -= np.matmul(m_AQ, v_alpha)
residual_ratio_hist.append( np.linalg.norm(v_r)/s_normb)
print(itercounter, ": ", np.linalg.norm(v_r)/s_normb)
if residual_ratio_hist[-1] <= tol:
return v_x, v_r, residual_ratio_hist
return v_x, v_r, residual_ratio_hist
def cbcg_solver_least_square_eigen_param(self, mat, rhs, init_x, step_val, tol, maxiter, max_eigenvalue, min_eigenvalue):
"""get the inverse matrix by least square method"""
#gerschgorin_estimator = GerschgorinCircleTheoremEigenvalueEstimator()
#max_eigenvalue, min_eigenvalue = gerschgorin_estimator.csr_mat_extreme_eigenvalue_estimation(mat)
chebyshev_basis_generator = ChebyshevPolynomial()
op_A = linalg.aslinearoperator(mat)
v_r = rhs - op_A(init_x)
v_x = init_x.copy()
s_normb = np.linalg.norm(rhs)
residual_ratio_hist = [np.linalg.norm(v_r)/s_normb]
print ("max and min eigen which are going to be used ... ", max_eigenvalue, " , ", min_eigenvalue)
for itercounter in range(1, maxiter+1):
m_chebyshev_basis = \
chebyshev_basis_generator.basis_generation_with_eigenvalue_shifting_and_scaling_single_vec(\
mat, v_r, step_val, max_eigenvalue, min_eigenvalue)
if itercounter == 1:
m_Q = m_chebyshev_basis
else:
#op_AQ_trans = linalg.aslinearoperator(m_AQ.transpose())
#AQ_trans_mul_chebyshev_basis = op_AQ_trans.matmat(m_chebyshev_basis)
#m_B = linalg.aslinearoperator(Q_trans_AQ_inverse).matmat (AQ_trans_mul_chebyshev_basis)
m_AQ_trans_mul_chebyshev_basis = np.matmul(m_AQ.T , m_chebyshev_basis)
#m_B = np.matmul(m_Q_trans_AQ_inverse , m_AQ_trans_mul_chebyshev_basis)
m_B = np.linalg.lstsq(m_Q_trans_AQ, m_AQ_trans_mul_chebyshev_basis)[0]
m_Q = m_chebyshev_basis - np.matmul(m_Q, m_B)
m_AQ = op_A.matmat(m_Q)
#m_Q_trans_AQ = linalg.aslinearoperator(m_Q.transpose())(m_AQ)
m_Q_trans_AQ = np.matmul(m_Q.T, m_AQ)
#m_Q_trans_AQ_inverse = np.linalg.inv(m_Q_trans_AQ)
#v_alpha = np.matmul( m_Q_trans_AQ_inverse, np.matmul(m_Q.T, v_r) )
v_alpha = np.linalg.lstsq(m_Q_trans_AQ, np.matmul(m_Q.T, v_r))[0]
v_x += np.matmul(m_Q,v_alpha)
v_r -= np.matmul(m_AQ, v_alpha)
residual_ratio_hist.append( np.linalg.norm(v_r)/s_normb)
print(itercounter, ": ", np.linalg.norm(v_r)/s_normb)
if residual_ratio_hist[-1] <= tol:
return v_x, v_r, residual_ratio_hist
return v_x, v_r, residual_ratio_hist
class BCBCG():
""" """
def __init__(self):
pass
    def bcbcg_solver(self, mat, RHS, init_X, step_val, tol, maxiter, whichcol):
gerschgorin_estimator = GerschgorinCircleTheoremEigenvalueEstimator()
max_eigenvalue, min_eigenvalue = gerschgorin_estimator.csr_mat_extreme_eigenvalue_estimation(mat)
chebyshev_basis_generator = ChebyshevPolynomial()
op_A = linalg.aslinearoperator(mat)
m_R = RHS - op_A(init_X)
m_X = init_X.copy()
R_to_RHS_norm_ratio = lambda x: np.linalg.norm(m_R[:,x])/np.linalg.norm(RHS[:,x])
residual_ratio_hist = [R_to_RHS_norm_ratio(whichcol)]
for itercounter in range(1, maxiter+1):
m_chebyshev_basis = \
chebyshev_basis_generator.basis_generation_with_eigenvalue_shifting_and_scaling_block_vecs(\
mat, m_R, step_val, max_eigenvalue, min_eigenvalue)
#print("basis rank",np.linalg.matrix_rank(m_chebyshev_basis))
#return
if itercounter == 1:
m_Q = m_chebyshev_basis
else:
|
yuriyarhipov/FeatureRequestApp | models.py | Python | mit | 1,068 | 0 | from flask_sqlalchemy import SQLAlchemy
db = SQLAl | chemy()
class Client(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50))
def __init__(self, name):
self.name = name
def __repr__(self):
return '<Client %r>' % self.name
class Area(db.Model):
i | d = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(50))
def __init__(self, name):
self.name = name
def __repr__(self):
return '<Area %r>' % self.name
class Feature(db.Model):
id = db.Column(db.Integer, primary_key=True)
description = db.Column(db.Text)
client_priority = db.Column(db.Integer)
target_date = db.Column(db.DateTime)
ticket_url = db.Column(db.Text)
client_id = db.Column(db.Integer, db.ForeignKey('client.id'))
client = db.relationship(
'Client',
backref=db.backref('clients', lazy='dynamic'))
area_id = db.Column(db.Integer, db.ForeignKey('area.id'))
area = db.relationship('Area', backref=db.backref('areas', lazy='dynamic'))
|
ShaguptaS/python | bigml/tests/create_batch_prediction_steps.py | Python | apache-2.0 | 8,511 | 0.004582 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
#
# Copyright 2012, 2015 BigML
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import time
import json
import requests
import csv
import traceback
from datetime import datetime, timedelta
from world import world, res_filename
from bigml.api import HTTP_CREATED
from bigml.api import FINISHED
from bigml.api import FAULTY
from bigml.api import get_status
from bigml.io import UnicodeReader
from read_batch_prediction_steps import (i_get_the_batch_prediction,
i_get_the_batch_centroid, i_get_the_batch_anomaly_score)
#@step(r'I create a batch prediction for the dataset with the model$')
def i_create_a_batch_prediction(step):
dataset = world.dataset.get('resource')
model = world.model.get('resource')
resource = world.api.create_batch_prediction(model, dataset)
world.status = resource['code']
assert world.status == HTTP_CREATED
world.location = resource['location']
world.batch_prediction = resource['object']
world.batch_predictions.append(resource['resource'])
#@step(r'I create a batch prediction for the dataset with the ensemble$')
def i_create_a_batch_prediction_ensemble(step):
dataset = world.dataset.get('resource')
ensemble = world.ensemble.get('resource')
resource = world.api.create_batch_prediction(ensemble, dataset)
world.status = resource['code']
assert world.status == HTTP_CREATED
world.location = resource['location']
world.batch_prediction = resource['object']
world.batch_predictions.append(resource['resource'])
#@step(r'I wait until the batch prediction status code is either (\d) or (-\d) less than (\d+)')
def wait_until_batch_prediction_status_code_is(step, code1, code2, secs):
start = datetime.utcnow()
i_get_the_batch_prediction(step, world.batch_prediction['resource'])
status = get_status(world.batch_prediction)
while (status['code'] != int(code1) and
status['code'] != int(code2)):
time.sleep(3)
assert datetime.utcnow() - start < timedelta(seconds=int(secs))
i_get_the_batch_prediction(step, world.batch_prediction['resource'])
status = get_status(world.batch_prediction)
assert status['code'] == int(code1)
#@step(r'I wait until the batch centroid status code is either (\d) or (-\d) less than (\d+)')
def wait_until_batch_centroid_status_code_is(step, code1, code2, secs):
start = datetime.utcnow()
i_get_the_batch_centroid(step, world.batch_centroid['resource'])
status = get_status(world.batch_centroid)
while (status['code'] != int(code1) and
status['code'] != int(code2)):
time.sleep(3)
assert datetime.utcnow() - start < timedelta(seconds=int(secs))
i_get_the_batch_centroid(step, world.batch_centroid['resource'])
status = get_status(world.batch_centroid)
assert status['code'] == int(code1)
#@step(r'I wait until the batch anomaly score status code is either (\d) or (-\d) less than (\d+)')
def wait_until_batch_anomaly_score_status_code_is(step, code1, code2, secs):
start = datetime.utcnow()
i_get_the_batch_anomaly_score(step, world.batch_anomaly_score['resource'])
status = get_status(world.batch_anomaly_score)
while (status['code'] != int(code1) and
status['code'] != int(code2)):
time.sleep(3)
assert datetime.utcnow() - start < timedelta(seconds=int(secs))
i_get_the_batch_anomaly_score(step, world.batch_anomaly_score['resource'])
status = get_status(world.batch_anomaly_score)
assert status['code'] == int(code1)
#@step(r'I wait until the batch prediction is ready less than (\d+)')
def the_batch_prediction_is_finished_in_less_than(step, secs):
wait_until_batch_prediction_status_code_is(step, FINISHED, FAULTY, secs)
#@step(r'I wait until the batch centroid is ready less than (\d+)')
def the_batch_centroid_is_finished_in_less_than(step, secs):
wait_until_batch_centroid_status_code_is(step, FINISHED, FAULTY, secs)
#@step(r'I wait until the batch anomaly score is ready less than (\d+)')
def the_batch_anomaly_score_is_finished_in_less_than(step, secs):
wait_until_batch_anomaly_score_status_code_is(step, FINISHED, FAULTY, secs)
#@step(r'I download the created predictions file to "(.*)"')
def i_download_predictions_file(step, filename):
file_object = world.api.download_batch_prediction(
world.batch_prediction, filename=res_filename(filename))
assert file_object is not None
world.output = file_object
#@step(r'I download the created centroid file to "(.*)"')
def i_download_centroid_file(step, filename):
file_object = world.api.download_batch_centroid(
world.batch_centroid, filename=res_filename(filename))
assert file_object is not None
world.output = file_object
#@step(r'I download the created anomaly score file to "(.*)"')
def i_download_anomaly_score_file(step, filename):
file_object = world.api.download_batch_anomaly_score(
world.batch_anomaly_score, filename=res_filename(filename))
assert file_object is not None
world.output = file_object
def check_rows(prediction_rows, test_rows):
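    # Compare downloaded prediction rows against the expected CSV, rounding
    # numeric cells to the shorter decimal precision of each pair so float
    # formatting differences do not fail the comparison.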
for row in prediction_rows:
check_row = next(test_rows)
        assert len(check_row) == len(row)
for index in range(len(row)):
dot = row[index].find(".")
if dot > 0:
try:
decs = min(len(row[index]), len(check_row[index])) - dot - 1
row[index] = round(float(row[index]), decs)
check_row[index] = round(float(check_row[index]), decs)
except ValueError:
pass
assert check_row[index] == row[index], ("Got: %s/ Expected: %s" % (row, check_row))
#@step(r'the batch prediction file is like "(.*)"')
def i_check_predictions(step, check_file):
with UnicodeReader(world.output) as prediction_rows:
with UnicodeReader(res_filename(check_file)) as test_rows:
check_rows(prediction_rows, test_rows)
#@step(r'the batch centroid file is like "(.*)"')
def i_check_batch_centroid(step, check_file):
i_check_predictions(step, check_file)
#@step(r'the batch anomaly score file is like "(.*)"')
def i_check_batch_anomaly_score(step, check_file):
i_check_predictions(step, check_file)
#@step(r'I check the batch centroid is ok')
def i_check_batch_centroid_is_ok(step):
assert world.api.ok(world.batch_centroid)
#@step(r'I check the batch anomaly score is ok')
def i_check_batch_anomaly_score_is_ok(step):
assert world.api.ok(world.batch_anomaly_score)
#@step(r'I create a batch centroid for the dataset$')
def i_create_a_batch_prediction_with_cluster(step):
dataset = world.dataset.get('resource')
cluster = world.cluster.get('resource')
resource = world.api.create_batch_centroid(cluster, dataset)
world.status = resource['code']
assert world.status = | = HTTP_CREATED
world.location = resource['location']
world.batch_centroid = resource['object']
world.batch_centroids.append(resource['resource'])
#@step(r'I create a batch anomaly score$')
def i_create_a_batch_prediction_with_anomaly(step):
dataset = world.dataset.get('resource')
anomaly = world.anomaly.get('resource')
resource = world.api.create_batch_anomaly_score(anomaly, dataset)
world.status = resource['code']
assert world.status == HTTP_CREATED
world.locati | on = resource['location']
world.batch_anomaly_score = resource['object']
world.batch_anomaly_scores.append(resource['resource'])
#@step(r'I create a source from the batch prediction$')
def i_create_a_source_from_batch_prediction(step):
ba |
AstroMatt/esa-subjective-time-perception | backend/api_v2/migrations/0009_trial_is_valid.py | Python | mit | 496 | 0.002016 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017- | 02-21 21:40
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('api_v2', '0008_auto_20170101_0105'),
]
operations = [
migrations.AddField(
model_name='trial',
name='is_valid',
field=models.Nul | lBooleanField(db_index=True, default=None, verbose_name='Is Valid?'),
),
]
|
ajyoon/brown | examples/feldman_projections_2/score.py | Python | gpl-3.0 | 5,802 | 0.000862 | from typing import Union
from brown import constants
from brown.core import brown
from brown.core.music_font import MusicFont
from brown.core.object_group import ObjectGroup
from brown.core.path import Path
from brown.core.pen import Pen
from brown.core.pen_pattern import PenPattern
from brown.core.staff import Staff
from examples.feldman_projections_2.glyph_name import GlyphName
from examples.feldman_projections_2.grid_unit import GridUnit
from examples.feldman_projections_2.instrument_data import InstrumentData
from examples.feldman_projections_2.measure import Measure
from examples.feldman_projections_2.music_text_event import MusicTextEvent
from examples.feldman_projections_2.text_event import TextEvent
class Score(ObjectGroup):
_TEXT_FONT_SIZE = GridUnit(0.6).base_value
_MUSIC_FONT_SIZE = Staff._make_unit_class(GridUnit(0.5))
_bar_line_pen = Pen(thickness=GridUnit(0.05), pattern=PenPattern.DOT)
_instrument_divider_pen = Pen(thickness=GridUnit(0.05))
def __init__(self, pos, instruments, parent):
super().__init__(pos, parent)
self.events = []
self.text_font = brown.default_font.modified(
size=Score._TEXT_FONT_SIZE, weight=60
)
self.music_font = MusicFont(
constants.DEFAULT_MUSIC_FONT_NAME, Score._MUSIC_FONT_SIZE
)
self.instruments = instruments
for i, instrument in enumerate(instruments):
for event_data in instrument.event_data:
self.events.append(self._create_event(i, event_data))
self.draw_instrument_dividers()
self.draw_bar_lines()
def _create_event(self, instrument_index, event_data):
if isinstance(event_data.text, GlyphName):
return self._create_music_text_event(instrument_index, event_data)
return self._create_text_event(instrument_index, event_data)
def _create_text_event(self, instrument_index, event_data):
return TextEvent(
(
event_data.pos_x,
(Score._instrument_pos_y(instrument_index) + event_data.register.value),
),
self,
event_data.length,
event_data.text,
self.text_font,
)
def _create_music_text_event(self, instrument_index, event_data):
return MusicTextEvent(
(
event_data.pos_x,
(Score._instrument_pos_y(instrument_index) + event_data.register.value),
),
self,
event_data.length,
event_data.text,
self.music_font,
)
@property
def measure_count(self):
return (
max(
max(int(Measure(e.pos_x).display_value) for e in i.event_data)
for i in self.instruments
)
+ 1
)
@staticmethod
def _instrument_pos_y(instrument_index):
return GridUnit(3 * instrument_index)
@staticmethod
def _divider_pos_y(divider_index):
return GridUnit(3 * divider_index)
@staticmethod
def _divider_visible(
instrument_above: Union[InstrumentData, None],
instrument_below: Union[InstrumentData, None],
measure_num: int,
) -> bool:
return (
instrument_above is not None
and instrument_above.measure_has_events(measure_num)
) or (
instrument_below is not None
and instrument_below.measure_has_events(measure_num)
)
def _bar_line_extends_below(self, measure_num: int, divider_num: int) -> bool:
if divider_num >= len(self.instruments):
return False
instrument = self.instruments[divider_num]
return instrument.measure_has_events(
measure_num - 1
) or instrument.measure_has_events(measure_num)
def draw_instrument_dividers(self):
for divider in range(len(self.instruments) + 1):
current_path = Path(
(Measure(0), Score._divider_pos_y(divider)),
pen=Score._instrument_divider_pen,
parent=self,
)
instrument_above = self.instruments[divider - 1] if divider > 0 else None
instrument_below = (
self.instruments[divider] if divider < len(self.ins | truments) else None
)
drawing = False
for measure_num in range(self.measure_count + 1):
if Score._divider_visible(
instrument_above, instrument_below, measure_num
):
if not drawing:
current_path.move_to(Measure | (measure_num), GridUnit(0))
drawing = True
else:
if drawing:
current_path.line_to(Measure(measure_num), GridUnit(0))
drawing = False
def draw_bar_lines(self):
for measure_num in range(self.measure_count + 1):
current_path = Path(
(Measure(measure_num), GridUnit(0)),
pen=Score._bar_line_pen,
parent=self,
)
drawing = False
for divider_num in range(len(self.instruments) + 1):
if self._bar_line_extends_below(measure_num, divider_num):
if not drawing:
current_path.move_to(
GridUnit(0), Score._instrument_pos_y(divider_num)
)
drawing = True
else:
if drawing:
current_path.line_to(
GridUnit(0), Score._instrument_pos_y(divider_num)
)
drawing = False
|
tomvansteijn/xsb | xsboringen/scripts/xsb.py | Python | gpl-3.0 | 1,552 | 0.005799 | # -*- coding: utf-8 -*-
# Royal HaskoningDHV
from xsboringen.scripts.write_csv import write_csv
from xsboringen.scripts.write_shape import write_shape
from xsboringen.scripts.plot import plot_cross_section
import click
import yaml
from collections import ChainMap
import log | ging
import os
log = logging.getLogger(os.path.basename(__file__))
@click.command()
@click.argument('function',
type=click.Choice(['write_csv', 'write_shape', 'plot']),
)
@click.argument('inputfile',
)
@click.option('--logging', 'level',
type=click.Choice(['warning', 'info', 'debug']),
default='info',
help='log messages level'
)
def main(function, inputfile, level):
'''plot geological cross-sections'''
logging.basicConfig(level=level.upper())
| # function arguments from input file
with open(inputfile) as y:
kwargs = yaml.load(y)
# read default config
scripts_folder = os.path.dirname(os.path.realpath(__file__))
defaultconfigfile = os.path.join(os.path.dirname(scripts_folder),
'defaultconfig.yaml')
with open(defaultconfigfile) as y:
defaultconfig = yaml.load(y)
# get user config from input file
userconfig = kwargs.get('config') or {}
# chain config
kwargs['config'] = ChainMap(userconfig, defaultconfig)
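    # With ChainMap, user keys shadow defaults entry by entry; for example
    # (hypothetical values) ChainMap({'dpi': 300}, {'dpi': 96, 'fmt': 'png'})
    # resolves 'dpi' to 300 while 'fmt' still falls back to 'png'.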
# dispatch function
if function == 'write_csv':
write_csv(**kwargs)
elif function == 'write_shape':
write_shape(**kwargs)
elif function == 'plot':
plot_cross_section(**kwargs)
if __name__ == '__main__':
main()
|
keedio/sahara | sahara/service/edp/spark/engine.py | Python | apache-2.0 | 8,343 | 0 | # Copyright (c) 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from sahara import conductor as c
from sahara import context
from sahara import exceptions as e
from sahara.i18n import _
from sahara.plugins.general import utils as plugin_utils
from sahara.plugins.spark import config_helper as c_helper
from sahara.service.edp import base_engine
from sahara.service.edp import job_utils
from sahara.service.validations.edp import job_execution as j
from sahara.utils import edp
from sahara.utils import files
from sahara.utils import general
from sahara.utils import remote
conductor = c.API
class SparkJobEngine(base_engine.JobEngine):
def __init__(self, cluster):
self.cluster = cluster
def _get_pid_and_inst_id(self, job_id):
try:
pid, inst_id = job_id.split("@", 1)
if pid and inst_id:
return (pid, inst_id)
except Exception:
pass
return "", ""
def _get_instance_if_running(self, job_execution):
pid, inst_id = self._get_pid_and_inst_id(job_execution.oozie_job_id)
if not pid or not inst_id or (
job_execution.info['status'] in edp.JOB_STATUSES_TERMINATED):
return None, None
# TODO(tmckay): well, if there is a list index out of range
# error here it probably means that the instance is gone. If we
# have a job execution that is not terminated, and the instance
# is gone, we should probably change the status somehow.
# For now, do nothing.
try:
instance = general.get_instances(self.cluster, [inst_id])[0]
except Exception:
instance = None
return pid, instance
def _get_result_file(self, r, job_execution):
result = os.path.join(job_execution.extra['spark-path'], "result")
return r.execute_command("cat %s" % result,
raise_when_error=False)
def _check_pid(self, r, pid):
ret, stdout = r.execute_command("ps hp %s" % pid,
raise_when_error=False)
return ret
def _get_job_status_from_remote(self, r, pid, job_execution):
# If the pid is there, it's still running
if self._check_pid(r, pid) == 0:
return {"status": edp.JOB_STATUS_RUNNING}
# The process ended. Look in the result file to get the exit status
ret, stdout = self._get_result_file(r, job_execution)
if ret == 0:
exit_status = stdout.strip()
if exit_status == "0":
return {"status": edp.JOB_STATUS_SUCCEEDED}
# SIGINT will yield either -2 or 130
elif exit_status in ["-2", "130"]:
return {"status": edp.JOB_STATUS_KILLED}
# Well, process is done and result is missing or unexpected
return {"status": edp.JOB_STATUS_DONEWITHERROR}
def cancel_job(self, job_execution):
pid, instance = self._get_instance_if_running(job_execution)
if instance is not None:
with remote.get_remote(instance) as r:
ret, stdout = r.execute_command("kill -SIGINT %s" % pid,
| raise_when_error=False)
if ret == 0:
# We had some effect, check the status
return self._get_job_status_from_remote(r,
pid, job_execution)
def get_job_status(self, job | _execution):
pid, instance = self._get_instance_if_running(job_execution)
if instance is not None:
with remote.get_remote(instance) as r:
return self._get_job_status_from_remote(r, pid, job_execution)
def _job_script(self):
path = "service/edp/resources/launch_command.py"
return files.get_file_text(path)
def run_job(self, job_execution):
ctx = context.ctx()
job = conductor.job_get(ctx, job_execution.job_id)
proxy_configs = job_execution.job_configs.get('proxy_configs')
# We'll always run the driver program on the master
master = plugin_utils.get_instance(self.cluster, "master")
# TODO(tmckay): wf_dir should probably be configurable.
# The only requirement is that the dir is writable by the image user
wf_dir = job_utils.create_workflow_dir(master, '/tmp/spark-edp', job,
job_execution.id)
paths = job_utils.upload_job_files(master, wf_dir, job,
libs_subdir=False,
proxy_configs=proxy_configs)
# We can shorten the paths in this case since we'll run out of wf_dir
paths = [os.path.basename(p) for p in paths]
# TODO(tmckay): for now, paths[0] is always assumed to be the app
# jar and we generate paths in order (mains, then libs).
# When we have a Spark job type, we can require a "main" and set
# the app jar explicitly to be "main"
app_jar = paths.pop(0)
# The rest of the paths will be passed with --jars
additional_jars = ",".join(paths)
if additional_jars:
additional_jars = "--jars " + additional_jars
# Launch the spark job using spark-submit and deploy_mode = client
host = master.hostname()
port = c_helper.get_config_value("Spark", "Master port", self.cluster)
spark_submit = os.path.join(
c_helper.get_config_value("Spark",
"Spark home",
self.cluster),
"bin/spark-submit")
job_class = job_execution.job_configs.configs["edp.java.main_class"]
# TODO(tmckay): we need to clean up wf_dirs on long running clusters
# TODO(tmckay): probably allow for general options to spark-submit
args = " ".join(job_execution.job_configs.get('args', []))
# The redirects of stdout and stderr will preserve output in the wf_dir
cmd = "%s %s --class %s %s --master spark://%s:%s %s" % (
spark_submit,
app_jar,
job_class,
additional_jars,
host,
port,
args)
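        # For illustration only (paths, class and host are hypothetical),
        # the resolved command resembles:
        #   /opt/spark/bin/spark-submit edp-job.jar --class org.example.Main \
        #       --jars dep1.jar,dep2.jar --master spark://master-host:7077 args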
# If an exception is raised here, the job_manager will mark
# the job failed and log the exception
with remote.get_remote(master) as r:
# Upload the command launch script
launch = os.path.join(wf_dir, "launch_command")
r.write_file_to(launch, self._job_script())
r.execute_command("chmod +x %s" % launch)
ret, stdout = r.execute_command(
"cd %s; ./launch_command %s > /dev/null 2>&1 & echo $!"
% (wf_dir, cmd))
if ret == 0:
# Success, we'll add the wf_dir in job_execution.extra and store
# pid@instance_id as the job id
# We know the job is running so return "RUNNING"
return (stdout.strip() + "@" + master.id,
edp.JOB_STATUS_RUNNING,
{'spark-path': wf_dir})
        # Hmm, no exception but something failed.
# Since we're using backgrounding with redirect, this is unlikely.
raise e.EDPError(_("Spark job execution failed. Exit status = "
"%(status)s, stdout = %(stdout)s") %
{'status': ret, 'stdout': stdout})
def validate_job_execution(self, cluster, job, data):
j.check_main_class_present(data, job)
@staticmethod
def get_possible_job_conf |
frascoweb/frasco-upload | frasco_upload/backends.py | Python | mit | 1,658 | 0.000603 | from frasco import current_app, url_for
from flask import safe_join
import os
upload_backends = {}
def file_upload_backend(cls):
upload_backends[cls.name] = cls
return cls
class StorageBackend(object):
def __init__(self, options):
self.options = options
def save(self, file, filename):
raise NotImplementedError
def | url_for(self, filename, **kwargs):
raise NotImplementedError
def delete(self, filename):
raise NotImplementedError
@file_upload_backend
class LocalStorageBackend(StorageBackend):
name = 'local'
def save(self, file, filename):
filename = safe_join(self.options["upload_dir"], filename)
if not os.path.isabs(filename):
filename = os.path.join(current_app. | root_path, filename)
dirname = os.path.dirname(filename)
if not os.path.exists(dirname):
os.makedirs(dirname)
file.save(filename)
def url_for(self, filename, **kwargs):
return url_for("static_upload", filename=filename, **kwargs)
def delete(self, filename):
filename = safe_join(self.options["upload_dir"], filename)
if not os.path.isabs(filename):
filename = os.path.join(current_app.root_path, filename)
if os.path.exists(filename):
os.unlink(filename)
@file_upload_backend
class HttpStorageBackend(StorageBackend):
name = 'http'
def url_for(self, filename, **kwargs):
return 'http://' + filename
@file_upload_backend
class HttpsStorageBackend(StorageBackend):
name = 'https'
def url_for(self, filename, **kwargs):
return 'https://' + filename |
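# A minimal sketch of plugging a custom backend into the registry above; the
# 'memory' name and the dict-backed store are hypothetical, for illustration:
@file_upload_backend
class MemoryStorageBackend(StorageBackend):
    name = 'memory'
    _files = {}
    def save(self, file, filename):
        # Keep the raw bytes in memory, keyed by filename.
        self._files[filename] = file.read()
    def url_for(self, filename, **kwargs):
        return 'memory://' + filename
    def delete(self, filename):
        self._files.pop(filename, None)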
telefonicaid/murano | murano/tests/functional/engine/base.py | Python | apache-2.0 | 12,267 | 0 | # Copyright (c) 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import socket
import time
import uuid
import testresources
import testtools
from heatclient import client as heatclient
from keystoneclient.v2_0 import client as ksclient
from muranoclient import client as mclient
import muranoclient.common.exceptions as exceptions
import murano.tests.functional.engine.config as cfg
CONF = cfg.cfg.CONF
class MuranoBase(testtools.TestCase, testtools.testcase.WithAttributes,
testresources.ResourcedTestCase):
@classmethod
def setUpClass(cls):
super(MuranoBase, cls).setUpClass()
cfg.load_config()
keystone_client = ksclient.Client(username=CONF.murano.user,
password=CONF.murano.password,
tenant_name=CONF.murano.tenant,
auth_url=CONF.murano.auth_url)
heat_url = keystone_client.service_catalog.url_for(
service_type='orchestration', endpoint_type='publicURL')
cls.heat_client = heatclient.Client('1', endpoint=heat_url,
token=keystone_client.auth_token)
url = CONF.murano.murano_url
murano_url = url if 'v1' not in url else "/".join(
url.split('/')[:url.split('/').index('v1')])
cls.muranoclient = mclient.Client('1',
endpoint=murano_url,
token=keystone_client.auth_token)
cls.linux = CONF.murano.linux_image
cls.pkgs_path = os.path.abspath(os.path.join(
os.path.dirname(__file__),
os.path.pardir,
'murano-app-incubator'
))
def upload_package(package_name, body, app):
files = {'%s' % package_name: open(app, 'rb')}
return cls.muranoclient.packages.create(body, files)
upload_package(
'PostgreSQL',
{"categories": ["Databases"], "tags": ["tag"]},
os.path.join(cls.pkgs_path, 'io.murano.databases.PostgreSql.zip')
)
upload_package(
'SqlDatabase',
{"categories": ["Databases"], "tags": ["tag"]},
os.path.join(cls.pkgs_path, 'io.murano.databases.SqlDatabase.zip')
)
upload_package(
'Apache',
{"categories": ["Application Servers"], "tags": ["tag"]},
os.path.join(cls.pkgs_path,
'io.murano.apps.apache.ApacheHttpServer.zip')
)
upload_package(
'Tomcat',
{"categories": ["Application Servers"], "tags": ["tag"]},
os.path.join(cls.pkgs_path, 'io.murano.apps.apache.Tomcat.zip')
)
upload_package(
'Telnet',
{"categories": ["Web"], "tags": ["tag"]},
os.path.join(cls.pkgs_path, 'io.murano.apps.linux.Telnet.zip')
)
def setUp(self):
super(MuranoBase, self).setUp()
self.environments = []
def tearDown(self):
super(MuranoBase, self).tearDown()
for env in self.environments:
try:
self.environment_delete(env)
except Exception:
pass
def environment_delete(self, environment_id, timeout=180):
self.muranoclient.environments.delete(environment_id)
start_time = time.time()
while time.time() - start_time < timeout:
try:
self.muranoclient.environments.get(environment_id)
except exceptions.HTTPNotFound:
return
raise Exception(
'Environment {0} was not deleted in {1} seconds'.format(
environment_id, timeout))
def wait_for_environment_deploy(self, environment):
start_time = time.time()
while environment.manager.get(environment.id).status != 'ready':
if time.time() - start_time > 1200:
self.fail(
'Environment deployment is not finished in 1200 seconds')
time.sleep(5)
return environment.manager.get(environment.id)
def check_port_access(self, ip, port):
result = 1
start_time = time.time()
while time.time() - start_time < 300:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
result = sock.connect_ex((str(ip), port))
sock.close()
if result == 0:
break
time.sleep(5)
self.assertEqual(0, result, '%s port is closed on instance' % port)
def deployment_success_check(self, environment, port):
deployment = self.muranoclient.deployments.list(environment.id)[-1]
self.assertEqual('success', deployment.state,
'Deployment status is {0}'.format(deployment.state))
ip = environment.services[-1]['instance']['floatingIpAddress']
if ip:
self.check_port_access(ip, port)
else:
self.fail('Instance does not have floating IP')
def test_deploy_telnet(self):
post_body = {
"instance": {
"flavor": "m1.medium",
"image": self.linux,
"assignFloatingIp": True,
"?": {
"type": "io.murano.resources.LinuxMuranoInstance",
"id": str(uuid.uuid4())
},
"name": "testMurano"
},
"name": "teMurano",
"?": {
"type": "io.murano.apps.linux.Telnet",
"id": str(uuid.uuid4())
}
}
environment_name = 'Telnetenv' + uuid.uuid4().hex[:5]
env = self._quick_deploy(environment_name, post_body)
self.deployment_success_check(env, 23)
def test_deploy_apache(self):
post_body = {
"instance": {
"flavor": "m1.medium",
"image": self.linux,
"assignFloatingIp": True,
"?": {
"type": "io.murano.resources.LinuxMuranoInstance",
"id": str(uuid.uuid4())
},
"name": "testMurano"
},
"name": "teMurano",
"?": {
"type": "io.murano.apps.apache.ApacheHttpServer",
"id": str(uuid.uuid4())
}
}
environment_name = 'Apacheenv' + uuid.uuid4().hex[:5]
env = self._quick_deploy(e | nvironment_name, post_body)
self.deployment_success_check(env, 80)
def test_deploy_postgresql(self):
post_body = {
"instance": {
"flavor": "m1.medium",
"image": self.linux,
"assignFloatingIp": True,
"?": {
"type": "io.murano.resources.LinuxMuranoInstance",
"id": str(uuid.uuid4())
},
"name": | "testMurano"
},
"name": "teMurano",
"database": "test_db",
"username": "test_usr",
"password": "test_pass",
"?": {
"type": "io.murano.databases.PostgreSql",
"id": str(uuid.uuid4())
}
}
environment_name = 'Postgreenv' + uuid.uuid4().hex[:5]
env = self._quick_deploy(environment_name, post_body)
self.deployment_success_check(env, 5432)
def test_deploy_tomcat(self):
post_body = {
"instance": {
"flavor": "m1.medium",
"image": self.linux,
|
HaroldMills/Vesper | vesper/util/tests/test_yaml_utils.py | Python | mit | 1,582 | 0.008217 | from vesper.tests.test_case import TestCase
import vesper.util.yaml_utils as yaml_utils
class YamlUtilsTests(TestCase):
def test_dump_and_load(self):
x = {'x': 1, 'y': [1, 2, 3], 'z': {'one': 1}}
s = yaml_utils.dump(x)
y = yaml_utils.load(s)
self.assertEqual(x, y)
def test_dump_and_load_with_non_default_flow_style(self):
x = {'x': 1, 'y': [1, 2, 3], 'z': {'one': 1}}
s = yaml_utils.dump(x, default_flow_style=False)
y = yaml_utils.load(s)
self.assertEqual(x, y)
def test_sexagesimal_load(self):
"""
The PyYAML `load` function parses YAML 1.1, in which "12:34:56"
is the sexagesimal number 12 * 3600 + 34 * 60 + 56 = 45296. We
        use `ruamel.yaml` rather than PyYAML because it can also parse
YAML 1.2, in which "12:34:56" is simply the string "12:34:56".
This test checks that `yaml_utils.load` parses its input as we
would like.
"" | "
x = yaml_utils.load('12:34:56')
self.assertEqual(x, '12:34:56')
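    # def test_sexagesimal_pyyaml_contrast(self):
    #
    #     """
    #     A hedged sketch (assumes PyYAML is importable): PyYAML implements
    #     YAML 1.1, where '12:34:56' is a base-60 integer, so it returns
    #     12 * 3600 + 34 * 60 + 56 == 45296 rather than a string.
    #     """
    #
    #     import yaml
    #     x = yaml.safe_load('12:34:56')
    #     self.assertEqual(x, 45296)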
# def test_numpy_scalar_dump(self):
#
# """
# This test shows that you can't dump a NumPy scalar, since the
# dumper doesn't know how to represent its type. Perhaps we could
# tell it how to, or perhaps that would be more trouble than it's
# worth.
# """
#
# imp | ort numpy as np
# x = np.arange(3)
# s = yaml_utils.dump(x[1])
# self.assertEqual(s, '1')
|
diogocs1/comps | web/addons/account_budget/wizard/account_budget_crossovered_summary_report.py | Python | apache-2.0 | 2,191 | 0.000913 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with th | is program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
class | account_budget_crossvered_summary_report(osv.osv_memory):
"""
    This wizard provides the crossovered budget summary report
"""
_name = 'account.budget.crossvered.summary.report'
_description = 'Account Budget crossvered summary report'
_columns = {
'date_from': fields.date('Start of period', required=True),
'date_to': fields.date('End of period', required=True),
}
_defaults = {
'date_from': lambda *a: time.strftime('%Y-01-01'),
'date_to': lambda *a: time.strftime('%Y-%m-%d'),
}
def check_report(self, cr, uid, ids, context=None):
if context is None:
context = {}
data = self.read(cr, uid, ids, context=context)[0]
datas = {
'ids': context.get('active_ids',[]),
'model': 'crossovered.budget',
'form': data
}
datas['form']['ids'] = datas['ids']
datas['form']['report'] = 'analytic-one'
return self.pool['report'].get_action(cr, uid, [], 'account_budget.report_crossoveredbudget', data=datas, context=context)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
obeattie/sqlalchemy | test/base/test_except.py | Python | mit | 5,046 | 0.014665 | """Tests exceptions and DB-API exception wrapping."""
from sqlalchemy import exc as sa_exceptions
from sqlalchemy.test import TestBase
# Py3K
#StandardError = BaseException
# Py2K
from exceptions import StandardError, KeyboardInterrupt, SystemExit
# end Py2K
class Error(StandardError):
"""This class will be old-style on <= 2.4 and new-style on >= 2.5."""
class DatabaseError(Error):
pass
class OperationalError(DatabaseError):
pass
class ProgrammingError(DatabaseError):
def __str__(self):
return "<%s>" % self.bogus
class OutOfSpec(DatabaseError):
pass
class WrapTest(TestBase):
def test_db_error_normal(self):
try:
raise sa_exceptions.DBAPIError.instance(
'', [], OperationalError())
except sa_exceptions.DBAPIError:
self.assert_(True)
def test_tostring(self):
try:
raise sa_exceptions.DBAPIError.instance(
'this is a message', None, OperationalError())
except sa_exceptions.DBAPIError, exc:
assert str(exc) == "(OperationalError) 'this is a message' None"
def test_tostring_large_dict(self):
try:
raise sa_exceptions.DBAPIError.instance(
'this is a message', {'a':1, 'b':2, 'c':3, 'd':4, 'e':5, 'f':6, 'g':7, 'h':8, 'i':9, 'j':10, 'k':11}, OperationalError())
except sa_exceptions.DBAPIError, exc:
assert str(exc).startswith("(OperationalError) 'this is a message' {")
def test_tostring_large_list(self):
try:
raise sa_exceptions.DBAPIError.instance(
'this is a message', [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11], OperationalError())
except sa_exceptions.DBAPIError, exc:
assert str(exc).startswith("(OperationalError) 'this is a message' [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]")
def test_tostring_large_executemany(self):
try:
raise sa_exceptions.DBAPIError.instance(
'this is a message', [{1:1},{1:1},{1:1},{1:1},{1:1},{1:1},{1:1},{1:1},{1:1},{1:1},], OperationalError())
except sa_exceptions.DBAPIError, exc:
assert str(exc) == "(OperationalError) 'this is a message' [{1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1}, {1: 1}]", str(exc)
try:
raise sa_exceptions.DBAPIError.instance(
'this is a message', [{1:1},{1:1},{1:1},{1:1},{1:1},{1:1},{1:1},{1:1},{1:1},{1:1},{1:1},], OperationalError())
except sa_exceptions.DBAPIError, exc:
assert str(exc) == "(OperationalError) 'this is a message' [{1: 1}, {1: 1}] ... and a total of 11 bound parameter sets"
try:
raise sa_exceptions.DBAPIError.instance(
'this is a message', [(1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,)], OperationalError())
except sa_exceptions.DBAPIError, exc:
assert str(exc) == "(OperationalError) 'this is a message' [(1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,)]"
try:
raise sa_exceptions.DBAPIError.instance(
'this is a message', [(1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,), (1,), ], OperationalError())
except sa_exceptions.DBAPIError, exc:
assert str(exc) == "(OperationalError) 'this is a message' [(1,), (1,)] ... and a total of 11 bound parameter sets"
def test_db_error_busted_dbapi(self):
try:
raise sa_exceptions.DBAPIError.instance(
'', [], ProgrammingError())
except sa_exception | s.DBAPIError, e:
self.assert_(True)
self.assert_('Error in str() of DB-API' in e.args[0])
def test_db_error_noncompliant_dbapi(self):
try:
| raise sa_exceptions.DBAPIError.instance(
'', [], OutOfSpec())
except sa_exceptions.DBAPIError, e:
self.assert_(e.__class__ is sa_exceptions.DBAPIError)
except OutOfSpec:
self.assert_(False)
# Make sure the DatabaseError recognition logic is limited to
# subclasses of sqlalchemy.exceptions.DBAPIError
try:
raise sa_exceptions.DBAPIError.instance(
'', [], sa_exceptions.ArgumentError())
except sa_exceptions.DBAPIError, e:
self.assert_(e.__class__ is sa_exceptions.DBAPIError)
except sa_exceptions.ArgumentError:
self.assert_(False)
def test_db_error_keyboard_interrupt(self):
try:
raise sa_exceptions.DBAPIError.instance(
'', [], KeyboardInterrupt())
except sa_exceptions.DBAPIError:
self.assert_(False)
except KeyboardInterrupt:
self.assert_(True)
def test_db_error_system_exit(self):
try:
raise sa_exceptions.DBAPIError.instance(
'', [], SystemExit())
except sa_exceptions.DBAPIError:
self.assert_(False)
except SystemExit:
self.assert_(True)
|
gboone/wedding.harmsboone.org | posts/models.py | Python | mit | 597 | 0.025126 | from django.db import models
from django.core.urlresolvers import reverse
class Post(models.Model):
title = models.CharField(max_length=255)
slug = models.SlugField(unique=True, max_length=255)
descript | ion = models.CharField(max_length=255)
content = models.TextField()
published = models.BooleanField(default=True)
created = models.DateTimeField(auto_now_add=True)
content_id = models.CharField(max_length=64)
class Meta:
ordering = ['-created']
|
def __unicode__(self):
return u'%s' % self.title
def get_absolute_url(self):
return reverse('mysite.views.post', args=[self.slug]) |
FedoraScientific/salome-hexablock | src/TEST_PY/test_v6/monica.py | Python | lgpl-2.1 | 5,353 | 0.034 | # !/bin/python
# -*- coding: latin-1 -*-
# Copyright (C) 2009-2014 CEA/DEN, EDF R&D
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#
# See http://www.salome-platform.org/ or email : webmaster.salome@opencascade.com
#
# Hexa : Creation d'hexaedres
import hexablock
import os
#---+----1----+----2----+----3----+----4----+----5----+----6----+----7----+----8
doc = hexablock.addDocument ("default")
vx = doc.addVector (1,0,0)
vy = doc.addVector (0,1,0)
vz = doc.addVector (0,0,1)
vxy = doc.addVector (1,1,0)
nbr_files = 0
# ======================================================= save_vtk
def save_vtk () :
global nbr_files
nom = "monica%d.vtk" % nbr_files
nbr_files += 1
doc.saveVtk (nom)
# ======================================================= carre
def carre (x) :
return x*x
# ======================================================= get_center
def get_center (quad) :
px = 0
py = 0
pz = 0
for nv in range (4) :
vertex = quad.getVertex (nv)
px += vertex.getX() / 4
py += vertex.getY() / 4
pz += vertex.getZ() / 4
return [ px, py, pz ]
# ======================================================= nearest
def nearest (grid, vertex) :
nbre = grid.countVertex()
dmin = 1e+6
result = None
px = vertex.getX()
py = vertex.getY()
pz = vertex.getZ()
for nro in range (nbre) :
v1 = grid.getVertex (nro)
d2 = carre(px-v1.getX()) + carre(py-v1.getY()) + carre(pz-v1.getZ())
if (d2 < dmin) :
result = v1
dmin = d2
    print vertex.getName(), px, py, pz, " -> ", result.getName()
return result
# ======================================================= nearest_quad
def nearest_quad (grid, quad) :
dmin = 1e+16
result = None
[ox, oy, oz] = get_center (quad)
nbre = grid.countQuad ()
for nro in range (nbre) :
q1 = grid.getQuad (nro)
if q1 != None :
[px, py, pz] = get_center (q1)
d2 = carre(px-ox) + carre(py-oy) + carre(pz-oz)
if (d2 < dmin) :
result = q1
dmin = d2
    print quad.getName(), px, py, pz, " -> ", result.getName()
return result
# ======================================================= insert_cylinder
def insert_cylinder (plaque, nx, ny) :
hexa = plaque.getHexaIJK (nx, ny, 0)
xmin = 666 ; ymin = xmin ; zmin = xmin
xmax = -666 ; ymax = xmax ; zmax = xmax
tabv1 = []
for nv in range (8) :
node = hexa.getVertex (nv)
xmin = min (xmin, node.getX()) ; xmax = max (xmax, node.getX())
ymin = min (ymin, node.getY()) ; ymax = max (ymax, node.getY())
zmin = min (zmin, node.getZ()) ; zmax = max (zmax, node.getZ())
tabv1.append (node)
doc.removeHexa (hexa)
save_vtk ()
dx = (xmax - xmin)/2
dz = (zmax - zmin)/2
xorig = (xmin + xmax)/2
yorig = (ymin + ymax)/2
zorig = (zmin + zmax)/2 - 3*dz
orig = doc.addVertex (xorig, yorig, zorig)
nr = 1
na = 4
nh = 3
rext = dx
rint = rext/3
haut = 3
angle = 360
pipe = doc.makePipeUni (orig, vxy,vz, rint,rext,angle,haut, nr,na,nh)
hexablock.what ()
tabquad = []
tabv0 = []
for nq in range (4) :
quad = pipe.getQuadJK (1, nq, 1)
tabquad.append (quad)
print " .. tabquad[0] = ", tabquad[0].getName ()
cible = nearest_quad (plaque, tabquad[0 | ])
tabquad[0]. setColor (5)
cible . setColor (5)
save_vtk ()
va1 = tabquad[0].getVertex (0)
va2 = tabquad[0].getVertex (1)
vb1 = cible.nearestVertex (va1)
vb2 = cible.nearestVertex (va2)
doc.setLevel (1)
doc.joinQuadsUni (tabquad, cible, va1, vb1, va2, vb2, 1)
hexabloc | k.what ()
save_vtk ()
return
doc.setLevel (1)
for nv in range (8) :
ier = doc.mergeVertices (tabv0[nv], tabv1[nv])
print "ier = ", ier
save_vtk ()
# ======================================================= test_monica
def test_monica () :
orig = doc.addVertex (0,0,0)
lx = 1
ly = lx
lz = lx
nx = 3
ny = nx
nz = 1
plaque = doc.makeCartesianUni (orig, vx,vy,vz, lx, ly, lz, nx,ny,nz)
save_vtk ()
insert_cylinder (plaque, 1, 1)
## hexa = plaque.getHexaIJK (1,1,0)
## doc.removeHexa (hexa)
return doc
# ================================================================= Begin
doc = test_monica ()
law = doc.addLaw("Uniform", 4)
for j in range(doc.countPropagation()):
propa = doc.getPropagation(j)
propa.setLaw(law)
mesh_hexas = hexablock.mesh (doc)
|
ActiveState/code | recipes/Python/577863_Context_manager_prevent_calling_code_catching/recipe-577863.py | Python | mit | 195 | 0.010256 | from | contextlib impor | t contextmanager
@contextmanager
def failnow():
try:
yield
except Exception:
import sys
sys.excepthook(*sys.exc_info())
sys.exit(1)
|
ianstalk/Flexget | flexget/components/sites/sites/newtorrents.py | Python | mit | 5,069 | 0.001381 | import re
from urllib.parse import quote
from loguru import logger
from flexget import plugin
from flexget.components.sites.urlrewriting import UrlRewritingError
from flexget.components.sites.utils import normalize_unicode, torrent_availability
from flexget.entry import Entry
from flexget.event import event
from flexget.utils import requests
from flexget.utils.soup import get_soup
logger = logger.bind(name='newtorrents')
class NewTorrents:
"""NewTorrents urlrewriter and search plugin."""
def __init__(self):
self.resolved = []
# UrlRewriter plugin API
def url_rewritable(self, task, entry):
# Return true only for urls that can and should be resolved
if entry['url'].startswith('http://www.newtorrents.info/down.php?'):
return False
return (
entry['url'].startswith('http://www.newtorrents.info')
and not entry['url'] in self.resolved
)
# UrlRewriter plugin API
def url_rewrite(self, task, entry):
url = entry['url']
if url.startswith('http://www.newtorrents.info/?q=') or url.startswith(
'http://www.newtorrents.info/search'
):
results = self.entries_from_search(entry['title'], url=url)
if not results:
raise UrlRewritingError("No matches for %s" % entry['title'])
url = results[0]['url']
else:
url = self.url_from_page(url)
if url:
entry['url'] = url
self.resolved.append(url)
else:
raise UrlRewritingError('Bug in newtorrents urlrewriter')
# Search plugin API
def search(self, task, entry, config=None):
entries = set()
for search_string in entry.get('search_string', [entry['title']]):
entries.update(self.entries_from_search(search_string))
return entries
@plugin.internet(logger)
def url_from_page(self, url):
"""Parses torrent url from newtorrents download page"""
try:
page = requests.get(url)
data = page.text
except requests.RequestException:
raise UrlRewritingError('URLerror when retrieving page')
p = re.compile(r"copy\(\'(.*)\'\)", re.IGNORECASE)
f = p.search(data)
if not f:
# the link in which plugin relies is missing!
raise UrlRewritingError(
'Failed to get url from download page. Plugin may need a update.'
)
else:
return f.group(1)
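    # Illustration (page fragment is hypothetical): markup containing
    #   onclick="copy('http://www.newtorrents.info/down.php?id=1234')"
    # makes f.group(1) yield 'http://www.newtorrents.info/down.php?id=1234'.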
@plugin.internet(logger)
def entries_from_search(self, name, url=None):
"""Parses torrent download url from search results"""
name = normalize_unicode(name)
if not url:
url = 'http://www.newtorrents.info/search/%s' % quote(
name.encode('utf-8'), safe=b':/~?=&%'
)
logger.debug('search url: {}', url)
html = requests.get(url).text
# fix </SCR'+'IPT> so that BS does not crash
# TODO: should use beautifulsoup massage
html = re.sub(r'(</SCR.*?)...(.*?IPT>)', r'\1\2', html)
soup = get_soup(html)
# saving torrents in dict
torrents = []
for link in soup.find_all('a', attrs={'href': re.compile('down.php')}):
torrent_url = 'http://www.newtorrents.info%s' % link.get('href')
release_name = link.parent.next.get('title')
# quick dirty hack
seed = link.find_next('td', attrs={'class': re.compile('s')}).renderContents()
if seed == 'n/a':
seed = 0
else:
try:
seed = int(seed)
except ValueError:
logger.warning(
'Error converting seed value ({}) from newtorrents to integer.', seed
)
seed = 0
# TODO: also parse content_size and peers from results
torrents.append(
Entry(
title=release_name,
url=torrent_url,
torrent_seeds=seed,
torrent_availability=torrent_availability(seed, 0),
)
)
# sort with seed number Reverse order
torrents.sort(reverse=True, key=lambda x: x.get('torrent_availability', 0))
# choose the torrent
if not torrents:
dashindex = name.rfind('-')
if dashindex != -1:
return self.entries_from_search(name[:dashindex])
else:
return torrents
else:
if len(torrents) == 1:
logger.debug('found only one matching search result.')
else:
logger.debug(
'search result contains multiple matches, sorted {} by most seeders', torrents
)
return torrents
@event('plugin.register')
def register_plugin():
plugin.register(NewTorrents, 'newtorrents', interfaces= | ['urlrewriter', 'search'], | api_ver=2)
|
isb-cgc/ISB-CGC-Webapp | bq_data_access/v2/seqpeek/seqpeek_view.py | Python | apache-2.0 | 7,709 | 0.001686 | #
# Copyright 2015-2019, Institute for Systems Biology
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from builtins import map
from builtins import str
from builtins import object
from copy import deepcopy
import logging
from bq_data_access.v2.seqpeek.seqpeek_interpro import InterProDataProvider
logger = logging.getLogger('main_logger')
SAMPLE_ID_FIELD_NAME = 'sample_id'
TRACK_ID_FIELD = "tumor"
COORDINATE_FIELD_NAME = 'uniprot_aapos'
PROTEIN_ID_FIELD = 'ensg_id'
PROTEIN_DOMAIN_DB = 'PFAM'
SEQPEEK_VIEW_DEBUG_MODE = False
def get_number_of_unique_samples(track):
sample_ids = set()
for mutation in track['mutations']:
sample_ids.add(mutation[SAMPLE_ID_FIELD_NAME])
return len(sample_ids)
def get_number_of_mutated_positions(track):
sample_locations = set()
for mutation in track['mutations']:
sample_locations.add(mutation[COORDINATE_FIELD_NAME])
return len(sample_locations)
# TODO remove if not needed
def clean_track_mutations(mutations_array):
retval = []
for mutation in mutations_array:
cleaned = deepcopy(mutation)
cleaned[COORDINATE_FIELD_NAME] = int(mutation[COORDINATE_FIELD_NAME])
retval.append(cleaned)
return retval
def sort_track_mutations(mutations_array):
return sorted(mutations_array, key=lambda k: k[COORDINATE_FIELD_NAME])
def get_track_statistics_by_track_type(track, cohort_info_map):
track_id = track[TRACK_ID_FIELD]
result = {
'samples': {
'numberOf': get_number_of_unique_samples(track),
'mutated_positions': get_number_of_mutated_positions(track)
}
}
if track['type'] == 'tumor':
cohort_info = cohort_info_map[track_id]
result['cohort_size'] = cohort_info['size']
else:
# Do not assign cohort size for the 'COMBINED' track.
result['cohort_size'] = None
return result
def filter_protein_domains(match_array):
return [m for m in match_array if m['dbname'] == PROTEIN_DOMAIN_DB]
def get_table_row_id(tumor_type):
return "seqpeek_row_{0}".format(tumor_type)
def build_seqpeek_regions(protein_data):
return [{
'type': 'exon',
'start': 0,
'end': protein_data['length']
}]
def build_summary_track(tracks):
all = []
for track in tracks:
all.extend(track["mutations"])
return {
'mutations': all,
'label': 'COMBINED',
'tumor': 'none-combined',
'type': 'summary'
}
def get_track_label_and_cohort_information(track_id_value, cohort_info_map):
cohort_info = cohort_info_map[track_id_value]
label = cohort_info['name']
cohort_size = cohort_info['size']
return label, cohort_size
def get_track_label(track, cohort_info_array):
# The IDs in cohort_info_array are integers, whereas the track IDs are strings.
cohort_map = {str(item['id']): item['name'] for item in cohort_info_array}
return cohort_map[track[TRACK_ID_FIELD]]
def get_protein_domains(uniprot_id):
protein = InterProDataProvider().get_data(uniprot_id)
return protein
class MAFData(object):
def __init__(self, cohort_info, data):
self.cohort_info = cohort_info
self.data = data
@classmethod
def from_dict(cls, param):
return cls(param['cohort_set'], param['items'])
def build_track_data(track_id_list, all_tumor_mutations):
tracks = []
for track_id in track_id_list:
tracks.append({
TRACK_ID_FIELD: track_id,
'mutations': [m for m in all_tumor_mutations if int(track_id) in set(m['cohort'])]
})
return tracks
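# Illustration (hypothetical data): with track_id_list == ['1', '2'], a
# mutation whose 'cohort' list is [1, 3] is copied onto track '1' only.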
def find_uniprot_id(mutations):
uniprot_id = None
for m in mutations:
if PROTEIN_ID_FIELD in m:
uniprot_id = m[PROTEIN_ID_FIELD]
break
return uniprot_id
def get_genes_tumors_lists_debug():
return {
'symbol_list': ['EGFR', 'TP53', 'PTEN'],
'disease_codes': ['ACC', 'BRCA', 'GBM']
}
def get_genes_tumors_lists_remote():
context = {
'symbol_list': [],
'track_id_list': []
}
return context
def get_genes_tumors_lists():
if SEQPEEK_VIEW_DEBUG_MODE:
return get_genes_tumors_lists_debug()
else:
return get_genes_tumors_lists_remote()
def get_track_id_list(param):
return list(map(str, param))
def format_removed_row_statistics_to_list(stats_dict):
result = []
for key, value in list(stats_dict.items()):
result.append({
'name': key,
'num': value
})
return result
class SeqPeekViewDataBuilder(object):
def build_view_data(self, hugo_symbol, filtered_maf_vector, seqpeek_cohort_info, cohort_id_list, removed_row_statistics, tables_used):
context = get_genes_tumors_lists()
cohort_info_map = {str(item['id']): item for item in seqpeek_cohort_info}
track_id_list = get_track_id_list(cohort_id_list)
# Since the gene (hugo_symbol) parameter is part of the GNAB feature ID,
# it will be sanity-checked in the SeqPeekMAFDataAccess instance.
uniprot_id = find_uniprot_id(filtered_maf_vector)
logging.info("UniProt ID: " + str(uniprot_id))
protein_data = get_protein_domains(uniprot_id)
track_data = build_track_data(track_id_list, filtered_maf_vector)
plot_data = {
'gen | e_label': hugo_symbol,
'tracks': track_data,
| 'protein': protein_data
}
# Pre-processing
# - Sort mutations by chromosomal coordinate
for track in plot_data['tracks']:
track['mutations'] = sort_track_mutations(track['mutations'])
# Annotations
# - Add label, possibly human readable
# - Add type that indicates whether the track is driven by data from search or
# if the track is aggregate
for track in plot_data['tracks']:
track['type'] = 'tumor'
label, cohort_size = get_track_label_and_cohort_information(track[TRACK_ID_FIELD], cohort_info_map)
track['label'] = label
# Display the "combined" track only if more than one cohort is visualized
if len(cohort_id_list) >= 2:
plot_data['tracks'].append(build_summary_track(plot_data['tracks']))
for track in plot_data['tracks']:
# Calculate statistics
track['statistics'] = get_track_statistics_by_track_type(track, cohort_info_map)
# Unique ID for each row
track['render_info'] = {
'row_id': get_table_row_id(track[TRACK_ID_FIELD])
}
plot_data['regions'] = build_seqpeek_regions(plot_data['protein'])
plot_data['protein']['matches'] = filter_protein_domains(plot_data['protein']['matches'])
tumor_list = ','.join(track_id_list)
context.update({
'plot_data': plot_data,
'hugo_symbol': hugo_symbol,
'tumor_list': tumor_list,
'cohort_id_list': track_id_list,
'removed_row_statistics': format_removed_row_statistics_to_list(removed_row_statistics),
'bq_tables': list(set(tables_used))
})
return context
|
pallamidessi/mvrptw | gen/protobuf/vehicleType_pb2.py | Python | mit | 1,823 | 0.006583 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: vehicleType.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf.internal import enum_type_wrapper
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='vehicleType.proto',
package='',
serialized_pb=_b('\n\x11vehicleType.proto*;\n\rTypeOfVehicle\x12\x07\n\x03VSL\x10\x00\x12\x08\n\x04TPMR\x10\x01\x12\r\n\tAmbulance\x10\x02\x12\x08\n\x04Taxi\x10\x03')
)
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_TYPEOFVEHICLE = _descriptor.EnumDescriptor(
name='TypeOfVehicle',
full_name='TypeOfVehicle',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='VSL', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='TPMR', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='Ambulance', index=2, number=2,
options=None,
| type=None),
_descriptor.EnumValueDescriptor(
name='Taxi', index=3, number=3,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=21,
serialized_end=80,
)
_sym_db.RegisterEnumDescriptor(_TYPEOFVEHICLE)
TypeOfVehicle = enu | m_type_wrapper.EnumTypeWrapper(_TYPEOFVEHICLE)
VSL = 0
TPMR = 1
Ambulance = 2
Taxi = 3
DESCRIPTOR.enum_types_by_name['TypeOfVehicle'] = _TYPEOFVEHICLE
# @@protoc_insertion_point(module_scope)
|
mikesun/xen-cow-checkpointing | tools/python/scripts/xapi.py | Python | gpl-2.0 | 29,258 | 0.011484 | #!/usr/bin/python
#============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2006 XenSource Ltd.
#=============================== | =============================================
imp | ort sys
import time
import re
import os
sys.path.append('/usr/lib/python')
from xen.util.xmlrpclib2 import ServerProxy
from optparse import *
from pprint import pprint
from types import DictType
from getpass import getpass
# Get default values from the environment
SERVER_URI = os.environ.get('XAPI_SERVER_URI', 'http://localhost:9363/')
SERVER_USER = os.environ.get('XAPI_SERVER_USER', '')
SERVER_PASS = os.environ.get('XAPI_SERVER_PASS', '')
MB = 1024 * 1024
HOST_INFO_FORMAT = '%-20s: %-50s'
VM_LIST_FORMAT = '%(name_label)-18s %(memory_actual)-5s %(VCPUs_number)-5s'\
' %(power_state)-10s %(uuid)-36s'
SR_LIST_FORMAT = '%(name_label)-18s %(uuid)-36s %(physical_size)-10s' \
'%(type)-10s'
VDI_LIST_FORMAT = '%(name_label)-18s %(uuid)-36s %(virtual_size)-8s'
VBD_LIST_FORMAT = '%(device)-6s %(uuid)-36s %(VDI)-8s'
TASK_LIST_FORMAT = '%(name_label)-18s %(uuid)-36s %(status)-8s %(progress)-4s'
VIF_LIST_FORMAT = '%(name)-8s %(device)-7s %(uuid)-36s %(MAC)-10s'
CONSOLE_LIST_FORMAT = '%(uuid)-36s %(protocol)-8s %(location)-32s'
COMMANDS = {
'host-info': ('', 'Get Xen Host Info'),
'host-set-name': ('', 'Set host name'),
'pif-list': ('', 'List all PIFs'),
'sr-list': ('', 'List all SRs'),
'vbd-list': ('', 'List all VBDs'),
'vbd-create': ('<domname> <pycfg> [opts]',
'Create VBD attached to domname'),
'vdi-create': ('<pycfg> [opts]', 'Create a VDI'),
'vdi-list' : ('', 'List all VDI'),
'vdi-rename': ('<vdi_uuid> <new_name>', 'Rename VDI'),
'vdi-destroy': ('<vdi_uuid>', 'Delete VDI'),
'vif-create': ('<domname> <pycfg>', 'Create VIF attached to domname'),
'vtpm-create' : ('<domname> <pycfg>', 'Create VTPM attached to domname'),
'vm-create': ('<pycfg>', 'Create VM with python config'),
'vm-destroy': ('<domname>', 'Delete VM'),
'vm-list': ('[--long]', 'List all domains.'),
'vm-name': ('<uuid>', 'Name of UUID.'),
'vm-shutdown': ('<name> [opts]', 'Shutdown VM with name'),
'vm-start': ('<name>', 'Start VM with name'),
'vm-uuid': ('<name>', 'UUID of a domain by name.'),
'async-vm-start': ('<name>', 'Start VM asynchronously'),
}
OPTIONS = {
'sr-list': [(('-l', '--long'),
{'action':'store_true',
'help':'List all properties of SR'})
],
'vdi-list': [(('-l', '--long'),
{'action':'store_true',
'help':'List all properties of VDI'})
],
'vif-list': [(('-l', '--long'),
{'action':'store_true',
'help':'List all properties of VIF'})
],
'vm-list': [(('-l', '--long'),
{'action':'store_true',
'help':'List all properties of VMs'})
],
'vm-shutdown': [(('-f', '--force'), {'help': 'Shutdown Forcefully',
'action': 'store_true'})],
'vdi-create': [(('--name-label',), {'help': 'Name for VDI'}),
(('--name-description',), {'help': 'Description for VDI'}),
(('--virtual-size',), {'type': 'int',
'default': 0,
'help': 'Size of VDI in bytes'}),
(('--type',), {'choices': ['system', 'user', 'ephemeral'],
'default': 'system',
'help': 'VDI type'}),
(('--sharable',), {'action': 'store_true',
'help': 'VDI sharable'}),
(('--read-only',), {'action': 'store_true',
'help': 'Read only'}),
(('--sr',), {})],
'vbd-create': [(('--VDI',), {'help': 'UUID of VDI to attach to.'}),
(('--mode',), {'choices': ['RO', 'RW'],
'help': 'device mount mode'}),
(('--driver',), {'choices':['paravirtualised', 'ioemu'],
'help': 'Driver for VBD'}),
(('--device',), {'help': 'Device name on guest domain'})]
}
class OptionError(Exception):
pass
class XenAPIError(Exception):
pass
#
# Extra utility functions
#
class IterableValues(Values):
"""Better interface to the list of values from optparse."""
def __iter__(self):
for opt, val in self.__dict__.items():
if opt[0] == '_' or callable(val):
continue
yield opt, val
def parse_args(cmd_name, args, set_defaults = False):
argstring, desc = COMMANDS[cmd_name]
parser = OptionParser(usage = 'xapi %s %s' % (cmd_name, argstring),
description = desc)
if cmd_name in OPTIONS:
for optargs, optkwds in OPTIONS[cmd_name]:
parser.add_option(*optargs, **optkwds)
if set_defaults:
default_values = parser.get_default_values()
defaults = IterableValues(default_values.__dict__)
else:
defaults = IterableValues()
(opts, extraargs) = parser.parse_args(args = list(args),
values = defaults)
return opts, extraargs
def execute(server, fn, args, async = False):
if async:
func = eval('server.Async.%s' % fn)
else:
func = eval('server.%s' % fn)
result = func(*args)
if type(result) != DictType:
raise TypeError("Function returned object of type: %s" %
str(type(result)))
if 'Value' not in result:
raise XenAPIError(*result['ErrorDescription'])
return result['Value']
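# Illustrative call (server/session handles are hypothetical): unwraps the
# {'Value': ...} envelope or raises XenAPIError from 'ErrorDescription', e.g.
# vms = execute(server, 'VM.get_all', (session,))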
_initialised = False
_server = None
_session = None
def connect(*args):
global _server, _session, _initialised
if not _initialised:
# try without password or default credentials
try:
_server = ServerProxy(SERVER_URI)
_session = execute(_server.session, 'login_with_password',
(SERVER_USER, SERVER_PASS))
except:
login = raw_input("Login: ")
password = getpass()
creds = (login, password)
_server = ServerProxy(SERVER_URI)
_session = execute(_server.session, 'login_with_password',
creds)
_initialised = True
return (_server, _session)
def _stringify(adict):
return dict([(k, str(v)) for k, v in adict.items()])
def _read_python_cfg(filename):
cfg = {}
execfile(filename, {}, cfg)
return cfg
def resolve_vm(server, session, vm_name):
vm_uuid = execute(server, 'VM.get_by_name_label', (session, vm_name))
if not vm_uuid:
return None
else:
return vm_uuid[0]
def resolve_vdi(server, session, vdi_name):
vdi_uuid = execute(server, 'VDI.get_by_name_label', (session, vdi_name))
if not vdi_uuid:
return None
else:
return vdi_uuid[0]
#
# Actual commands
#
def xapi_host_info(args, async = False):
server, session = connect()
hosts = execute(server, 'host.get_all', (session,))
for host in hosts: # there is only on |
apple/coremltools | coremltools/converters/sklearn/_converter.py | Python | bsd-3-clause | 5,804 | 0.000517 | # Copyright (c) 2017, Apple Inc. All rights reserved.
#
# Use of this source code is governed by a BSD-3-clause license that can be
# found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause
from coremltools import __version__ as ct_version
from coremltools.models import _METADATA_VERSION, _METADATA_SOURCE
"""
Defines the primary function for converting scikit-learn models.
"""
def convert(sk_obj, input_features=None, output_feature_names=None):
"""
Convert scikit-learn pipeline, classifier, or regressor to Core ML format.
Parameters
----------
sk_obj: model | [model] of scikit-learn format.
Scikit learn model(s) to convert to a Core ML format.
The input model may be a single scikit learn model, a scikit learn
pipeline model, or a list of scikit learn models.
Currently supported scikit learn models are:
- Linear and Logistic Regression
- LinearSVC and LinearSVR
- SVC and SVR
- NuSVC and NuSVR
- Gradient Boosting Classifier and Regressor
- Decision Tree Classifier and Regressor
- Random Forest Classifier and Regressor
- Normalizer
- Imputer
- Standard Scaler
- DictVectorizer
- One Hot Encoder
- KNeighborsClassifier
The input model, or the last model in a pipeline or list of models,
determines whether this is exposed as a Transformer, Regressor,
or Classifier.
Note that there may not be a one-to-one correspondence between scikit
learn models and which Core ML models are used to represent them. For
example, many scikit learn models are embedded in a pipeline to handle
processing of input features.
input_features: str | dict | list
Optional name(s) that can be given to the inputs of the scikit-learn
model. Defaults to 'input'.
Input features can be specified in a number of forms.
- Single string: In this case, the input is assumed to be a single
array, with the number of dimensions set using num_dimensions.
- List of strings: In this case, the overall input dimensions to the
scikit-learn model is assumed to be the length of the list. If
neighboring names are identical, they are assumed to be an input
array of that length. For example:
["a", "b", "c"]
resolves to
[("a", Double), ("b", Double), ("c", Double)].
And:
["a", "a", "b"]
resolves to
[("a", Array(2)), ("b", Double)].
- Dictionary: Where the keys are the names and the indices or ranges of
feature indices.
In this case, it's presented as a mapping from keys to indices or
ranges of contiguous indices. For example,
{"a" : 0, "b" : [2,3], "c" : 1}
Resolves to
[("a", Double), ("c", Double), ("b", Array(2))].
Note that the ordering is determined by the indices.
- List of tuples of the form `(name, datatype)`. Here, `name` is the
name of the exposed feature, and `datatype` is an instance of
`String`, `Double`, `Int64`, `Array`, or `Dictionary`.
output_feature_names: string or list of strings
        Optional name(s) that can be given to the outputs of the scikit-learn
        model.
The output_feature_names is interpreted according to the model type:
- If the scikit-learn model is a transformer, it is the name of the
array feature output by the final sequence of the transformer
(defaults to "output").
- If it is a classifier, it should be a 2-tuple of names giving the top
class prediction and the array of scores for each class (defaults to
"classLabel" and "classScores").
- If it is a regressor, it should give the name of the prediction value
(defaults to "prediction").
| Returns
-------
model:MLModel
Returns an MLModel instance representing a Core ML model.
Examples
--------
.. sourcecode:: python
>>> from sklearn.linear_model import LinearRegression
>>> import pandas as pd
# Load data
>>> data = pd.read_csv('houses.csv')
# Train a model
>>> model = LinearRegression()
>>> model.fit(dat | a[["bedroom", "bath", "size"]], data["price"])
# Convert and save the scikit-learn model
>>> import coremltools
>>> coreml_model = coremltools.converters.sklearn.convert(model,
["bedroom", "bath", "size"],
"price")
>>> coreml_model.save('HousePricer.mlmodel')
"""
# This function is just a thin wrapper around the internal converter so
# that sklearn isn't actually imported unless this function is called
from ...models import MLModel
# NOTE: Providing user-defined class labels will be enabled when
# several issues with the ordering of the classes are worked out. For now,
# to use custom class labels, directly import the internal function below.
from ._converter_internal import _convert_sklearn_model
spec = _convert_sklearn_model(
sk_obj, input_features, output_feature_names, class_labels=None
)
model = MLModel(spec)
from sklearn import __version__ as sklearn_version
model.user_defined_metadata[_METADATA_VERSION] = ct_version
model.user_defined_metadata[_METADATA_SOURCE] = "scikit-learn=={0}".format(
sklearn_version
)
return model
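# --- Illustrative usage (editor's sketch, not part of the original module) ---
# End-to-end conversion of a scikit-learn Pipeline, as permitted by the
# docstring above; assumes scikit-learn and coremltools are installed. The
# feature names "f0".."f2" and the output file name are arbitrary examples.
#
#   import numpy as np
#   import coremltools
#   from sklearn.pipeline import Pipeline
#   from sklearn.preprocessing import StandardScaler
#   from sklearn.linear_model import LinearRegression
#
#   X = np.random.rand(100, 3)
#   y = X.dot(np.array([1.0, 2.0, 3.0]))
#   pipe = Pipeline([("scale", StandardScaler()), ("reg", LinearRegression())])
#   pipe.fit(X, y)
#   mlmodel = coremltools.converters.sklearn.convert(
#       pipe, input_features=["f0", "f1", "f2"],
#       output_feature_names="prediction")
#   mlmodel.save("Pipeline.mlmodel")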
|
plotly/python-api | packages/python/plotly/plotly/validators/mesh3d/colorbar/_lenmode.py | Python | mit | 523 | 0.001912 | import _plotly_utils.basevalidators
cla | ss LenmodeValidator(_plotly_utils.basevalidators.EnumeratedValidator):
def __init__(self, plotly_name="lenmode", parent_name="mesh3d.colorbar", **kwargs):
super(LenmodeValidator, self).__init__(
plotly_name=plotly_name,
parent_name | =parent_name,
edit_type=kwargs.pop("edit_type", "colorbars"),
role=kwargs.pop("role", "info"),
values=kwargs.pop("values", ["fraction", "pixels"]),
**kwargs
)
|
kfelzenbergs/smartalarm-api | smartalarm/manage.py | Python | gpl-3.0 | 808 | 0 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "smartalarm.settings")
| try:
| from django.core.management import execute_from_command_line
except ImportError:
# The above import may fail for some other reason. Ensure that the
# issue is really that Django is missing to avoid masking other
# exceptions on Python 2.
try:
import django
except ImportError:
raise ImportError(
"Couldn't import Django. Are you sure it's installed and "
"available on your PYTHONPATH environment variable? Did you "
"forget to activate a virtual environment?"
)
raise
execute_from_command_line(sys.argv)
|
hallvors/mobilewebcompat | preproc/webcompat_data_exporter.py | Python | mpl-2.0 | 6,094 | 0.001477 | #!/usr/bin/env python2.7
# encoding: utf-8
'''
extract_id_title.py
Created by Hallvord R. M. Steen on 2014-10-25.
Modified by Karl
Mozilla Public License, version 2.0
see LICENSE
Dumps data from webcompat.com bug tracker
by default creates one CSV file (webcompatdata.csv)
and one JSON file (webcompatdata-bzlike.json)
the JSON file uses many of the field names Bugzilla uses in its export,
so the output from this script can be used where Bugzilla data is expected
'''
import json
import re
import socket
import sys
import urllib2
# Config
URL_REPO = "https://api.github.com/rep | os/webcompat/web-bugs"
VERBOSE = True
# Seconds. Loading searches c | an be slow
socket.setdefaulttimeout(240)
def get_remote_file(url, req_json=False):
print('Getting '+url)
req = urllib2.Request(url)
req.add_header('User-agent', 'AreWeCompatibleYetBot')
if req_json:
req.add_header('Accept', 'application/vnd.github.v3+json')
bzresponse = urllib2.urlopen(req, timeout=240)
return {"headers": bzresponse.info(),
"data": json.loads(bzresponse.read().decode('utf8'))}
def extract_url(issue_body):
'''Extract the URL for an issue from WebCompat.
URL in webcompat.com bugs follow this pattern:
**URL**: https://example.com/foobar
'''
    url_pattern = re.compile(r'\*\*URL\*\*: (.*)\n')
url_match = re.search(url_pattern, issue_body)
if url_match:
url = url_match.group(1).strip()
if not url.startswith(('http://', 'https://')):
url = "http://%s" % url
else:
url = ""
return url.encode('utf-8')
def extract_data(json_data, results_csv, results_bzlike):
resolution_labels = ["duplicate", "invalid", "wontfix", "fixed",
"worksforme"]
whiteboard_labels = ["needsinfo", "contactready", "sitewait",
"needscontact", "needsdiagnosis"]
for issue in json_data["data"]:
# Extracting data
body = issue["body"]
url = extract_url(body)
bug_id = issue["number"]
link = 'https://webcompat.com/issues/%s' % bug_id
issue_title = issue["title"].encode('utf-8').strip()
if VERBOSE:
print('Issue %s: %s' % (bug_id, issue_title))
creation_time = issue['created_at'].encode('utf-8')
last_change_time = issue['updated_at'].encode('utf-8')
issue_state = issue['state'].encode('utf-8')
cf_last_resolved = issue['closed_at']
if issue_state == 'open':
status = 'OPEN'
else:
status = 'RESOLVED'
# Extracting the labels
rx_unprefix = re.compile('^(status-|browser-|os-)')
labels_list = [re.sub(rx_unprefix, '', label['name']) for label in issue['labels']]
# areWEcompatibleyet is only about mozilla bugs
        if any(('firefox' in label or 'mozilla' in label) for label in labels_list):
# Defining the OS
if any(['android' in label for label in labels_list]):
op_sys = 'Android'
elif any(['iOS' in label for label in labels_list]):
op_sys = 'iOS'
elif any(['mobile' in label for label in labels_list]):
op_sys = 'Gonk (Firefox OS)'
else:
op_sys = ''
# Did the bug had a resolution?
resolution = ''
resolution_set = set(labels_list).intersection(resolution_labels)
if resolution_set:
resolution = resolution_set.pop().upper()
            # Gathering whiteboard keys; the whiteboard_labels filter below
            # is intentionally disabled so that all labels are kept
            whiteboard = ''.join(['[%s] ' % label for label in labels_list
                                  # if label in whiteboard_labels
                                  ])
# creating CSV file
if issue_title:
results_csv.append("%i\t%s\t%s\t%s" % (
bug_id, issue_title, url, link))
# Creating dictionary
bzlike = {"id": bug_id,
"summary": issue_title,
"url": url,
"whiteboard": whiteboard,
"op_sys": op_sys,
"creation_time": creation_time,
"last_change_time": last_change_time,
"status": status,
"cf_last_resolved": cf_last_resolved,
"resolution": resolution,
"body": body
}
results_bzlike.append(bzlike)
def extract_next_link(link_hdr):
'''Given a HTTP Link header, extract the "next" link.
Link header has the pattern:
'<https://example.com/foobar?page=2>; rel="next",
<https://example.com/foobar?page=100>; rel="last"'
We need:
https://example.com/foobar?page=2
When no more "next", we return an empty string.
'''
next_link = ''
links = link_hdr.split(',')
    for link in links:
        parts = link.split(';')
        if len(parts) < 2:
            # empty or malformed segment (e.g. no Link header at all)
            continue
        link_only, rel = parts[0], parts[1]
        if 'next' in rel:
            next_link = link_only.strip(' <>')
            break
return next_link
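# Quick illustrative check of extract_next_link (editor's example; the URLs
# are made up but follow the header shape documented above):
#
#   hdr = ('<https://example.com/foobar?page=2>; rel="next", '
#          '<https://example.com/foobar?page=100>; rel="last"')
#   assert extract_next_link(hdr) == 'https://example.com/foobar?page=2'
#   assert extract_next_link('<https://example.com/x?page=1>; rel="last"') == ''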
def get_webcompat_data(url_repo=URL_REPO):
'''Extract Issues data from github repo.
Start with the first page and follow hypermedia links to explore the rest.
'''
next_link = '%s/issues?per_page=100&page=1' % (url_repo)
results = []
bzresults = []
while next_link:
response_data = get_remote_file(next_link, True)
extract_data(response_data, results, bzresults)
        next_link = extract_next_link(response_data["headers"].get("link", ""))
return results, {"bugs": bzresults}
def main():
results, bzresults = get_webcompat_data(URL_REPO)
with open('webcompatdata.csv', 'w') as f:
f.write("\n".join(results).encode('utf8'))
f.write('\n')
print("Wrote %d items to webcompatdata.csv " % len(results))
with open('webcompatdata-bzlike.json', 'w') as f:
f.write(json.dumps(bzresults, indent=4).encode('utf8'))
print("Wrote %d items to webcompatdata-bzlike.json" % len(bzresults['bugs']))
if __name__ == "__main__":
sys.exit(main())
|
mozman/ezdxf | integration_tests/test_recover.py | Python | mit | 4,960 | 0 | # Copyright (c) 2020, Manfred Moitzi
# License: MIT License
import os
import pytest
import random
from ezdxf import recover
from ezdxf.audit import AuditError
from ezdxf.lldxf.tagger import tag_compiler, ascii_tags_loader
BASEDIR = os.path.dirname(__file__)
DATADIR = "data"
RECOVER1 = "recover01.dxf"
RECOVER2 = "recover02.dxf"
CC_DXFLIB = "cc_dxflib.dxf"
EMPTY_HANDLES = "empty_handles.dxf"
def fullpath(name):
filename = os.path.join(BASEDIR, DATADIR, name)
if not os.path.exists(filename):
pytest.skip(f"File {filename} not found.")
return filename
@pytest.fixture
def tags01():
filename = fullpath(RECOVER1)
tool = recover.Recover()
with open(filename, "rb") as fp:
return list(tool.load_tags(fp, errors="ignore"))
def test_bytes_loader():
filename = fullpath(RECOVER1)
with open(filename, "rb") as fp:
tags = list(recover.bytes_loader(fp))
assert len(tags) == 14736
def test_safe_tag_loader():
filename = fullpath(RECOVER1)
with open(filename, "rt", encoding="cp1252") as fp:
expected = list(tag_compiler(iter(ascii_tags_loader(fp))))
with open(filename, "rb") as fp:
tags = list(recover.safe_tag_loader(fp))
assert len(tags) == len(expected)
    a | ssert tags[:100] == expected[:100]
def test_rebuild_sections(tags01):
tool = recover.Recover()
sections = tool.rebuild_sections(tags01)
expected = sum(int(tag == (0, "SECTION")) for tag in tags01)
orphans = sections.pop( | )
assert len(sections) == expected
assert len(orphans) == 4
def test_build_section_dict(tags01):
tool = recover.Recover()
sections = tool.rebuild_sections(tags01)
tool.load_section_dict(sections)
assert len(tool.section_dict) == 2
header = tool.section_dict["HEADER"][0]
assert len(header) == 6
assert header[0] == (0, "SECTION")
assert header[1] == (2, "HEADER")
assert len(tool.section_dict["ENTITIES"]) == 1505
def test_readfile_recover01_dxf():
doc, auditor = recover.readfile(fullpath(RECOVER1))
assert doc.dxfversion == "AC1009"
assert auditor.has_errors is False
@pytest.fixture
def tags02():
filename = fullpath(RECOVER2)
tool = recover.Recover()
with open(filename, "rb") as fp:
return list(tool.load_tags(fp, errors="ignore"))
def test_rebuild_tables(tags02):
recover_tool = recover.Recover()
sections = recover_tool.rebuild_sections(tags02)
recover_tool.load_section_dict(sections)
tables = recover_tool.section_dict.get("TABLES")
random.shuffle(tables)
tables = recover_tool.rebuild_tables(tables)
assert tables[0] == [(0, "SECTION"), (2, "TABLES")]
assert tables[1][0] == (0, "TABLE")
assert tables[1][1] == (2, "VPORT")
assert tables[2][0] == (0, "VPORT")
assert tables[3][0] == (0, "ENDTAB")
assert tables[4][0] == (0, "TABLE")
assert tables[4][1] == (2, "LTYPE")
assert tables[5][0] == (0, "LTYPE")
assert tables[8][0] == (0, "ENDTAB")
assert tables[-5][0] == (0, "TABLE")
assert tables[-5][1] == (2, "BLOCK_RECORD")
assert tables[-4][0] == (0, "BLOCK_RECORD")
assert tables[-1][0] == (0, "ENDTAB")
def test_readfile_recover02_dxf():
doc, auditor = recover.readfile(fullpath(RECOVER2))
assert doc.dxfversion == "AC1032"
assert auditor.has_errors is False
# Auditor should restore deleted BLOCK-RECORD table head:
blkrec_head = doc.block_records.head
assert blkrec_head.dxf.handle is not None
assert blkrec_head.dxf.handle in doc.entitydb
# Auditor should update/fix BLOCK_RECORD entries owner handle:
for entry in doc.block_records:
assert (
entry.dxf.owner == blkrec_head.dxf.handle
), "Auditor() should update table-entry owner handle."
# Auditor should restore invalid VPORT table-head owner handle:
vport_head = doc.viewports.head
assert (
vport_head.dxf.owner == "0"
), "Auditor() should repair invalid table-head owner handle."
# Auditor should fix invalid VPORT table-entry owner handle:
vport = doc.viewports.get("*Active")[0]
assert (
vport.dxf.owner == vport_head.dxf.handle
), "Auditor() should update table-entry owner handle."
def test_read_cc_dxflib_file():
doc, auditor = recover.readfile(fullpath(CC_DXFLIB))
codes = {fix.code for fix in auditor.fixes}
assert AuditError.REMOVED_UNSUPPORTED_SECTION in codes
assert AuditError.REMOVED_UNSUPPORTED_TABLE in codes
msp = doc.modelspace()
polyline = msp.query("POLYLINE").first
assert polyline is not None
def test_readfile_empty_handles_dxf():
doc, auditor = recover.readfile(fullpath(EMPTY_HANDLES))
msp = doc.modelspace()
assert doc.dxfversion == "AC1009"
assert auditor.has_errors is False
assert len(msp.query("LINE")) == 8
assert len(msp.query("*[layer=='GEOMETRY-CUTTING']")) == 4
assert len(msp.query("*[layer=='GEOMETRY-ENGRAVING']")) == 4
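# Minimal recover workflow mirrored by the tests above (editor's sketch; the
# file name is a placeholder):
#
#   from ezdxf import recover
#   doc, auditor = recover.readfile("damaged.dxf")
#   if auditor.has_errors:
#       for error in auditor.errors:
#           print(error)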
|
SCIP-Interfaces/PySCIPOpt | examples/finished/gcp_fixed_k.py | Python | mit | 2,648 | 0.018505 | ##@file gcp_fixed_k.py
#@brief solve the graph coloring problem with fixed-k model
"""
Copyright (c) by Joao Pedro PEDROSO and Mikio KUBO, 2012
"""
from pyscipopt import Model, quicksum, multidict
def gcp_fixed_k(V,E,K):
"""gcp_fixed_k -- model for minimizing number of bad edges in coloring a graph
Parameters:
- V: set/list of nodes in the graph
- E: set/list of edges in the graph
- K: number of colors to be used
Returns a model, ready to be solved.
"""
model = Model("gcp - fixed k")
x,z = {},{}
for i in V:
for k in range(K):
x[i,k] = model.addVar(vtype="B", name="x(%s,%s)"%(i,k))
for (i,j) in E:
z[i,j] = model.addVar(vtype="B", name="z(%s,%s)"%(i,j))
for i in V:
model.addCons(quicksum(x[i,k] for k in range(K)) == 1, "AssignColor(%s)" % i)
for (i,j) in E:
for k in range(K):
model.addCons(x[i,k] + x[j,k] <= 1 + z[i,j], "BadEdge(%s,%s,%s)"%(i,j,k))
model.setObjective(quicksum(z[i,j] for (i,j) in E), "minimize")
model.data = x,z
return model
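# Model recap (editor's note): x[i,k] = 1 iff vertex i receives color k; the
# BadEdge constraints force z[i,j] = 1 whenever both endpoints of edge (i,j)
# share some color k, so the objective counts monochromatic ("bad") edges.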
def solve_gcp(V,E):
"""solve_gcp -- solve the graph coloring problem with b | isection and fixed-k model
Parameters:
- V: set/list of nodes in the graph
- E: set/list of edges in the graph
    Returns a tuple with the number of colors used and a dictionary mapping vertices to colors
"""
| LB = 0
UB = len(V)
color = {}
while UB-LB > 1:
K = int((UB+LB) / 2)
gcp = gcp_fixed_k(V,E,K)
# gcp.Params.OutputFlag = 0 # silent mode
#gcp.Params.Cutoff = .1
        # objective limit: only accept colorings with zero bad edges
        gcp.setObjlimit(0.1)
gcp.optimize()
status = gcp.getStatus()
if status == "optimal":
x,z = gcp.data
for i in V:
for k in range(K):
if gcp.getVal(x[i,k]) > 0.5:
color[i] = k
break
# else:
# raise "undefined color for", i
UB = K
else:
LB = K
return UB,color
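# Bisection invariant (editor's note): UB always admits a coloring with zero
# bad edges while LB never does, so the loop reaches the chromatic number in
# O(log |V|) MIP solves. A hypothetical trace of the midpoint updates:
#
#   LB, UB = 0, 8   # K=4 feasible -> UB=4; K=2 infeasible -> LB=2;
#                   # K=3 infeasible -> LB=3; UB-LB == 1, answer is 4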
import random
def make_data(n,prob):
"""make_data: prepare data for a random graph
Parameters:
- n: number of vertices
- prob: probability of existence of an edge, for each pair of vertices
    Returns a tuple with a list of vertices and a list of edges.
"""
V = range(1,n+1)
E = [(i,j) for i in V for j in V if i < j and random.random() < prob]
return V,E
if __name__ == "__main__":
random.seed(1)
V,E = make_data(75,.25)
K,color = solve_gcp(V,E)
print("minimum number of colors:",K)
print("solution:",color)
|
flightcom/freqtrade | freqtrade/plugins/pairlist/VolatilityFilter.py | Python | gpl-3.0 | 4,749 | 0.002948 | """
Volatility pairlist filter
"""
import logging
import sys
from copy import deepcopy
from typing import Any, Dict, List, Optional
import arrow
import numpy as np
from cachetools.ttl import TTLCache
from pandas import DataFrame
from freqtrade.exceptions import OperationalException
from freqtrade.misc import plural
from freqtrade.plugins.pairlist.IPairList import IPairList
logger = logging.getLogger(__name__)
class VolatilityFilter(IPairList):
"""
Filters pairs by volatility
"""
def __init__(self, exchange, pairlistmanager,
config: Dict[str, Any], pairlistconfig: Dict[str, Any],
pairlist_pos: int) -> None:
super().__init__(exchange, pairlistmanager, config, pairlistconfig, pairlist_pos)
self._days = pairlistconfig.get('lookback_days', 10)
self._min_volatility = pairlistconfig.get('min_volatility', 0)
self._max_volatility = pairlistconfig.get('max_volatility', sys.maxsize)
self._refresh_period = pairlistconfig.get('refresh_period' | , 1440)
self._pair_cache: TTLCache = TTLCache(maxsize=1000, ttl=self._refresh_period)
if self._days < 1:
| raise OperationalException("VolatilityFilter requires lookback_days to be >= 1")
if self._days > exchange.ohlcv_candle_limit('1d'):
raise OperationalException("VolatilityFilter requires lookback_days to not "
"exceed exchange max request size "
f"({exchange.ohlcv_candle_limit('1d')})")
@property
def needstickers(self) -> bool:
"""
Boolean property defining if tickers are necessary.
If no Pairlist requires tickers, an empty List is passed
as tickers argument to filter_pairlist
"""
return False
def short_desc(self) -> str:
"""
Short whitelist method description - used for startup-messages
"""
return (f"{self.name} - Filtering pairs with volatility range "
f"{self._min_volatility}-{self._max_volatility} "
f" the last {self._days} {plural(self._days, 'day')}.")
def filter_pairlist(self, pairlist: List[str], tickers: Dict) -> List[str]:
"""
Validate trading range
:param pairlist: pairlist to filter or sort
:param tickers: Tickers (from exchange.get_tickers()). May be cached.
:return: new allowlist
"""
needed_pairs = [(p, '1d') for p in pairlist if p not in self._pair_cache]
since_ms = (arrow.utcnow()
.floor('day')
.shift(days=-self._days - 1)
.int_timestamp) * 1000
# Get all candles
candles = {}
if needed_pairs:
candles = self._exchange.refresh_latest_ohlcv(needed_pairs, since_ms=since_ms,
cache=False)
if self._enabled:
for p in deepcopy(pairlist):
daily_candles = candles[(p, '1d')] if (p, '1d') in candles else None
if not self._validate_pair_loc(p, daily_candles):
pairlist.remove(p)
return pairlist
def _validate_pair_loc(self, pair: str, daily_candles: Optional[DataFrame]) -> bool:
"""
Validate trading range
:param pair: Pair that's currently validated
        :param daily_candles: DataFrame of downloaded daily candles for the pair, or None
:return: True if the pair can stay, false if it should be removed
"""
# Check symbol in cache
cached_res = self._pair_cache.get(pair, None)
if cached_res is not None:
return cached_res
result = False
if daily_candles is not None and not daily_candles.empty:
returns = (np.log(daily_candles.close / daily_candles.close.shift(-1)))
returns.fillna(0, inplace=True)
volatility_series = returns.rolling(window=self._days).std()*np.sqrt(self._days)
volatility_avg = volatility_series.mean()
if self._min_volatility <= volatility_avg <= self._max_volatility:
result = True
else:
self.log_once(f"Removed {pair} from whitelist, because volatility "
f"over {self._days} {plural(self._days, 'day')} "
f"is: {volatility_avg:.3f} "
f"which is not in the configured range of "
f"{self._min_volatility}-{self._max_volatility}.",
logger.info)
result = False
self._pair_cache[pair] = result
return result
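# Standalone illustration of the volatility formula used above (editor's
# sketch with synthetic prices, not exchange data):
#
#   import numpy as np
#   import pandas as pd
#
#   close = pd.Series([100.0, 101, 99, 102, 100, 103, 101, 104, 102, 105, 103])
#   days = 10
#   returns = np.log(close / close.shift(-1)).fillna(0)
#   volatility = (returns.rolling(window=days).std() * np.sqrt(days)).mean()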
|
NaturalGIS/QGIS | tests/src/python/test_qgssymbollayerutils.py | Python | gpl-2.0 | 7,685 | 0 | # -*- coding: utf-8 -*-
"""QGIS Unit tests for QgsSymbolLayerUtils.
.. note:: This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
"""
__author__ = 'Nyall Dawson'
__date__ = '2016-09'
__copyright__ = 'Copyright 2016, The QGIS Project'
import qgis # NOQA
from qgis.core import (QgsSymbolLayerUtils,
QgsMarkerSymbol,
QgsArrowSymbolLayer)
from qgis.PyQt.QtGui import QColor
from qgis.PyQt.QtCore import QSizeF, QPointF
from qgis.testing import unittest, start_app
start_app()
class PyQgsSymbolLayerUtils(unittest.TestCase):
def testEncodeDecodeSize(self):
s = QSizeF()
string = QgsSymbolLayerUtils.encodeSize(s)
s2 = QgsSymbolLayerUtils.decodeSiz | e(string)
self.assertEqual(s2, s)
s = QSizeF(1.5, 2.5)
string = QgsSymbolLayerUtils.encodeSize(s)
s2 = QgsSymbolLayerUtils.decodeSize(string)
self.assertEqual(s2, s)
# bad string
s2 = QgsSymbolLayerUtils.decodeSize('')
self.assertEqual(s2, QSizeF(0, 0))
def testToSize(sel | f):
s2, ok = QgsSymbolLayerUtils.toSize(None)
self.assertFalse(ok)
s2, ok = QgsSymbolLayerUtils.toSize(4)
self.assertFalse(ok)
s2, ok = QgsSymbolLayerUtils.toSize('4')
self.assertFalse(ok)
# arrays
s2, ok = QgsSymbolLayerUtils.toSize([4])
self.assertFalse(ok)
s2, ok = QgsSymbolLayerUtils.toSize([])
self.assertFalse(ok)
s2, ok = QgsSymbolLayerUtils.toSize([4, 5, 6])
self.assertFalse(ok)
s2, ok = QgsSymbolLayerUtils.toSize([4, 5])
self.assertTrue(ok)
self.assertEqual(s2, QSizeF(4, 5))
s2, ok = QgsSymbolLayerUtils.toSize(['4', '5'])
self.assertTrue(ok)
self.assertEqual(s2, QSizeF(4, 5))
# string values
s = QSizeF()
string = QgsSymbolLayerUtils.encodeSize(s)
s2, ok = QgsSymbolLayerUtils.toSize(string)
self.assertTrue(ok)
self.assertEqual(s2, s)
s = QSizeF(1.5, 2.5)
string = QgsSymbolLayerUtils.encodeSize(s)
s2, ok = QgsSymbolLayerUtils.toSize(string)
self.assertTrue(ok)
self.assertEqual(s2, s)
# bad string
s2, ok = QgsSymbolLayerUtils.toSize('')
self.assertFalse(ok)
self.assertEqual(s2, QSizeF())
def testEncodeDecodePoint(self):
s = QPointF()
string = QgsSymbolLayerUtils.encodePoint(s)
s2 = QgsSymbolLayerUtils.decodePoint(string)
self.assertEqual(s2, s)
s = QPointF(1.5, 2.5)
string = QgsSymbolLayerUtils.encodePoint(s)
s2 = QgsSymbolLayerUtils.decodePoint(string)
self.assertEqual(s2, s)
# bad string
s2 = QgsSymbolLayerUtils.decodePoint('')
self.assertEqual(s2, QPointF())
def testToPoint(self):
s2, ok = QgsSymbolLayerUtils.toPoint(None)
self.assertFalse(ok)
s2, ok = QgsSymbolLayerUtils.toPoint(4)
self.assertFalse(ok)
s2, ok = QgsSymbolLayerUtils.toPoint('4')
self.assertFalse(ok)
# arrays
s2, ok = QgsSymbolLayerUtils.toPoint([4])
self.assertFalse(ok)
s2, ok = QgsSymbolLayerUtils.toPoint([])
self.assertFalse(ok)
s2, ok = QgsSymbolLayerUtils.toPoint([4, 5, 6])
self.assertFalse(ok)
s2, ok = QgsSymbolLayerUtils.toPoint([4, 5])
self.assertTrue(ok)
self.assertEqual(s2, QPointF(4, 5))
s2, ok = QgsSymbolLayerUtils.toPoint(['4', '5'])
self.assertTrue(ok)
self.assertEqual(s2, QPointF(4, 5))
# string values
s = QPointF()
string = QgsSymbolLayerUtils.encodePoint(s)
s2, ok = QgsSymbolLayerUtils.toPoint(string)
self.assertTrue(ok)
self.assertEqual(s2, s)
s = QPointF(1.5, 2.5)
string = QgsSymbolLayerUtils.encodePoint(s)
s2, ok = QgsSymbolLayerUtils.toPoint(string)
self.assertTrue(ok)
self.assertEqual(s2, s)
# bad string
s2, ok = QgsSymbolLayerUtils.toPoint('')
self.assertFalse(ok)
self.assertEqual(s2, QPointF())
def testDecodeArrowHeadType(self):
type, ok = QgsSymbolLayerUtils.decodeArrowHeadType(0)
self.assertTrue(ok)
self.assertEqual(type, QgsArrowSymbolLayer.HeadSingle)
type, ok = QgsSymbolLayerUtils.decodeArrowHeadType('single')
self.assertTrue(ok)
self.assertEqual(type, QgsArrowSymbolLayer.HeadSingle)
type, ok = QgsSymbolLayerUtils.decodeArrowHeadType(' SINGLE ')
self.assertTrue(ok)
self.assertEqual(type, QgsArrowSymbolLayer.HeadSingle)
type, ok = QgsSymbolLayerUtils.decodeArrowHeadType(1)
self.assertTrue(ok)
self.assertEqual(type, QgsArrowSymbolLayer.HeadReversed)
type, ok = QgsSymbolLayerUtils.decodeArrowHeadType('reversed')
self.assertTrue(ok)
self.assertEqual(type, QgsArrowSymbolLayer.HeadReversed)
type, ok = QgsSymbolLayerUtils.decodeArrowHeadType(2)
self.assertTrue(ok)
self.assertEqual(type, QgsArrowSymbolLayer.HeadDouble)
type, ok = QgsSymbolLayerUtils.decodeArrowHeadType('double')
self.assertTrue(ok)
self.assertEqual(type, QgsArrowSymbolLayer.HeadDouble)
type, ok = QgsSymbolLayerUtils.decodeArrowHeadType('xxxxx')
self.assertFalse(ok)
type, ok = QgsSymbolLayerUtils.decodeArrowHeadType(34)
self.assertFalse(ok)
def testDecodeArrowType(self):
type, ok = QgsSymbolLayerUtils.decodeArrowType(0)
self.assertTrue(ok)
self.assertEqual(type, QgsArrowSymbolLayer.ArrowPlain)
type, ok = QgsSymbolLayerUtils.decodeArrowType('plain')
self.assertTrue(ok)
self.assertEqual(type, QgsArrowSymbolLayer.ArrowPlain)
type, ok = QgsSymbolLayerUtils.decodeArrowType(' PLAIN ')
self.assertTrue(ok)
self.assertEqual(type, QgsArrowSymbolLayer.ArrowPlain)
type, ok = QgsSymbolLayerUtils.decodeArrowType(1)
self.assertTrue(ok)
self.assertEqual(type, QgsArrowSymbolLayer.ArrowLeftHalf)
type, ok = QgsSymbolLayerUtils.decodeArrowType('lefthalf')
self.assertTrue(ok)
self.assertEqual(type, QgsArrowSymbolLayer.ArrowLeftHalf)
type, ok = QgsSymbolLayerUtils.decodeArrowType(2)
self.assertTrue(ok)
self.assertEqual(type, QgsArrowSymbolLayer.ArrowRightHalf)
type, ok = QgsSymbolLayerUtils.decodeArrowType('righthalf')
self.assertTrue(ok)
self.assertEqual(type, QgsArrowSymbolLayer.ArrowRightHalf)
type, ok = QgsSymbolLayerUtils.decodeArrowType('xxxxx')
self.assertFalse(ok)
type, ok = QgsSymbolLayerUtils.decodeArrowType(34)
self.assertFalse(ok)
def testSymbolToFromMimeData(self):
"""
Test converting symbols to and from mime data
"""
symbol = QgsMarkerSymbol.createSimple({})
symbol.setColor(QColor(255, 0, 255))
self.assertFalse(QgsSymbolLayerUtils.symbolFromMimeData(None))
self.assertFalse(QgsSymbolLayerUtils.symbolToMimeData(None))
mime = QgsSymbolLayerUtils.symbolToMimeData(symbol)
self.assertTrue(mime is not None)
symbol2 = QgsSymbolLayerUtils.symbolFromMimeData(mime)
self.assertTrue(symbol2 is not None)
self.assertEqual(symbol2.color().name(), symbol.color().name())
if __name__ == '__main__':
unittest.main()
|
belokop-an/agenda-tools | code/MaKaC/webinterface/common/regFilters.py | Python | gpl-2.0 | 13,066 | 0.020435 | from datetime import datetime
import MaKaC.common.filters as filters
from MaKaC.webinterface.common.countries import CountryHolder
# -------------- FILTERING ------------------
class AccommFilterField( filters.FilterField ):
"""Contains the filtering criteria for the track of a contribution.
Inherits from: AbstractFilterField
Attributes:
_values -- (list) List of track identifiers;
_showNoValue -- (bool) Tells whether an contribution satisfies the
filter if it hasn't belonged to any track.
"""
_id = "accomm"
def satisfies( self, reg ):
"""
"""
if reg.getAccommodation().getAccommodationType() is None:
return self._showNoValue
if reg.getAccommodation().getAccommodationType() not in reg.getRegistrationForm().getAccommodationForm().getAccommodationTypesList():
return self._showNoValue
return reg.getAccommodation().getAccommodationType().getId() in self._values
class SessionFilterField( filters.FilterField ):
"""
"""
_id = "session"
def satisfies( self, reg ):
"""
"""
if reg.getSessionList():
for sess in reg.getSessionList():
if sess.getId() in self._values:
return True
elif sess not in reg.getRegistrationForm().getSessionsForm().getSessionList():
return self._showNoValue
else:
return self._showNoValue
return False
class SessionFirstPriorityFilterField( filters.FilterField ):
"""
"""
_id = "sessionfirstpriority"
def satisfies( self, reg ):
"""
"""
if len(reg.getSessionList()) > 0:
sess=reg.getSessionList()[0]
if sess.getId() in self._values:
return True
elif sess not in reg.getRegistrationForm().getSessionsForm().getSessionList():
return self._showNoValue
else:
return self._showNoValue
return False
class EventFilterField(filters.FilterField):
"""
"""
_id = "event"
def satisfies(self, reg):
"""
"""
if reg.getSocialEvents():
for event in reg.getSocialEvents():
if event.getId() in self._values:
return True
elif event.getSocialEventItem() not in reg.getRegistrationForm().getSocialEventForm().getSocialEventList():
return self._showNoValue
else:
return self._showNoValue
return False
class RegFilterCrit(filters.FilterCriteria):
_availableFields = { \
AccommFilterField.getId():AccommFilterField,
SessionFilterField.getId():SessionFilterField,
SessionFirstPriorityFilterField.getId():SessionFirstPriorityFilterField | ,
EventFilterField.getId():EventFilterField}
#------------- SORTING --------------------
class RegistrantSortin | gField(filters.SortingField):
def getSpecialId(self):
try:
if self._specialId:
pass
except AttributeError, e:
return self._id
return self._specialId
class NameSF(RegistrantSortingField):
_id="Name"
def compare( self, r1, r2 ):
"""
"""
res1 = r1.getFamilyName().upper()
if r1.getFirstName() != "":
res1 = "%s, %s"%( res1, r1.getFirstName() )
res2 = r2.getFamilyName().upper()
if r2.getFirstName() != "":
res2 = "%s, %s"%( res2, r2.getFirstName() )
return cmp( res1, res2 )
class PositionSF( RegistrantSortingField ):
_id = "Position"
def compare( self, r1, r2 ):
return cmp( r1.getPosition().lower(), r2.getPosition().lower() )
class CountrySF( RegistrantSortingField ):
_id = "Country"
def compare( self, r1, r2 ):
return cmp( CountryHolder().getCountryById(r1.getCountry()).lower(), CountryHolder().getCountryById(r2.getCountry()).lower() )
class CitySF( RegistrantSortingField ):
_id = "City"
def compare( self, r1, r2 ):
return cmp( r1.getCity().lower(), r2.getCity().lower() )
class PhoneSF( RegistrantSortingField ):
_id = "Phone"
def compare( self, r1, r2 ):
return cmp( r1.getPhone().lower(), r2.getPhone().lower() )
class InstitutionSF( RegistrantSortingField ):
_id = "Institution"
def compare( self, r1, r2 ):
return cmp( r1.getInstitution().lower(), r2.getInstitution().lower() )
class EmailSF( RegistrantSortingField ):
_id = "Email"
def compare( self, r1, r2 ):
return cmp( r1.getEmail().lower(), r2.getEmail().lower() )
class SessionsSF( RegistrantSortingField ):
_id = "Sessions"
def compare( self, r1, r2 ):
ses1 = r1.getSessionList()
ses2 = r2.getSessionList()
i = 0
while(i<min(len(ses1), len(ses2))):
v = cmp( ses1[i].getTitle().lower(), ses2[i].getTitle().lower() )
if v != 0:
return v
i += 1
if len(ses1)>len(ses2):
return 1
elif len(ses1)<len(ses2):
return -1
else:
return 0
class AccommodationSF( RegistrantSortingField ):
_id = "Accommodation"
def compare( self, r1, r2 ):
if r2.getAccommodation() is None and r1.getAccommodation() is not None:
return 1
elif r1.getAccommodation() is None and r2.getAccommodation() is not None:
return -1
elif r1.getAccommodation() is None and r2.getAccommodation() is None:
return 0
elif r2.getAccommodation().getAccommodationType() is None and r1.getAccommodation().getAccommodationType() is not None:
return 1
elif r1.getAccommodation().getAccommodationType() is None and r2.getAccommodation().getAccommodationType() is not None:
return -1
elif r1.getAccommodation().getAccommodationType() is None and r2.getAccommodation().getAccommodationType() is None:
return 0
else:
return cmp(r1.getAccommodation().getAccommodationType().getCaption(), r2.getAccommodation().getAccommodationType().getCaption())
class ArrivalDateSF( RegistrantSortingField ):
_id = "ArrivalDate"
def compare( self, r1, r2 ):
if r2.getAccommodation() is None and r1.getAccommodation() is not None:
return 1
elif r1.getAccommodation() is None and r2.getAccommodation() is not None:
return -1
elif r1.getAccommodation() is None and r2.getAccommodation() is None:
return 0
elif r2.getAccommodation().getArrivalDate() is None and r1.getAccommodation().getArrivalDate() is not None:
return 1
elif r1.getAccommodation().getArrivalDate() is None and r2.getAccommodation().getArrivalDate() is not None:
return -1
elif r1.getAccommodation().getArrivalDate() is None and r2.getAccommodation().getArrivalDate() is None:
return 0
else:
return cmp(r1.getAccommodation().getArrivalDate(), r2.getAccommodation().getArrivalDate())
class DepartureDateSF( RegistrantSortingField ):
_id = "DepartureDate"
def compare( self, r1, r2 ):
if r2.getAccommodation() is None and r1.getAccommodation() is not None:
return 1
elif r1.getAccommodation() is None and r2.getAccommodation() is not None:
return -1
elif r1.getAccommodation() is None and r2.getAccommodation() is None:
return 0
elif r2.getAccommodation().getDepartureDate() is None and r1.getAccommodation().getDepartureDate() is not None:
return 1
elif r1.getAccommodation().getDepartureDate() is None and r2.getAccommodation().getDepartureDate() is not None:
return -1
elif r1.getAccommodation().getDepartureDate() is None and r2.getAccommodation().getDepartureDate() is None:
return 0
else:
return cmp(r1.getAccommodation().getDepartureDate(), r2.getAccommodation().getDepartureDate())
class SocialEventsSF( RegistrantSortingF |
BoxLib-Codes/wdmerger | analysis/vol-wd.py | Python | mit | 2,763 | 0.0076 | #!/usr/bin/env python
import matplotlib
matplotlib.use('agg')
import sys
import yt
import numpy as np
from yt.visualization.volume_rendering.api import \
Scene, \
VolumeSource
# this is for the wdconvect problem
def doit(plotfile):
ds = yt.load(plotfile)
ds.periodicity = (True, True, True)
field = ('boxlib', 'density')
ds._get_field_info(field).take_log = True
sc = Scene()
# add a volume: select a sphere
vol = VolumeSource(ds, field=field)
vol.use_ghost_zones = True
sc.add_source(vol)
# transfer function
vals = [-1, 0, 1, 2, 3, 4, 5, 6, 7]
#vals = [0.1, 1.0, 10, 100., 1.e4, 1.e5, 1.e6, 1.e7]
sigma = 0.1
tf = yt.ColorTransferFunction((min(vals), max(vals)))
tf.clear()
cm = "coolwarm"
cm = "spectral"
for v in vals:
if v < 3:
alpha = 0.1
else:
alpha = 0.5
tf.sample_colormap(v, sigma**2, colormap=cm, alpha=alpha)
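    # Editor's note: each sample_colormap call adds a Gaussian bump (width
    # sigma**2) centered at value v in log10(density), so the rendering
    # highlights nested isodensity shells, with the denser shells (v >= 3)
    # given higher opacity.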
sc.get_source(0).transfer_function = tf
cam = sc.add_camera(ds, lens_type="perspective")
cam.resolution = (1920, 1080)
cam.position = 1.5*ds.arr(np.array([0.0, 5.e9, 5.e9]), 'cm')
# look toward the center -- we are dealing with an octant
center = 0.5*(ds.domain_left_edge + ds.domain_right_edge)
normal = (center - cam.position)
normal /= np.sqrt(normal.dot(normal))
cam.switch_orientation(normal_vector=normal,
north_vector=[0., 0., 1.])
cam.set_width(ds.domain_width)
#sc.annotate_axes()
#sc.annotate_domain(ds)
pid = plotfile.split("plt")[1]
sc.render()
sc.save("wdmerger_{}_new.png".format(pid), sigma_clip=6.0)
sc.save_annotated("wdmerger_annotated_{}_new.png".format(pid),
text_annotate=[[(0.05, 0.05),
| "t = {:.3f} s".format(float(ds.current_time.d)),
dict(horizontalalignment="left")],
[(0.5,0.95),
"Castro simulation of merging white dwarfs (0.6 $M_\odot$ + 0.9 $M_\odot$)",
dict(color="y", fontsize="22",
horizontalalignment="center")],
[(0.95,0.05),
"M. Katz et al.",
dict(color="w", fontsize="16",
horizontalalignment="right")]])
if __name__ == "__main__":
    if len(sys.argv) < 2:
        sys.exit("ERROR: no plotfile specified")
    for plotfile in sys.argv[1:]:
        doit(plotfile)
| |
huntxu/neutron | neutron/services/auto_allocate/db.py | Python | apache-2.0 | 17,330 | 0.000404 | # Copyright 2015-2016 Hewlett Packard Enterprise Development Company, LP
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron_lib.api.definitions import constants as api_const
from neutron_lib.api.definitions import l3 as l3_apidef
from neutron_lib.api.definitions import network as net_def
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib.db import utils as db_utils
from neutron_lib import exceptions as n_exc
from neutron_lib.objects import exceptions as obj_exc
from neutron_lib.plugins import constants
from neutron_lib.plugins import directory
from neutron_lib.plugins import utils as p_utils
from oslo_log import log as logging
from neutron._i18n import _
from neutron.common import exceptions as c_exc
from neutron.db import _resource_extend as resource_extend
from neutron.db import api as db_api
from neutron.db import common_db_mixin
from neutron.objects import auto_allocate as auto_allocate_obj
from neutron.objects import base as base_obj
from neutron.objects import network as net_obj
from neutron.services.auto_allocate import exceptions
LOG = logging.getLogger(__name__)
CHECK_REQUIREMENTS = 'dry-run'
def _ensure_external_network_default_value_callback(
resource, event, trigger, **kwargs):
"""Ensure the is_default db field matches the create/update request."""
# TODO(boden): remove shim once all callbacks use payloads
if 'payload' in kwargs:
_request = kwargs['payload'].request_body
_context = kwargs['payload'].context
_network = kwargs['payload'].desired_state
_orig = kwargs['payload'].states[0]
else:
_request = kwargs['request']
_context = kwargs['context']
_network = kwargs['network']
_orig = kwargs.get('original_network')
@db_api.retry_if_session_inactive()
def _do_ensure_external_network_default_value_callback(
context, request, orig, network):
is_default = request.get(api_const.IS_DEFAULT)
if is_default is None:
return
if is_default:
# ensure only one default external network at any given time
pager = base_obj.Pager(limit=1)
objs = net_obj.ExternalNetwork.get_objects(context,
_pager=pager, is_default=True)
if objs:
if objs[0] and network['id'] != objs[0].network_id:
raise exceptions.DefaultExternalNetworkExists(
net_id=objs[0].network_id)
if orig and orig.get(api_const.IS_DEFAULT) == is_default:
return
network[api_const.IS_DEFAULT] = is_default
# Reflect the status of the is_default on the create/update request
obj = net_obj.ExternalNetwork.get_object(context,
network_id=network['id'])
if obj:
obj.is_default = is_default
obj.update()
_do_ensure_external_network_default_value_callback(
_context, _request, _orig, _network)
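# Editor's note on the callback above: it runs in the PRECOMMIT phase, i.e.
# inside the API request's database transaction, so the is_default update and
# the DefaultExternalNetworkExists check commit (or roll back) atomically with
# the network create/update that triggered them.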
@resource_extend.has_resource_extenders
class AutoAllocatedTopologyMixin(common_db_mixin.CommonDbMixin):
def __new__(cls, *args, **kwargs):
# NOTE(kevinbenton): we subscribe on object construction because
# the tests blow away the callback manager for each run
new = super(AutoAllocatedTopologyMixin, cls).__new__(cls, *args,
**kwargs)
registry.subscribe(_ensure_external_network_default_value_callback,
resources.NETWORK, events.PRECOMMIT_UPDATE)
registry.subscribe(_ensure_external_network_default_value_callback,
resources.NETWORK, events.PRECOMMIT_CREATE)
return new
# TODO(armax): if a tenant modifies auto allocated resources under
# the hood the behavior of the get_auto_allocated_topology API is
# undetermined. Consider adding callbacks to deal with the following
# situations:
# - insert subnet -> plug router interface
# - delete router -> remove the entire topology
# - update subnet -> prevent operation
# - update router gateway -> prevent operation
# - ...
@property
def core_plugin(self):
if not getattr(self, '_core_plugin', None):
self._core_plugin = directory.get_plugin()
return self._core_plugin
@property
def l3_plugin(self):
if not getattr(self, '_l3_plugin', None):
self._l3_plugin = directory.get_plugin(constants.L3)
return self._l3_plugin
@staticmethod
@resource_extend.extends([net_def.COLLECTION_NAME])
def _extend_external_network_default(net_res, net_db):
"""Add is_default field to 'show' response."""
if net_db.external is not None:
net_res[api_const.IS_DEFAULT] = net_db.external.is_default
return net_res
def get_auto_allocated_topology(self, context, tenant_id, fields=None):
"""Return tenant's network associated to auto-allocated topology.
The topology will be provisioned upon return, if network is missing.
"""
fields = fields or []
tenant_id = self._validate(context, tenant_id)
if CHECK_REQUIREMENTS in fields:
# for dry-run requests, simply validates that subsequent
# requests can be fulfilled based on a set of requirements
# such as existence of default networks, pools, etc.
return self._check_requirements(context, tenant_id)
elif fields:
raise n_exc.BadRequest(resource='auto_allocate',
msg=_("Unrecognized field"))
# Check for an existent topology
network_id = self._get_auto_allocated_network(context, tenant_id)
if network_id:
return self._response(network_id, tenant_id, fields=fields)
# See if we indeed have an external network to connect to, otherwise
# we will fail fast
default_external_network = self._get_default_external_network(
context)
# If we reach this point, then we got some work to do!
network_id = self._build_topology(
context, tenant_id, default_external_network)
return self._response(network_id, tenant_id, fields=fields)
def delete_auto_allocated_topology(self, context, tenant_id):
tenant_id = self._validate(context, tenant_id)
topology = self._get_auto_allocated_topo | logy(context, tenant_id)
if topology:
subnets = self.core_plugin.get_subnets(
context,
filters={'network_id': [topology['network_id']]})
self._cleanup(
context, network_id=topology['network_id'],
router_id=topology['router_id'], subnets=subnets)
def _build_topology(self, context, tenant_id, default_external_network):
"""Build the network topology and retur | ns its network UUID."""
try:
subnets = self._provision_tenant_private_network(
context, tenant_id)
network_id = subnets[0]['network_id']
router = self._provision_external_connectivity(
context, default_external_network, subnets, tenant_id)
network_id = self._save(
context, tenant_id, network_id, router['id'], subnets)
return network_id
except exceptions.UnknownProvisioningError as e:
# Clean partially provisioned topologies, and reraise the
# error. If it can be retried, so be it.
LOG.error(" |
jia-kai/pynojo | pynojo/mp/session.py | Python | gpl-3.0 | 7,080 | 0.004802 | # -*- encoding: utf-8 -*-
# $File: session.py
# $Date: Sun Mar 04 19:27:38 2012 +0800
#
# Copyright (C) 2012 the pynojo development team <see AUTHORS file>
#
# Contributors to this file:
# PWX <airyai@gmail.com>
#
# This file is part of pynojo
#
# pynojo is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pynojo is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pynojo. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import print_function, unicode_literals
import protocol
import socket, logging
import gevent.event, gevent.coros
class Session(protocol.Dispatcher):
'''
JSON RPC socket session (as well as RPC dispatcher).
Manage RPC request list, and assign every request with a different
id, so that multiple requests can be made at one time correctly.
'''
def __init__(self, socket):
'''
Create a session.
:param socket: The stream socket instance.
:type socket: gevent.socket.socket.
'''
super(Session, self).__init__()
self.peerName = socket.getpeername()
self.name = ':'.join([str(s) for s in self.peerName[:2]])
self._disp = self
self._sck = socket
self._fp = socket.makefile('r')
self._lock = gevent.coros.Semaphore()
self._requests = {} # request queue
self._requestId = 1 # manage request id
self.r | equestTimeout = None # default request timeout
def writeline(self, msg):
'''
Send a line of message to the socket.
Nothing will be returned, but if the re | mote socket has closed,
Session._disconnected will be called.
:param msg: Message body.
:type msg: UTF-8 string.
'''
if (self._sck is None):
return False
ret = False
try:
self._sck.sendall(msg + '\n')
ret = True
except socket.error:
self._disconnected()
return ret
def readline(self):
'''
Read a line of message from the socket.
If socket has been closed, an empty string will be returned.
'''
if (self._fp is None):
return ''
ret = self._fp.readline()
if (not ret):
self._disconnected()
return ret
def _disconnected(self):
'''Callback when the socket has been disconnected.'''
# unset all objects
if (self._fp is not None):
self._fp.close() # TODO: test if .close blocks!
self._fp = None
if (self._sck is not None):
self._sck.close()
self._sck = None
# abandon all request
events = {}
events.update(self._requests)
self._requests.clear()
for e in events.itervalues():
e.set_exception(socket.error('Connection closed.'))
def abandon(self):
'''Abandon the session.'''
#self._sck.close()
self._disconnected()
def serve(self):
'''Start socket message loop.'''
while self._sck is not None:
msg = self.readline()
if (not msg):
return
msg = msg.strip()
obj = protocol.parseJson(msg)
if (isinstance(obj, tuple)):
logging.debug('Got bad message from %s.' % self.name)
self._send_response(protocol.Response(None, obj[0], obj[1]))
elif (isinstance(obj, protocol.Response)):
logging.debug('Got response from %s.' % self.name)
self._got_response(obj)
elif (isinstance(obj, protocol.Request)):
logging.debug('Handle request from %s.' % self.name)
gevent.spawn(self._serve_request, obj)
else:
self._got_badmessage(msg)
def _got_badmessage(self, msg):
'''Called while socket received a bad message.'''
pass
def _send_response(self, response):
'''Send response to the remote side.'''
return self.writeline(response.toJSON())
def _send_request(self, s, asyncResult):
if (not self.writeline(s)):
asyncResult.set_exception(socket.error('Connection closed.'))
def _serve_request(self, request):
'''Serve when get request from remote side.'''
result = self._disp.dispatch(request)
result.id = request.id
self.writeline(result.toJSON())
def _got_response(self, response):
'''Parse the response from remote side.'''
rId = response.id
ev = self._requests.pop(rId, None)
if (ev is not None):
if (response.error is not None):
ev.set_exception(response.error)
else:
ev.set(response.result)
    def _nextRequestId(self):
'''get next available job id.'''
self._lock.acquire()
ret = self._requestId
self._requestId += 1
if (self._requestId == 0xffffffff):
self._requestId = 1
self._lock.release()
return ret
def doRequest(self, request, timeout=None):
'''
Emit a request.
Raise socket.error if the connection has been closed.
'''
# assign a job id.
        rId = self._nextRequestId()
request.id = rId
# serialize request
s = request.toJSON()
# init async call
result = gevent.event.AsyncResult()
self._requests[rId] = result
# emit job
gevent.spawn(self._send_request, s, result)
# wait for result & delete job
try:
ret = result.get(timeout=timeout)
return ret
except:
self._requests.pop(rId, None)
raise
def call(self, method, *args, **kwargs):
'''
A fast interface to emit request.
JSON RPC requires only one of the list params or dict params,
so pass *args and **kwargs at the same time will cause a
TypeError.
Raise socket.error if the connection has been closed.
'''
if (len(args) > 0 and len(kwargs) > 0):
raise TypeError('JSON RPC requires only one of the list '
'params or dict params.')
params = (args if len(args) > 0
else kwargs if len(kwargs) > 0
else None)
timeout = self.requestTimeout
return self.doRequest(protocol.Request(method, params), timeout)
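# Illustrative client-side usage (editor's sketch; assumes `sock` is a
# connected gevent socket and that the remote dispatcher exposes a
# hypothetical "add" method):
#
#   session = Session(sock)
#   gevent.spawn(session.serve)          # start the message loop
#   result = session.call('add', 1, 2)   # blocks until the response arrives
#   session.abandon()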
|
PisiLinuxNew/kaptan | kaptan/libkaptan/ui_welcome.py | Python | gpl-3.0 | 2,425 | 0.0033 | # Copyright 2016 Metehan Özbek <mthnzbk@gmail.com>
# 2020 Erdem Ersoy <erdemersoy@erdemersoy.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, |
# MA 02110-1301, USA.
from PyQt5.QtWidgets import QW | izardPage, QLabel, QHBoxLayout, QVBoxLayout, QSpacerItem, QSizePolicy
from PyQt5.QtGui import QPixmap
from PyQt5.QtCore import Qt
class WelcomeWidget(QWizardPage):
def __init__(self, parent=None):
super().__init__(parent)
self.setSubTitle(self.tr("<h2>Welcome to Pisi Linux!</h2>"))
vlayout = QVBoxLayout(self)
vlayout.addItem(QSpacerItem(20, 150, QSizePolicy.Preferred, QSizePolicy.Minimum))
        hlayout = QHBoxLayout()  # no parent: this layout is nested into vlayout below
label = QLabel(self)
label.setText(self.tr("""<h1>What is Pisi Linux?</h1>
<p><strong>Pisi Linux</strong> is a reliable, secure, fast and user friendly operating system.</p>
<p>With Pisi Linux, you can connect to the internet, read your e-mails, work with your office documents,
watch movies, play music, develop applications, play games and much more!</p>
<p><strong>Kaptan</strong>, will help you personalize your Pisi Linux workspace easily and quickly.
Please click <strong>Next</strong> in order to begin.</p>"""))
label.setWordWrap(True)
label.setAlignment(Qt.AlignLeft)
hlayout.addWidget(label)
kaptan_logo = QLabel(self)
kaptan_logo.setScaledContents(True)
kaptan_logo.setPixmap(QPixmap("/usr/share/kaptan/images/kaptan_welcome.svg"))
kaptan_logo.setAlignment(Qt.AlignRight)
kaptan_logo.setFixedSize(196, 196)
hlayout.addWidget(kaptan_logo)
vlayout.addLayout(hlayout)
vlayout.addItem(QSpacerItem(20, 40, QSizePolicy.Preferred, QSizePolicy.Preferred))
|
mrtnrdl/.macdots | scripts/bin/platform-tools/systrace/catapult/devil/devil/android/logcat_monitor_test.py | Python | unlicense | 8,737 | 0.004922 | #!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# pylint: disable=protected-access
import itertools
import threading
import unittest
from devil import devil_env
from devil.android import logcat_monitor
from devil.android.sdk import adb_wrapper
with devil_env.SysPath(devil_env.PYMOCK_PATH):
import mock # pylint: disable=import-error
def _CreateTestLog(raw_logcat=None):
test_adb = adb_wrapper.Adb | Wrapper('0123456789abcdef')
test_adb.Logcat = mock.Mock(return_value=(l for l in raw_logcat))
test_log = logcat_monitor.LogcatMonitor(test_adb, clear=False)
return test_log
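# Editor's note: AdbWrapper.Logcat is replaced above with a finite generator,
# so the monitor's reader thread consumes scripted log lines instead of
# talking to a real device; clear=False keeps the mocked stream untouched.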
class LogcatMonitorTest(unittest.TestCase):
_TEST_THREADTIME_LOGCAT_DATA = [
'01-01 01:02:03.456 7890 0987 V LogcatMonitorTest: '
'verbose l | ogcat monitor test message 1',
'01-01 01:02:03.457 8901 1098 D LogcatMonitorTest: '
'debug logcat monitor test message 2',
'01-01 01:02:03.458 9012 2109 I LogcatMonitorTest: '
'info logcat monitor test message 3',
'01-01 01:02:03.459 0123 3210 W LogcatMonitorTest: '
'warning logcat monitor test message 4',
'01-01 01:02:03.460 1234 4321 E LogcatMonitorTest: '
'error logcat monitor test message 5',
'01-01 01:02:03.461 2345 5432 F LogcatMonitorTest: '
'fatal logcat monitor test message 6',
'01-01 01:02:03.462 3456 6543 D LogcatMonitorTest: '
'last line'
]
def assertIterEqual(self, expected_iter, actual_iter):
for expected, actual in itertools.izip_longest(expected_iter, actual_iter):
self.assertIsNotNone(
expected,
msg='actual has unexpected elements starting with %s' % str(actual))
self.assertIsNotNone(
actual,
msg='actual is missing elements starting with %s' % str(expected))
self.assertEqual(actual.group('proc_id'), expected[0])
self.assertEqual(actual.group('thread_id'), expected[1])
self.assertEqual(actual.group('log_level'), expected[2])
self.assertEqual(actual.group('component'), expected[3])
self.assertEqual(actual.group('message'), expected[4])
with self.assertRaises(StopIteration):
next(actual_iter)
with self.assertRaises(StopIteration):
next(expected_iter)
@mock.patch('time.sleep', mock.Mock())
def testWaitFor_success(self):
test_log = _CreateTestLog(
raw_logcat=type(self)._TEST_THREADTIME_LOGCAT_DATA)
test_log.Start()
actual_match = test_log.WaitFor(r'.*(fatal|error) logcat monitor.*', None)
self.assertTrue(actual_match)
self.assertEqual(
'01-01 01:02:03.460 1234 4321 E LogcatMonitorTest: '
'error logcat monitor test message 5',
actual_match.group(0))
self.assertEqual('error', actual_match.group(1))
test_log.Stop()
test_log.Close()
@mock.patch('time.sleep', mock.Mock())
def testWaitFor_failure(self):
test_log = _CreateTestLog(
raw_logcat=type(self)._TEST_THREADTIME_LOGCAT_DATA)
test_log.Start()
actual_match = test_log.WaitFor(
r'.*My Success Regex.*', r'.*(fatal|error) logcat monitor.*')
self.assertIsNone(actual_match)
test_log.Stop()
test_log.Close()
@mock.patch('time.sleep', mock.Mock())
def testWaitFor_buffering(self):
# Simulate an adb log stream which does not complete until the test tells it
# to. This checks that the log matcher can receive individual lines from the
# log reader thread even if adb is not producing enough output to fill an
# entire file io buffer.
finished_lock = threading.Lock()
finished_lock.acquire()
def LogGenerator():
for line in type(self)._TEST_THREADTIME_LOGCAT_DATA:
yield line
finished_lock.acquire()
test_adb = adb_wrapper.AdbWrapper('0123456789abcdef')
test_adb.Logcat = mock.Mock(return_value=LogGenerator())
test_log = logcat_monitor.LogcatMonitor(test_adb, clear=False)
test_log.Start()
actual_match = test_log.WaitFor(r'.*last line.*', None)
finished_lock.release()
self.assertTrue(actual_match)
test_log.Stop()
test_log.Close()
@mock.patch('time.sleep', mock.Mock())
def testFindAll_defaults(self):
test_log = _CreateTestLog(
raw_logcat=type(self)._TEST_THREADTIME_LOGCAT_DATA)
test_log.Start()
test_log.WaitFor(r'.*last line.*', None)
test_log.Stop()
expected_results = [
('7890', '0987', 'V', 'LogcatMonitorTest',
'verbose logcat monitor test message 1'),
('8901', '1098', 'D', 'LogcatMonitorTest',
'debug logcat monitor test message 2'),
('9012', '2109', 'I', 'LogcatMonitorTest',
'info logcat monitor test message 3'),
('0123', '3210', 'W', 'LogcatMonitorTest',
'warning logcat monitor test message 4'),
('1234', '4321', 'E', 'LogcatMonitorTest',
'error logcat monitor test message 5'),
('2345', '5432', 'F', 'LogcatMonitorTest',
'fatal logcat monitor test message 6')]
actual_results = test_log.FindAll(r'\S* logcat monitor test message \d')
self.assertIterEqual(iter(expected_results), actual_results)
test_log.Close()
@mock.patch('time.sleep', mock.Mock())
def testFindAll_defaults_miss(self):
test_log = _CreateTestLog(
raw_logcat=type(self)._TEST_THREADTIME_LOGCAT_DATA)
test_log.Start()
test_log.WaitFor(r'.*last line.*', None)
test_log.Stop()
expected_results = []
actual_results = test_log.FindAll(r'\S* nothing should match this \d')
self.assertIterEqual(iter(expected_results), actual_results)
test_log.Close()
@mock.patch('time.sleep', mock.Mock())
def testFindAll_filterProcId(self):
test_log = _CreateTestLog(
raw_logcat=type(self)._TEST_THREADTIME_LOGCAT_DATA)
test_log.Start()
test_log.WaitFor(r'.*last line.*', None)
test_log.Stop()
actual_results = test_log.FindAll(
r'\S* logcat monitor test message \d', proc_id=1234)
expected_results = [
('1234', '4321', 'E', 'LogcatMonitorTest',
'error logcat monitor test message 5')]
self.assertIterEqual(iter(expected_results), actual_results)
test_log.Close()
@mock.patch('time.sleep', mock.Mock())
def testFindAll_filterThreadId(self):
test_log = _CreateTestLog(
raw_logcat=type(self)._TEST_THREADTIME_LOGCAT_DATA)
test_log.Start()
test_log.WaitFor(r'.*last line.*', None)
test_log.Stop()
actual_results = test_log.FindAll(
r'\S* logcat monitor test message \d', thread_id=2109)
expected_results = [
('9012', '2109', 'I', 'LogcatMonitorTest',
'info logcat monitor test message 3')]
self.assertIterEqual(iter(expected_results), actual_results)
test_log.Close()
@mock.patch('time.sleep', mock.Mock())
def testFindAll_filterLogLevel(self):
test_log = _CreateTestLog(
raw_logcat=type(self)._TEST_THREADTIME_LOGCAT_DATA)
test_log.Start()
test_log.WaitFor(r'.*last line.*', None)
test_log.Stop()
actual_results = test_log.FindAll(
r'\S* logcat monitor test message \d', log_level=r'[DW]')
expected_results = [
('8901', '1098', 'D', 'LogcatMonitorTest',
'debug logcat monitor test message 2'),
('0123', '3210', 'W', 'LogcatMonitorTest',
'warning logcat monitor test message 4')
]
self.assertIterEqual(iter(expected_results), actual_results)
test_log.Close()
@mock.patch('time.sleep', mock.Mock())
def testFindAll_filterComponent(self):
test_log = _CreateTestLog(
raw_logcat=type(self)._TEST_THREADTIME_LOGCAT_DATA)
test_log.Start()
test_log.WaitFor(r'.*last line.*', None)
test_log.Stop()
actual_results = test_log.FindAll(r'.*', component='LogcatMonitorTest')
expected_results = [
('7890', '0987', 'V', 'LogcatMonitorTest',
'verbose logcat monitor test message 1'),
('8901', '1098', 'D', 'LogcatMonitorTest',
'debug logcat monitor test message 2'),
('9012', '2109', 'I', 'LogcatMonitorTest',
'info lo |
wenzheli/python_new | com/uva/learning/gibbs_sampler.py | Python | gpl-3.0 | 6,875 | 0.0112 | from com.uva.learning.learner import Learner
from sets import Set
import math
import random
import numpy as np
import copy
from com.uva.sample_latent_vars import sample_z_ab_from_edge
import cProfile, pstats, StringIO
import time
from com.uva.core_utils import gibbs_sampler
from array import *
class GibbsSampler(Learner):
def __init__(self, args, graph):
# call base class initialization
Learner.__init__(self, args, graph)
self._avg_log = []
self._timing = []
self._max_iteration = args.max_iteration
# latent variables
self.z = [ [ 0 for i in range(self._N) ] for j in range(self._N) ]
# counting variables
self.num_kk = np.zeros((self._K, 2))
self.num_n_k = np.zeros((self._N, self._K))
self._random_initialize()
def _random_initialize(self):
for i in range(0, self._N):
for j in range(i+1, self._N):
if (i,j) in self._network.get_held_out_set():
continue
y = 0
if (i,j) in self._network.get_linked_edges():
y = 1
self.z[i][j] = random.randint(0, self._K-1)
self.z[j][i] = random.randint(0, self._K-1)
self.num_n_k[i][self.z[i][j]] += 1
self.num_n_k[j][self.z[j][i]] += 1
if self.z[i][j] == self.z[j][i]:
if y == 1:
self.num_kk[self.z[i][j]][0] += 1
else:
self.num_kk[self.z[i][j]][1] += 1
def run(self):
itr = 0
        self._max_iteration = 500  # NOTE: hard-coded override of args.max_iteration for this run
| start = time.time()
while itr < self._max_iteration:
"""
pr = cProfile.Profile()
pr.enable()
"""
print "iteration: " + str(itr)
self._process()
self._update()
ppx = self._cal_perplexity_held_out()
if itr > 300:
size = | len(self._avg_log)
ppx = (1-1.0/(itr-300)) * self._avg_log[size-1] + 1.0/(itr-300) * ppx
self._avg_log.append(ppx)
else:
self._avg_log.append(ppx)
self._timing.append(time.time()-start)
if itr % 50 == 0:
self._save()
itr += 1
"""
pr.disable()
s = StringIO.StringIO()
sortby = 'cumulative'
ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
ps.print_stats()
print s.getvalue()
"""
def _save(self):
f = open('ppx_gibbs_sampler.txt', 'wb')
for i in range(0, len(self._avg_log)):
f.write(str(math.exp(self._avg_log[i])) + "\t" + str(self._timing[i]) +"\n")
f.close()
def _update(self):
# update pi
for i in range(0, self._N):
for j in range(0, self._K):
self._pi[i][j] = self.num_n_k[i][j]/(1.0 *np.sum(self.num_n_k[i]))
# update beta
for k in range(0, self._K):
self._beta[k] = (1+self.num_kk[k][0])*1.0/(self.num_kk[k][0]+self.num_kk[k][1]+1)
def _sample_from_distribution(self, p, K):
        n = K * K
temp = np.zeros(n)
cnt = 0
for i in range(0, K):
for j in range(0, K):
temp[cnt] = p[i][j]
cnt += 1
for i in range(1, n):
            temp[i] += temp[i-1]
u = random.random() * temp[n-1]
idx = 0
for i in range(0, n):
if u <= temp[i]:
idx = i
break
k1_new = int(idx/K)
k2_new = int(idx%K)
return (k1_new, k2_new)
def _process(self):
"""
run one iteration of gibbs sampling
"""
for i in range(0, self._N):
for j in range(i+1, self._N):
if (i,j) in self._network.get_held_out_set():
continue
y = 0
if (i,j) in self._network.get_linked_edges():
y = 1
# remove current assignment
z_ij_old = self.z[i][j]
z_ji_old = self.z[j][i]
self.num_n_k[i][z_ij_old] -= 1
self.num_n_k[j][z_ji_old] -= 1
if z_ij_old == z_ji_old:
if y == 1:
self.num_kk[z_ij_old][0] -= 1
else:
self.num_kk[z_ij_old][1] -= 1
(k1_new, k2_new) = gibbs_sampler(i,j,self._K,self._epsilon, y,self._alpha,self.num_n_k,self.num_kk, self._eta )
"""
# assign new values.
p = np.zeros((self._K, self._K))
for k1 in range(0, self._K):
for k2 in range(0, self._K):
if k1 != k2:
if y == 1:
term = self._epsilon
else:
term = 1 - self._epsilon
p[k1][k2] = (self._alpha + self.num_n_k[i][k1]) * (self._alpha + self.num_n_k[j][k2])\
* term
else:
if y == 1:
term = (self.num_kk[k1][0] +self._eta[0])/(self.num_kk[k1][0]+self.num_kk[k1][1]\
+self._eta[0]+self._eta[1])
else:
term = (self.num_kk[k1][1] + self._eta[1])/(self.num_kk[k1][0]+self.num_kk[k1][1]\
+self._eta[0]+self._eta[1])
p[k1][k2] = (self._alpha + self.num_n_k[i][k1]) * (self._alpha + self.num_n_k[j][k2])\
* term
(k1_new, k2_new) = self._sample_from_distribution(p, self._K)
"""
self.z[i][j] = k1_new
self.z[j][i] = k2_new
self.num_n_k[i][k1_new] += 1
self.num_n_k[j][k2_new] += 1
if k1_new == k2_new:
if y == 1:
self.num_kk[k1_new][0] += 1
else:
self.num_kk[k1_new][1] += 1
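
# Editor's hedged sketch (not part of the original class): the loops in
# GibbsSampler._sample_from_distribution implement inverse-transform sampling
# over a flattened K x K table of unnormalized weights. An equivalent,
# vectorized draw using the module's existing numpy/random imports; the
# helper name is illustrative only.
def _sample_from_distribution_np(p, K):
    flat = np.asarray(p).reshape(-1)    # flatten the K x K weight table
    cdf = np.cumsum(flat)               # running totals, as in the loop above
    u = random.random() * cdf[-1]
    idx = int(np.searchsorted(cdf, u))  # first index with cdf[idx] >= u
    return (idx // K, idx % K)          # recover the (k1, k2) cell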
|
hakanardo/pyvx | setup.py | Python | mit | 1,768 | 0.001131 | from distutils.core import setup, Command
from pyvx import __version__
import sys
class PyTestCommand(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import pytest
errno = pytest.main()
sys.exit(errno)
setup(
name='PyVX',
| description='OpenVX python support',
long_description='''
PyVX is a set of python bindings for `OpenVX`_. `OpenVX`_ is a standard for
expressing computer vision processing algorithms as a graph of function nodes.
This graph is verified once and can then be processed (executed) multiple
times. PyVX allows these graphs to be constructed and interacted with from
python. It also suppor | ts the use of multiple `OpenVX`_ backends, both C and
python backends. It also used to contain a code-generating `OpenVX`_ backend
written in python, but it will be moved to a package of its own (currently
it lives on the try1 branch of pyvx).
Further details are provided in the `Documentation`_
.. _`OpenVX`: https://www.khronos.org/openvx
.. _`Documentation`: https://pyvx.readthedocs.org
''',
version=__version__,
packages=['pyvx', 'pyvx.backend'],
package_data={'pyvx': ['cdefs/vx_api.h',
'cdefs/vx.h',
'cdefs/vx_kernels.h',
'cdefs/vx_nodes.h',
'cdefs/vx_types.h',
'cdefs/vx_vendors.h',
]},
zip_safe=False,
url='http://pyvx.readthedocs.org',
author='Hakan Ardo',
author_email='pyvx@googlegroups.com',
license='MIT',
install_requires=['cffi'],
cmdclass={'test': PyTestCommand},
tests_require=['pytest'],
)
|
justinbot/timecard | timecard/admin/views.py | Python | mit | 885 | 0.00226 | from datetime import datetime
from flask import Blueprint, render_template
from flask_cas import login_required
from timecard.api import current_period_start
from timecard.models import config, admin_required
admin_views = Blueprint('admin', __name__, url_prefix='/admin', template_folder='templates')
@admin_views.route('/')
@admin_views.route('/users', methods=['GET'])
@login_required
@admin_required
de | f admin_users_page():
return render_template(
'admin_users.html',
initial_date=datetime.now().isoformat(), # now, in server's time zone
initial_period_start=current_period_start().isoformat(),
period_duration=config['period_duration'],
lock_date=config['lock_date'],
)
@admin_views.route('/settings')
@login_required
@admin_required |
def admin_settings_page():
return render_template(
'admin_settings.html'
) |
google/clusterfuzz | src/clusterfuzz/_internal/tests/appengine/handlers/testcase_detail/remove_issue_test.py | Python | apache-2.0 | 2,010 | 0.001493 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""remove_issue tests."""
import unittest
import flask
import webtest
from clusterfuzz._internal.datastore import data_types
from clusterfuzz._internal.tests.test_libs import helpers as test_helpers
from clusterfuzz._internal.tests.test_libs import test_utils
from handlers.testcase_detail import remove_issue
from libs import form
@test_utils.with_cloud_emulators( | 'datastore')
class HandlerTest(unittest.TestCase):
"""Test Handler."""
def setUp(self):
test_helpers.patch(self, [
'handlers.testcase_detail.show.get_testcase_detail',
'libs.auth.get_current_user',
'libs.auth.is_current_use | r_admin',
])
self.mock.is_current_user_admin.return_value = True
self.mock.get_testcase_detail.return_value = {'testcase': 'yes'}
self.mock.get_current_user().email = 'test@user.com'
flaskapp = flask.Flask('testflask')
flaskapp.add_url_rule('/', view_func=remove_issue.Handler.as_view('/'))
self.app = webtest.TestApp(flaskapp)
def test_succeed(self):
"""Remove issue from a testcase."""
testcase = data_types.Testcase()
testcase.bug_information = '1234'
testcase.put()
resp = self.app.post_json('/', {
'testcaseId': testcase.key.id(),
'csrf_token': form.generate_csrf_token(),
})
self.assertEqual(200, resp.status_int)
self.assertEqual('yes', resp.json['testcase'])
self.assertEqual('', testcase.key.get().bug_information)
|
kavdev/dj-stripe | djstripe/contrib/rest_framework/permissions.py | Python | mit | 861 | 0.01626 | """
.. module:: dj-stripe.contrib.rest_framework.permissions.
:synopsis: dj-stripe - Permissions to be used with the dj-stripe REST API.
.. moduleauthor:: @kavdev, @pydanny |
"""
from rest_framework.permissions import BasePermission
from ...settings import subscriber_request_callback
from ...utils import subscriber_has_active_subscription
class DJStripeSubscriptionPermission(BasePermission | ):
"""A permission to be used when wanting to permit users with active subscriptions."""
def has_permission(self, request, view):
"""
Check if the subscriber has an active subscription.
Returns false if:
* a subscriber isn't passed through the request
See ``utils.subscriber_has_active_subscription`` for more rules.
"""
try:
            # The boolean result must be returned; without the return the
            # method yields None and the permission always denies.
            return subscriber_has_active_subscription(
                subscriber_request_callback(request))
except AttributeError:
return False
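

# --- Editor's hedged usage sketch (assumption; not shipped with dj-stripe) ---
# The permission attaches to a DRF view like any other BasePermission
# subclass. The view name and response payload below are illustrative only.
from rest_framework.response import Response
from rest_framework.views import APIView


class SubscriberOnlyView(APIView):
    """Example view gated on an active subscription (illustrative only)."""

    permission_classes = (DJStripeSubscriptionPermission,)

    def get(self, request):
        # Only reached when has_permission() above evaluated truthy.
        return Response({"detail": "subscription is active"})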
|
vialectrum/vialectrum | electrum_ltc/gui/qt/locktimeedit.py | Python | mit | 5,781 | 0.001557 | # Copyright (C) 2020 The Electrum developers
# Distributed under the MIT software license, see the accompanying
# file LICENCE or http://www.opensource.org/licenses/mit-license.php
import time
from datetime import datetime
from typing import Optional, Any
from PyQt5.QtCore import Qt, QDateTime
from PyQt5.QtGui import QPalette, QPainter
from PyQt5.QtWidgets import (QWidget, QLineEdit, QStyle, QStyleOptionFrame, QComboBox,
QHBoxLayout, QDateTimeEdit)
from electrum_ltc.i18n import _
from electrum_ltc.bitcoin import NLOCKTIME_MIN, NLOCKTIME_MAX, NLOCKTIME_BLOCKHEIGHT_MAX
from .util import char_width_in_lineedit
class LockTimeEdit(QWidget):
def __init__(self, parent=None):
QWidget.__init__(self, parent)
hbox = QHBoxLayout()
self.setLayout(hbox)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.setSpacing(0)
self.locktime_raw_e = LockTimeRawEdit()
self.locktime_height_e = LockTimeHeightEdit()
self.locktime_date_e = LockTimeDateEdit()
self.editors = [self.locktime_raw_e, self.locktime_height_e, self.locktime_date_e]
self.combo = QComboBox()
options = [_("Raw"), _("Block height"), _("Date")]
option_index_to_editor_map = {
0: self.locktime_raw_e,
1: self.locktime_height_e,
2: self.locktime_date_e,
}
default_index = 1
self.combo.addItems(options)
def on_current_index_changed(i):
for w in self.editors:
w.setVisible(False)
w.setEnabled(False)
prev_locktime = self.editor.get_locktime()
self.editor = option_index_to_editor_map[i]
if self.editor.is_acceptable_locktime(prev_lockt | ime):
self.editor.set_locktime(prev_locktime)
self.editor.setVisible(True)
self.editor.set | Enabled(True)
self.editor = option_index_to_editor_map[default_index]
self.combo.currentIndexChanged.connect(on_current_index_changed)
self.combo.setCurrentIndex(default_index)
on_current_index_changed(default_index)
hbox.addWidget(self.combo)
for w in self.editors:
hbox.addWidget(w)
hbox.addStretch(1)
def get_locktime(self) -> Optional[int]:
return self.editor.get_locktime()
def set_locktime(self, x: Any) -> None:
self.editor.set_locktime(x)
class _LockTimeEditor:
min_allowed_value = NLOCKTIME_MIN
max_allowed_value = NLOCKTIME_MAX
def get_locktime(self) -> Optional[int]:
raise NotImplementedError()
def set_locktime(self, x: Any) -> None:
raise NotImplementedError()
@classmethod
def is_acceptable_locktime(cls, x: Any) -> bool:
if not x: # e.g. empty string
return True
try:
x = int(x)
except:
return False
return cls.min_allowed_value <= x <= cls.max_allowed_value
class LockTimeRawEdit(QLineEdit, _LockTimeEditor):
def __init__(self, parent=None):
QLineEdit.__init__(self, parent)
self.setFixedWidth(14 * char_width_in_lineedit())
self.textChanged.connect(self.numbify)
def numbify(self):
text = self.text().strip()
chars = '0123456789'
pos = self.cursorPosition()
pos = len(''.join([i for i in text[:pos] if i in chars]))
s = ''.join([i for i in text if i in chars])
self.set_locktime(s)
# setText sets Modified to False. Instead we want to remember
# if updates were because of user modification.
self.setModified(self.hasFocus())
self.setCursorPosition(pos)
def get_locktime(self) -> Optional[int]:
try:
return int(str(self.text()))
except:
return None
def set_locktime(self, x: Any) -> None:
try:
x = int(x)
except:
self.setText('')
return
x = max(x, self.min_allowed_value)
x = min(x, self.max_allowed_value)
self.setText(str(x))
class LockTimeHeightEdit(LockTimeRawEdit):
max_allowed_value = NLOCKTIME_BLOCKHEIGHT_MAX
def __init__(self, parent=None):
LockTimeRawEdit.__init__(self, parent)
self.setFixedWidth(20 * char_width_in_lineedit())
self.help_palette = QPalette()
def paintEvent(self, event):
super().paintEvent(event)
panel = QStyleOptionFrame()
self.initStyleOption(panel)
textRect = self.style().subElementRect(QStyle.SE_LineEditContents, panel, self)
textRect.adjust(2, 0, -10, 0)
painter = QPainter(self)
painter.setPen(self.help_palette.brush(QPalette.Disabled, QPalette.Text).color())
painter.drawText(textRect, Qt.AlignRight | Qt.AlignVCenter, "height")
class LockTimeDateEdit(QDateTimeEdit, _LockTimeEditor):
min_allowed_value = NLOCKTIME_BLOCKHEIGHT_MAX + 1
def __init__(self, parent=None):
QDateTimeEdit.__init__(self, parent)
self.setMinimumDateTime(datetime.fromtimestamp(self.min_allowed_value))
self.setMaximumDateTime(datetime.fromtimestamp(self.max_allowed_value))
self.setDateTime(QDateTime.currentDateTime())
def get_locktime(self) -> Optional[int]:
dt = self.dateTime().toPyDateTime()
locktime = int(time.mktime(dt.timetuple()))
return locktime
def set_locktime(self, x: Any) -> None:
if not self.is_acceptable_locktime(x):
self.setDateTime(QDateTime.currentDateTime())
return
try:
x = int(x)
except:
self.setDateTime(QDateTime.currentDateTime())
return
dt = datetime.fromtimestamp(x)
self.setDateTime(dt)
|
joharei/QtChordii | settings/settings.py | Python | gpl-3.0 | 2,286 | 0.000875 | # coding: utf-8
# Copyright (C) 2013-2016 Johan Reitan
#
# This file is part of QtChordii.
#
# QtChordii is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at you | r option) any later version.
#
# QtChordii is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with QtChordii. If not, see <http://www.gnu.org/licenses/>.
from PyQt5.QtCore import QSettings, QCoreApplication, QSize, QPoint
APPL | ICATION_NAME = 'QtChordii'
group_main_window = 'MainWindow'
key_size = 'size'
key_pos = 'pos'
key_is_full_screen = 'is_full_screen'
key_splitter_sizes = 'splitter_sizes'
group_project_settings = 'ProjectSettings'
key_project_file = 'project_file'
def set_up_settings():
QCoreApplication.setOrganizationName(APPLICATION_NAME)
QCoreApplication.setApplicationName(APPLICATION_NAME)
def save_window_geometry(size, pos, is_full_screen, splitter_sizes):
settings = QSettings()
settings.beginGroup(group_main_window)
settings.setValue(key_size, size)
settings.setValue(key_pos, pos)
settings.setValue(key_is_full_screen, is_full_screen)
settings.setValue(key_splitter_sizes, splitter_sizes)
settings.endGroup()
def load_window_geometry():
settings = QSettings()
settings.beginGroup(group_main_window)
size = settings.value(key_size, type=QSize)
pos = settings.value(key_pos, type=QPoint)
is_full_screen = settings.value(key_is_full_screen, type=bool)
splitter_sizes = settings.value(key_splitter_sizes, type=int)
settings.endGroup()
return {key_size: size, key_pos: pos, key_is_full_screen: is_full_screen, key_splitter_sizes: splitter_sizes}
def save_project_file(filename):
settings = QSettings()
settings.setValue('/'.join((group_project_settings, key_project_file)), filename)
def load_project_file():
settings = QSettings()
return settings.value('/'.join((group_project_settings, key_project_file)))
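
# Editor's sketch (illustrative values): a typical round trip through the
# helpers above; QSettings persists the values per-user under the QtChordii
# organization/application names set in set_up_settings().
#
#     set_up_settings()
#     save_project_file('/home/user/songs.chordpro')
#     assert load_project_file() == '/home/user/songs.chordpro'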
|
TuSimple/simpledet | config/resnet_v1b/tridentnet_r152v1bc4_c5_2x.py | Python | apache-2.0 | 9,529 | 0.003883 | from models.tridentnet.builder import TridentFasterRcnn as Detector
from models.tridentnet.builder_v2 import TridentResNetV1bC4 as Backbone
from models.tridentnet.builder import TridentRpnHead as RpnHead
from models.tridentnet.builder import process_branch_outputs, process_branch_rpn_outputs
from symbol.builder import Neck
from symbol.builder import RoiAlign as RoiExtractor
from symbol.builder import BboxC5V1Head as BboxHead
from mxnext.complicate import normalizer_factory
def get_config(is_train):
class General:
log_frequency = 10
name = __name__.rsplit("/")[-1].rsplit(".")[-1]
batch_image = 1 if is_train else 1
fp16 = False
class Trident:
num_branch = 3
train_scaleaware = True
test_scaleaware = True
branch_ids = range(num_branch)
branch_dilates = [1, 2, 3]
valid_ranges = [(0, 90), (30, 160), (90, -1)]
valid_ranges_on_origin = True
branch_bn_shared = True
branch_conv_shared = True
branch_deform = False
assert num_branch == len(branch_ids)
assert num_branch == len(valid_ranges)
class KvstoreParam:
kvstore = "local"
batch_image = General.batch_image
gpus = [0, 1, 2, 3, 4, 5, 6, 7]
fp16 = General.fp16
class NormalizeParam:
# normalizer = normalizer_factory(type="syncbn", ndev=len(KvstoreParam.gpus))
normalizer = normalizer_factory(type="fixbn")
class BackboneParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
depth = 152
num_branch = Trident.num_branch
branch_ids = Trident.branch_ids
branch_dilates = Trident.branch_dilates
branch_bn_shared = Trident.branch_bn_shared
branch_conv_shared = Trident.branch_conv_shared
branch_deform = Trident.branch_deform
class NeckParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
class RpnParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
batch_image = General.batch_image * Trident.num_branch
class anchor_generate:
scale = (2, 4, 8, 16, 32)
ratio = (0.5, 1.0, 2.0)
stride = 16
image_anchor = 256
class head:
conv_channel = 512
mean = (0, 0, 0, 0)
std = (1, 1, 1, 1)
class proposal:
pre_nms_top_n = 12000 if is_train else 6000
post_nms_top_n = 500 if is_train else 300
nms_thr = 0.7
min_bbox_side = 0
class subsample_proposal:
proposal_wo_gt = True
image_roi = 128
fg_fraction = 0.5
fg_thr = 0.5
bg_thr_hi = 0.5
bg_thr_lo = 0.0
class bbox_target:
num_reg_class = 2
class_agnostic = True
weight = (1.0, 1.0, 1.0, 1.0)
mean = (0.0, 0.0, 0.0, 0.0)
std = (0.1, 0.1, 0.2, 0.2)
class BboxParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
num_class = 1 + 80
image_roi = 128
batch_image = General.batch_image * Trident.num_branch
class regress_target:
class_agnostic = True
mean = (0.0, 0.0, 0.0, 0.0)
std = (0.1, 0.1, 0.2, 0.2)
class RoiParam:
fp16 = General.fp16
normalizer = NormalizeParam.normalizer
out_size = 7
stride = 16
class DatasetParam:
if is_train:
image_set = ("coco_train2017", )
else:
image_set = ("coco_val2017", )
backbone = Backbone(BackboneParam)
neck = Neck(NeckParam)
rpn_head = RpnHead(RpnParam)
roi_extractor = RoiExtractor(RoiParam)
bbox_head = BboxHead(BboxParam)
detector = Detector()
if is_train:
train_sym = detector.get_train_symbol(
backbone, neck, rpn_head, roi_extractor, bbox_head,
num_branch=Trident.num_branch, scaleaware=Trident.train_scaleaware)
rpn_test_sym = None
test_sym = None
else:
train_sym = None
rpn_test_sym = detector.get_rpn_test_symbol(backbone, neck, rpn_head, Trident.num_branch)
test_sym = detector.get_test_symbol(
backbone, neck, rpn_head, roi_extractor, bbox_head, num_branch=Trident.num_branch)
class ModelParam:
train_symbol = train_sym
test_symbol = test_sym
rpn_test_symbol = rpn_test_sym
from_scratch = False
random = True
memonger = False
memonger_until = "stage3_unit21_plus"
class pretrain:
prefix = "pretrain_model/resnet%s_v1b" % BackboneParam.depth
epoch = 0
fixed_param = ["conv0", "stage1", "gamma", "beta"]
class OptimizeParam:
class optimizer:
type = "sgd"
lr = 0.01 / 8 * len(KvstoreParam.gpus) * KvstoreParam.batch_image
momentum = 0.9
wd = 0.0001
clip_gradient = 5
class schedule:
begin_epoch = 0
end_epoch = 12
lr_iter = [120000 * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image),
160000 * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image)]
class warmup:
type = "gradual"
lr = 0.0
iter = 3000 * 16 // (len(KvstoreParam.gpus) * KvstoreParam.batch_image)
class TestParam:
min_det_score = 0.001
max_det_per_image = 100
process_roidb = lambda x: x
if Trident.test_scaleaware:
process_output = lambda x, y: process_branch_outputs(
x, Trident.num_branch, Trident.valid_ranges, Trident.valid_ranges_on_origin)
| else:
process_output = lambda x, y: x
process_rpn_output = lambda x, y: process_branch_rpn_outputs(x, Trident.num_branch)
class model:
prefix = "experiments/{}/checkpoint".format(General.name)
epoch = OptimizeParam.schedule.end_epoch
class nms:
type = "nms"
thr = 0.5
class coco:
annotation = "data/coco/a | nnotations/instances_minival2014.json"
# data processing
class NormParam:
mean = tuple(i * 255 for i in (0.485, 0.456, 0.406)) # RGB order
std = tuple(i * 255 for i in (0.229, 0.224, 0.225))
class ResizeParam:
short = 800
long = 1200 if is_train else 2000
class PadParam:
short = 800
long = 1200 if is_train else 2000
max_num_gt = 100
class ScaleRange:
valid_ranges = Trident.valid_ranges
        cal_on_origin = Trident.valid_ranges_on_origin  # True: valid_ranges measured on the original image scale; False: on the resized image scale
class AnchorTarget2DParam:
class generate:
short = 800 // 16
long = 1200 // 16
stride = 16
scales = (2, 4, 8, 16, 32)
aspects = (0.5, 1.0, 2.0)
class assign:
allowed_border = 0
pos_thr = 0.7
neg_thr = 0.3
min_pos_thr = 0.0
class sample:
image_anchor = 256
pos_fraction = 0.5
class trident:
invalid_anchor_threshd = 0.3
class RenameParam:
mapping = dict(image="data")
from core.detection_input import ReadRoiRecord, Resize2DImageBbox, \
ConvertImageFromHwcToChw, Flip2DImageBbox, Pad2DImageBbox, \
RenameRecord, Norm2DImage
from models.tridentnet.input import ScaleAwareRange, TridentAnchorTarget2D
if is_train:
transform = [
ReadRoiRecord(None),
Norm2DImage(NormParam),
Resize2DImageBbox(ResizeParam),
Flip2DImageBbox(),
Pad2DImageBbox(PadParam),
ConvertImageFromHwcToChw(),
ScaleAwareRange(ScaleRange),
TridentAnchorTarget2D(AnchorTarget2DParam),
RenameRecord(RenameParam.mapping)
]
data_name = ["data", "im_info", "gt_bbox"]
if Trident.train_scaleaw |
takinbo/rapidsms-borno | apps/bednets/tests.py | Python | lgpl-3.0 | 18,579 | 0.007159 | from rapidsms.tests.scripted import TestScript
from apps.form.models import *
from apps.reporters.models import *
import apps.reporters.app as reporter_app
import apps.supply.app as supply_app
import apps.form.app as form_app
import apps.default.app as default_app
from app import App
from django.core.management.commands.dumpdata import Command
import time
import random
import os
from datetime import datetime
class TestApp (TestScript):
#apps = (reporter_app.App, App,form_app.App, supply_app.App, default_app.App )
apps = (reporter_app.App, App,form_app.App, supply_app.App )
# the test_backend script does the loading of the dummy backend that allows reporters
# to work properly in tests
fixtures = ['nigeria_llin', 'test_kano_locations', 'test_backend']
def setUp(self):
TestScript.setUp(self)
def testFixtures(self):
self._testKanoLocations()
self._testForms()
self._testRoles()
def testScript(self):
a = """
8005551219 > llin register 20 dl crummy user
8005551219 < Hello crummy! You are now registered as Distribution point team leader at KANO State.
"""
self.runScript(a)
# this should succeed because we just created him
reporters = Reporter.objects.all()
Reporter.objects.get(alias="cuser")
dict = {"alias":"fail"}
# make sure checking a non-existant user fails
self.assertRaises(Reporter.DoesNotExist, Reporter.objects.get, **dict)
testRegistration = """
8005551212 > llin my status
8005551212 < Please register your phone with RapidSMS.
8005551212 > llin register 20 dl dummy user
8005551212 < Hello dummy! You are now registered as Distribution point team leader at KANO State.
8005551212 > llin my status
8005551212 < I think you are dummy user.
#duplicate submission
test_reg_dup > llin register 20 dl duplicate user
test_reg_dup < Hello duplicate! You are now registered as Distribution point team leader | at KANO State.
# this one should be a duplicate
test_reg_dup > llin register 20 dl duplicate user
test_reg_dup < Hello again duplicate! You are already registered as a Distribution point | team leader at KANO State.
# but all of these should create a new registration
test_reg_dup > llin register 20 dl duplicate user withanothername
test_reg_dup < Hello duplicate! You are now registered as Distribution point team leader at KANO State.
test_reg_dup > llin register 20 dl duplicate userlonger
test_reg_dup < Hello duplicate! You are now registered as Distribution point team leader at KANO State.
test_reg_dup > llin register 20 dl duplicated user
test_reg_dup < Hello duplicated! You are now registered as Distribution point team leader at KANO State.
test_reg_dup > llin register 20 sm duplicate user
test_reg_dup < Hello duplicate! You are now registered as Stock manager at KANO State.
test_reg_dup > llin register 2001 dl duplicate user
test_reg_dup < Hello duplicate! You are now registered as Distribution point team leader at AJINGI LGA.
# case sensitivity
test_reg_2 > llin REGISTER 20 dl another user
test_reg_2 < Hello another! You are now registered as Distribution point team leader at KANO State.
# different name formats
test_reg_3 > llin register 20 dl onename
test_reg_3 < Hello onename! You are now registered as Distribution point team leader at KANO State.
# these fail
test_reg_4 > llin register 20 dl mister three names
test_reg_4 < Hello mister! You are now registered as Distribution point team leader at KANO State.
test_reg_5 > llin register 20 dl mister four name guy
test_reg_5 < Hello mister! You are now registered as Distribution point team leader at KANO State.
# some other spellings
test_reg_short > llin regstr 20 dl short user
test_reg_short < Hello short! You are now registered as Distribution point team leader at KANO State.
test_reg_short_2 > llin regs 20 dl short user
test_reg_short_2 < Hello short! You are now registered as Distribution point team leader at KANO State.
test_reg_short_3 > llin reg 20 dl short user
test_reg_short_3 < Hello short! You are now registered as Distribution point team leader at KANO State.
test_reg_long > llin registered 20 dl long user
test_reg_long < Hello long! You are now registered as Distribution point team leader at KANO State.
# extra spaces
test_reg_8 > llin register 20 dl space guy
test_reg_8 < Hello space! You are now registered as Distribution point team leader at KANO State.
# new tests for more flexible roles
test_reg_dl > llin register 20 dl distribution leader
test_reg_dl < Hello distribution! You are now registered as Distribution point team leader at KANO State.
test_reg_dl_2 > llin register 20 ds distribution leader
test_reg_dl_2 < Hello distribution! You are now registered as Distribution point team leader at KANO State.
test_reg_dl_3 > llin register 20 dm distribution leader
test_reg_dl_3 < Hello distribution! You are now registered as Distribution point team leader at KANO State.
test_reg_dl_4 > llin register 20 dp distribution leader
test_reg_dl_4 < Hello distribution! You are now registered as Distribution point team leader at KANO State.
test_reg_lf > llin register 20 lf lga focal person
test_reg_lf < Hello lga! You are now registered as LGA focal person at KANO State.
test_reg_lf > llin register 20 lp lga focal person
test_reg_lf < Hello again lga! You are already registered as a LGA focal person at KANO State.
# alas, we're not perfect
test_reg_fail > llin rgstr 20 dl sorry guy
test_reg_fail < Sorry we didn't understand that. Available forms are LLIN: REGISTER, NETCARDS, NETS, RECEIVE, ISSUE
"""
testRegistrationErrors = """
12345 > llin my status
12345 < Please register your phone with RapidSMS.
12345 > llin register 45 DL hello world
12345 < Invalid form. 45 not in list of location codes
12345 > llin my status
12345 < Please register your phone with RapidSMS.
12345 > llin register 20 pp hello world
12345 < Invalid form. Unknown role code: pp
12345 > llin my status
12345 < Please register your phone with RapidSMS.
12345 > llin register 6803 AL hello world
12345 < Invalid form. 6803 not in list of location codes. Unknown role code: AL
12345 > llin my status
12345 < Please register your phone with RapidSMS.
"""
testKeyword= """
tkw_1 > llin register 20 dl keyword tester
tkw_1 < Hello keyword! You are now registered as Distribution point team leader at KANO State.
# base case
tkw_1 > llin nets 2001 123 456 78 90
tkw_1 < Thank you keyword. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
# capitalize the domain
tkw_1 > LLIN nets 2001 123 456 78 90
tkw_1 < Thank you keyword. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
# drop an L
tkw_1 > lin nets 2001 123 456 78 90
tkw_1 < Thank you keyword. Received report for LLIN NETS: location=AJINGI, distributed=123, expected=456, actual=78, discrepancy=90
# mix the order - this is no longer supported
#tkw_1 > ILLn nets 2001 123 456 78 90
#tkw_1 < Thank you keyword. Received report for LLIN NETS |
citrix-openstack-build/tempest | tempest/api/compute/admin/test_flavors_access.py | Python | apache-2.0 | 5,575 | 0 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2013 NEC Corporation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
| #
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempe | st.api import compute
from tempest.api.compute import base
from tempest.common.utils.data_utils import rand_int_id
from tempest.common.utils.data_utils import rand_name
from tempest import exceptions
from tempest.test import attr
class FlavorsAccessTestJSON(base.BaseComputeAdminTest):
"""
Tests Flavor Access API extension.
    Adding and removing Flavor Access requires admin privileges.
"""
_interface = 'json'
@classmethod
def setUpClass(cls):
super(FlavorsAccessTestJSON, cls).setUpClass()
if not compute.FLAVOR_EXTRA_DATA_ENABLED:
msg = "FlavorExtraData extension not enabled."
raise cls.skipException(msg)
cls.client = cls.os_adm.flavors_client
admin_client = cls._get_identity_admin_client()
resp, tenants = admin_client.list_tenants()
cls.tenant_id = [tnt['id'] for tnt in tenants if tnt['name'] ==
cls.flavors_client.tenant_name][0]
cls.flavor_name_prefix = 'test_flavor_access_'
cls.ram = 512
cls.vcpus = 1
cls.disk = 10
@attr(type='gate')
def test_flavor_access_add_remove(self):
# Test to add and remove flavor access to a given tenant.
flavor_name = rand_name(self.flavor_name_prefix)
new_flavor_id = rand_int_id(start=1000)
resp, new_flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
new_flavor_id,
is_public='False')
self.addCleanup(self.client.delete_flavor, new_flavor['id'])
# Add flavor access to a tenant.
resp_body = {
"tenant_id": str(self.tenant_id),
"flavor_id": str(new_flavor['id']),
}
add_resp, add_body = \
self.client.add_flavor_access(new_flavor['id'], self.tenant_id)
self.assertEqual(add_resp.status, 200)
self.assertIn(resp_body, add_body)
# The flavor is present in list.
resp, flavors = self.flavors_client.list_flavors_with_detail()
self.assertEqual(resp.status, 200)
self.assertIn(new_flavor['id'], map(lambda x: x['id'], flavors))
# Remove flavor access from a tenant.
remove_resp, remove_body = \
self.client.remove_flavor_access(new_flavor['id'], self.tenant_id)
self.assertEqual(remove_resp.status, 200)
self.assertNotIn(resp_body, remove_body)
# The flavor is not present in list.
resp, flavors = self.flavors_client.list_flavors_with_detail()
self.assertEqual(resp.status, 200)
self.assertNotIn(new_flavor['id'], map(lambda x: x['id'], flavors))
@attr(type=['negative', 'gate'])
def test_flavor_non_admin_add(self):
# Test to add flavor access as a user without admin privileges.
flavor_name = rand_name(self.flavor_name_prefix)
new_flavor_id = rand_int_id(start=1000)
resp, new_flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
new_flavor_id,
is_public='False')
self.addCleanup(self.client.delete_flavor, new_flavor['id'])
self.assertRaises(exceptions.Unauthorized,
self.flavors_client.add_flavor_access,
new_flavor['id'],
self.tenant_id)
@attr(type=['negative', 'gate'])
def test_flavor_non_admin_remove(self):
# Test to remove flavor access as a user without admin privileges.
flavor_name = rand_name(self.flavor_name_prefix)
new_flavor_id = rand_int_id(start=1000)
resp, new_flavor = self.client.create_flavor(flavor_name,
self.ram, self.vcpus,
self.disk,
new_flavor_id,
is_public='False')
self.addCleanup(self.client.delete_flavor, new_flavor['id'])
# Add flavor access to a tenant.
self.client.add_flavor_access(new_flavor['id'], self.tenant_id)
self.addCleanup(self.client.remove_flavor_access,
new_flavor['id'], self.tenant_id)
self.assertRaises(exceptions.Unauthorized,
self.flavors_client.remove_flavor_access,
new_flavor['id'],
self.tenant_id)
class FlavorsAdminTestXML(FlavorsAccessTestJSON):
_interface = 'xml'
|
redhat-openstack/ironic | ironic/tests/drivers/test_ipmitool.py | Python | apache-2.0 | 85,048 | 0 | # coding=utf-8
# Copyright 2012 Hewlett-Packard Development Company, L.P.
# Copyright (c) 2012 NTT DOCOMO, INC.
# Copyright 2014 International Business Machines Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Test class for IPMITool driver module."""
import os
import stat
import subprocess
import tempfile
import time
import types
import mock
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_utils import uuidutils
import six
from ironic.common import boot_devices
from ironic.common import driver_factory
from ironic.common import exception
from ironic.common import states
from ironic.common import utils
from ironic.conductor import task_manager
from ironic.drivers.modules import console_utils
from ironic.drivers.modules import ipmitool as ipmi
from ironic.drivers import utils as driver_utils
from ironic.tests import base
from ironic.tests.conductor import utils as mgr_utils
from ironic.tests.db import base as db_base
from ironic.tests.db import utils as db_utils
from ironic.tests.objects import utils as obj_utils
CONF = cfg.CONF
CONF.import_opt('min_command_interval',
'ironic.drivers.modules.ipminative',
group='ipmi')
INFO_DICT = db_utils.get_test_ipmi_info()
# BRIDGE_INFO_DICT will have all the bridging parameters appended
BRIDGE_INFO_DICT = INFO_DICT.copy()
BRIDGE_INFO_DICT.update(db_utils.get_test_ipmi_bridging_parameters())
class IPMIToolCheckInitTestCase(base.TestCase):
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(utils, 'check_dir', autospec=True)
def test_power_init_calls(self, mock_check_dir, mock_support):
mock_support.return_value = True
ipmi.TMP_DIR_CHECKED = None
ipmi.IPMIPower()
mock_support.assert_called_with(mock.ANY)
mock_check_dir.assert_called_once_with()
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(utils, 'check_dir', autospec=True)
def test_power_init_calls_raises_1(self, mock_check_dir, mock_support):
mock_support.return_value = True
ipmi.TMP_DIR_CHECKED = None
mock_check_dir.side_effect = iter(
[exception.PathNotFound(dir="foo_dir")])
self.assertRaises(exception.PathNotFound, ipmi.IPMIPower)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(utils, 'check_dir', autospec=True)
def test_power_init_calls_raises_2(self, mock_check_dir, mock_support):
mock_support.return_value = True
ipmi.TMP_DIR_CHECKED = None
mock_check_dir.side_effect = iter(
[exception.DirectoryNotWritable(dir="foo_dir")])
self.assertRaises(exception.DirectoryNotWritable, ipmi.IPMIPower)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(utils, 'check_dir', autospec=True)
def test_power_init_calls_raises_3(self, mock_check_dir, mock_support):
mock_support.return_value = True
ipmi.TMP_DIR_CHECKED = None
mock_check_dir.side_effect = iter([exception.InsufficientDiskSpace(
path="foo_dir", required=1, actual=0)])
self.assertRaises(exception.InsufficientDiskSpace, ipmi.IPMIPower)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(utils, 'check_dir', autospec=True)
def test_power_init_calls_already_checked(self,
mock_check_dir,
mock_support):
mock_support.return_value = True
ipmi.TMP_DIR_CHECKED = True
ipmi.IPMIPower()
mock_support.assert_called_with(mock.ANY)
self.assertEqual(0, mock_check_dir.call_count)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(utils, 'check_dir', autospec=True)
def test_management_init_calls(self, mock_check_dir, mock_support):
mock_support.return_value = True
ipmi.TMP_DIR_CHECKED = None
ipmi.IPMIManagement()
mock_support.assert_called_with(mock.ANY)
mock_check_dir.assert_called_once_with()
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(utils, 'check_dir', autospec=True)
def test_management_init_calls_already_checked(self,
mock_check_dir,
mock_support):
mock_support.return_value = True
ipmi.TMP_DIR_CHECKED = False
ipmi.IPMIManagement()
mock_support.assert_called_with(mock.ANY)
self.assertEqual(0, mock_check_dir.call_count)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(utils, 'check_dir', autospec=True)
def test_vendor_passthru_init_calls(self, mock_check_dir, mock_support):
mock_support.return_value = True
ipmi.TMP_DIR_CHECKED = None
ipmi.VendorPassthru()
mock_support.assert_called_with(mock.ANY)
mock_check_dir.assert_called_once_with()
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(utils, 'check_dir', autospec=True)
def test_vendor_passthru_init_calls_already_checked(self,
mock_check_dir,
| mock_support):
mock_support.return_value = True
ipmi.TMP_DIR_CHECKED = True
ipmi.VendorPassthru()
mock_support.assert_called_with(mock.ANY)
self.assertEqu | al(0, mock_check_dir.call_count)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(utils, 'check_dir', autospec=True)
def test_console_init_calls(self, mock_check_dir, mock_support):
mock_support.return_value = True
ipmi.TMP_DIR_CHECKED = None
ipmi.IPMIShellinaboxConsole()
mock_support.assert_called_with(mock.ANY)
mock_check_dir.assert_called_once_with()
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(utils, 'check_dir', autospec=True)
def test_console_init_calls_already_checked(self,
mock_check_dir,
mock_support):
mock_support.return_value = True
ipmi.TMP_DIR_CHECKED = True
ipmi.IPMIShellinaboxConsole()
mock_support.assert_called_with(mock.ANY)
self.assertEqual(0, mock_check_dir.call_count)
@mock.patch.object(ipmi, '_is_option_supported', autospec=True)
@mock.patch.object(subprocess, 'check_call', autospec=True)
class IPMIToolCheckOptionSupportedTestCase(base.TestCase):
def test_check_timing_pass(self, mock_chkcall, mock_support):
mock_chkcall.return_value = (None, None)
mock_support.return_value = None
expected = [mock.call('timing'),
mock.call('timing', True)]
ipmi._check_option_support(['timing'])
self.assertTrue(mock_chkcall.called)
self.assertEqual(expected, mock_support.call_args_list)
def test_check_timing_fail(self, mock_chkcall, mock_support):
mock_chkcall.side_effect = iter(
[subprocess.CalledProcessError(1, 'ipmitool')])
mock_support.return_value = None
expected = [mock.call('timing'),
mock.call('timing', False)]
ipmi._check_option_support(['timing'])
self.assertTrue(mock_chkcall.called)
self.assertEqual(expected, mock_support.cal |
mikalstill/nova | nova/tests/unit/api/openstack/compute/test_server_metadata.py | Python | apache-2.0 | 31,012 | 0.000161 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_serialization import jsonutils
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
import six
import webob
from nova.api.openstack.compute import server_metadata \
| as server_metadata_v21
from nova.compute import vm_states
import nova.db.api
from nova import exception
from nova import test
from nova.tests.unit.api.openstack import fakes
CONF = cfg.CONF
def return_create_instance_metadata_max(context, server_id, metadata, delete):
return stub_max_server_metadata()
def return_create_instance_metadata(context, server_id, metadata, delete):
return stub_server_metadata()
def fake_instance_save(inst, **kwargs):
| inst.metadata = stub_server_metadata()
inst.obj_reset_changes()
def return_server_metadata(context, server_id):
if not isinstance(server_id, six.string_types) or not len(server_id) == 36:
msg = 'id %s must be a uuid in return server metadata' % server_id
raise Exception(msg)
return stub_server_metadata()
def return_empty_server_metadata(context, server_id):
return {}
def delete_server_metadata(context, server_id, key):
pass
def stub_server_metadata():
metadata = {
"key1": "value1",
"key2": "value2",
"key3": "value3",
}
return metadata
def stub_max_server_metadata():
metadata = {"metadata": {}}
for num in range(CONF.quota.metadata_items):
metadata['metadata']['key%i' % num] = "blah"
return metadata
def return_server_nonexistent(context, server_id,
columns_to_join=None, use_slave=False):
raise exception.InstanceNotFound(instance_id=server_id)
def fake_change_instance_metadata(self, context, instance, diff):
pass
class ServerMetaDataTestV21(test.TestCase):
validation_ex = exception.ValidationError
validation_ex_large = validation_ex
def setUp(self):
super(ServerMetaDataTestV21, self).setUp()
metadata = stub_server_metadata()
self.stub_out('nova.compute.api.API.get',
fakes.fake_compute_get(
**{'uuid': '0cc3346e-9fef-4445-abe6-5d2b2690ec64',
'name': 'fake',
'launched_at': timeutils.utcnow(),
'vm_state': vm_states.ACTIVE,
'metadata': metadata}))
self.stub_out('nova.db.api.instance_metadata_get',
return_server_metadata)
self.stub_out(
'nova.compute.rpcapi.ComputeAPI.change_instance_metadata',
fake_change_instance_metadata)
self._set_up_resources()
def _set_up_resources(self):
self.controller = server_metadata_v21.ServerMetadataController()
self.uuid = uuids.fake
self.url = '/fake/servers/%s/metadata' % self.uuid
def _get_request(self, param_url=''):
return fakes.HTTPRequestV21.blank(self.url + param_url)
def test_index(self):
req = self._get_request()
res_dict = self.controller.index(req, self.uuid)
expected = {
'metadata': {
'key1': 'value1',
'key2': 'value2',
'key3': 'value3',
},
}
self.assertEqual(expected, res_dict)
def test_index_nonexistent_server(self):
self.stub_out('nova.db.api.instance_metadata_get',
return_server_nonexistent)
req = self._get_request()
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.index, req, self.url)
def test_index_no_data(self):
self.stub_out('nova.db.api.instance_metadata_get',
return_empty_server_metadata)
req = self._get_request()
res_dict = self.controller.index(req, self.uuid)
expected = {'metadata': {}}
self.assertEqual(expected, res_dict)
def test_show(self):
req = self._get_request('/key2')
res_dict = self.controller.show(req, self.uuid, 'key2')
expected = {"meta": {'key2': 'value2'}}
self.assertEqual(expected, res_dict)
def test_show_nonexistent_server(self):
self.stub_out('nova.db.api.instance_metadata_get',
return_server_nonexistent)
req = self._get_request('/key2')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.uuid, 'key2')
def test_show_meta_not_found(self):
self.stub_out('nova.db.api.instance_metadata_get',
return_empty_server_metadata)
req = self._get_request('/key6')
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.show, req, self.uuid, 'key6')
def test_delete(self):
self.stub_out('nova.db.api.instance_metadata_get',
return_server_metadata)
self.stub_out('nova.db.api.instance_metadata_delete',
delete_server_metadata)
req = self._get_request('/key2')
req.method = 'DELETE'
res = self.controller.delete(req, self.uuid, 'key2')
self.assertIsNone(res)
def test_delete_nonexistent_server(self):
req = self._get_request('/key1')
req.method = 'DELETE'
with mock.patch('nova.compute.api.API.get',
side_effect=exception.InstanceNotFound(
instance_id=self.uuid)):
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.uuid, 'key1')
def test_delete_meta_not_found(self):
self.stub_out('nova.db.api.instance_metadata_get',
return_empty_server_metadata)
req = self._get_request('/key6')
req.method = 'DELETE'
self.assertRaises(webob.exc.HTTPNotFound,
self.controller.delete, req, self.uuid, 'key6')
def test_create(self):
self.stub_out('nova.objects.Instance.save', fake_instance_save)
req = self._get_request()
req.method = 'POST'
req.content_type = "application/json"
body = {"metadata": {"key9": "value9"}}
req.body = jsonutils.dump_as_bytes(body)
res_dict = self.controller.create(req, self.uuid, body=body)
body['metadata'].update({
"key1": "value1",
"key2": "value2",
"key3": "value3",
})
self.assertEqual(body, res_dict)
def test_create_empty_body(self):
self.stub_out('nova.db.api.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request()
req.method = 'POST'
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.create, req, self.uuid, body=None)
def test_create_item_empty_key(self):
self.stub_out('nova.db.api.instance_metadata_update',
return_create_instance_metadata)
req = self._get_request('/key1')
req.method = 'PUT'
body = {"metadata": {"": "value1"}}
req.body = jsonutils.dump_as_bytes(body)
req.headers["content-type"] = "application/json"
self.assertRaises(self.validation_ex,
self.controller.create, req, self.uuid, body=body)
def test_create_item_non_dict(self):
self.stub_out('nova.db.api.instance_metadata_update',
|
beallej/event-detection | WebApp/EventDetectionWeb.py | Python | mit | 2,619 | 0.006491 | import sys; import os
sys.path.insert(0, os.path.abspath('..'))
sys.path.insert(0, os.pa | th.abspath('.'))
from flask import Flask, render_template, request, redirect
import subprocess
from Utils import subprocess_helpers
from Ut | ils.DataSource import *
app = Flask(__name__)
dataSource = DataSource()
def launch_preprocessors():
process = subprocess.Popen(
subprocess_helpers.python_path + " Daemons/QueryProcessorDaemon.py && " + subprocess_helpers.python_path + " Daemons/ArticleProcessorDaemon.py",
executable=subprocess_helpers.executable, shell=True, universal_newlines=True)
@app.route("/", methods=["GET"])
def queries():
# Get lists of query from database with counts of associated articles
all_queries = dataSource.queries_route()
queries_formatted = [{"id": q[0], "subject": q[1], "verb": q[2], "direct_obj": q[3], "indirect_obj": q[4],
"loc": q[5], "article_count": q[6]} for q in all_queries]
return render_template("queries.html", queries=queries_formatted)
@app.route("/query", methods=["POST"])
def new_query():
# TODO: server side validation
subject = request.form["subject"]
verb = request.form["verb"]
direct_obj = request.form["direct-object"]
indirect_obj = request.form["indirect-object"]
loc = request.form["location"]
email = request.form["user-email"]
phone = request.form["user-phone"]
# Put into database
dataSource.new_query(email, phone, subject, verb, direct_obj, indirect_obj, loc)
return redirect("/")
@app.route("/query/<query_id>", methods=["GET"])
def query(query_id):
# find query by id
# if we don't find a query with that id, 404
articles, db_query = dataSource.query_route(query_id)
if db_query is not None:
articles_formatted = [{"title": a[0], "source": a[1], "url": a[2]} for a in articles]
query_formatted = {"id": db_query[0], "subject": db_query[1], "verb": db_query[2],
"direct_obj": db_query[3], "indirect_obj": db_query[4], "loc": db_query[5]}
return render_template("query.html", query=query_formatted, articles=articles_formatted)
return render_template("404.html"), 404
@app.route("/articles", methods=["GET"])
def articles():
articles = dataSource.articles_route()
articles_formatted = [{"title": a[0], "source": a[1], "url": a[2]} for a in articles]
return render_template("articles.html", articles=articles_formatted)
@app.errorhandler(404)
def page_not_found(e):
return render_template("404.html"), 404
if __name__ == "__main__":
app.run(debug=True)
|
KiChjang/servo | tests/wpt/web-platform-tests/tools/wptserve/wptserve/config.py | Python | mpl-2.0 | 11,668 | 0.0012 | import copy
import logging
import os
from collections import defaultdict
from collections.abc import Mapping
from . import sslutils
from .utils import get_port
_renamed_props = {
"host": "browser_host",
"bind_hostname": "bind_address",
"external_host": "server_host",
"host_ip": "server_host",
}
def _merge_dict(base_dict, override_dict):
rv = base_dict.copy()
for key, value in base_dict.items():
if key in override_dict:
if isinstance(value, dict):
rv[key] = _merge_dict(value, override_dict[key])
else:
rv[key] = override_dict[key]
return rv
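
# Editor's note (illustrative): _merge_dict only overrides keys that already
# exist in base_dict, recursing into nested dicts; unknown override keys are
# silently dropped. For example:
#
#     _merge_dict({"a": 1, "b": {"c": 2}}, {"b": {"c": 3}, "x": 9})
#     # -> {"a": 1, "b": {"c": 3}}   ("x" is ignored)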
class Config(Mapping):
"""wptserve config
Inherits from Mapping for backwards compatibility with the old dict-based config"""
def __init__(self, logger_name, data):
self.__dict__["_logger_name"] = logger_name
self.__dict__.update(data)
def __str__(self):
return str(self.__dict__)
def __setattr__(self, key, value):
raise ValueError("Config is immutable")
    def __setitem__(self, key, value):
raise ValueError("Config is immutable")
def __getitem__(self, key):
try:
return getattr(self, key)
except AttributeError:
            # Mapping consumers (e.g. .get()) expect KeyError here.
            raise KeyError(key)
def __contains__(self, key):
return key in self.__dict__
def __iter__(self):
return (x for x in self.__dict__ if not x.startswith("_"))
def __len__(self):
return len([item for item in self])
@property
def logger(self):
logger = logging.getLogger(self._logger_name)
logger.setLevel(self.log_level.upper())
return logger
def as_dict(self):
return json_types(self.__dict__)
# Environment variables are limited in size so we need to prune the most egregious contributors
# to size, the origin policy subdomains.
def as_dict_for_wd_env_variable(self):
result = self.as_dict()
for key in [
("subdomains",),
("domains", "alt"),
("domains", ""),
("all_domains", "alt"),
("all_do | mains", ""),
("domains_set",),
("all_domains_set",)
]:
target = result
for part in key[:-1]:
target = target[part]
value = target[key[-1]]
if isinstance(value, dict):
target[key[-1]] = {k:v for (k,v) i | n value.items() if not k.startswith("op")}
else:
target[key[-1]] = [x for x in value if not x.startswith("op")]
return result
def json_types(obj):
if isinstance(obj, dict):
return {key: json_types(value) for key, value in obj.items()}
if (isinstance(obj, str) or
isinstance(obj, int) or
isinstance(obj, float) or
isinstance(obj, bool) or
obj is None):
return obj
if isinstance(obj, list) or hasattr(obj, "__iter__"):
return [json_types(value) for value in obj]
raise ValueError
class ConfigBuilder(object):
"""Builder object for setting the wptsync config.
Configuration can be passed in as a dictionary to the constructor, or
set via attributes after construction. Configuration options must match
the keys on the _default class property.
The generated configuration is obtained by using the builder
object as a context manager; this returns a Config object
containing immutable configuration that may be shared between
threads and processes. In general the configuration is only valid
for the context used to obtain it.
with ConfigBuilder() as config:
# Use the configuration
        print(config.browser_host)
The properties on the final configuration include those explicitly
supplied and computed properties. The computed properties are
defined by the computed_properties attribute on the class. This
is a list of property names, each corresponding to a _get_<name>
method on the class. These methods are called in the order defined
in computed_properties and are passed a single argument, a
dictionary containing the current set of properties. Thus computed
properties later in the list may depend on the value of earlier
ones.
"""
_default = {
"browser_host": "localhost",
"alternate_hosts": {},
"doc_root": os.path.dirname("__file__"),
"server_host": None,
"ports": {"http": [8000]},
"check_subdomains": True,
"log_level": "debug",
"bind_address": True,
"ssl": {
"type": "none",
"encrypt_after_connect": False,
"none": {},
"openssl": {
"openssl_binary": "openssl",
"base_path": "_certs",
"password": "web-platform-tests",
"force_regenerate": False,
"duration": 30,
"base_conf_path": None
},
"pregenerated": {
"host_key_path": None,
"host_cert_path": None,
},
},
"aliases": []
}
default_config_cls = Config
# Configuration properties that are computed. Each corresponds to a method
# _get_foo, which is called with the current data dictionary. The properties
# are computed in the order specified in the list.
computed_properties = ["log_level",
"paths",
"server_host",
"ports",
"domains",
"not_domains",
"all_domains",
"domains_set",
"not_domains_set",
"all_domains_set",
"ssl_config"]
def __init__(self,
logger=None,
subdomains=set(),
not_subdomains=set(),
config_cls=None,
**kwargs):
self._data = self._default.copy()
self._ssl_env = None
self._config_cls = config_cls or self.default_config_cls
if logger is None:
self._logger_name = "web-platform-tests"
else:
level_name = logging.getLevelName(logger.level)
if level_name != "NOTSET":
self.log_level = level_name
self._logger_name = logger.name
for k, v in self._default.items():
self._data[k] = kwargs.pop(k, v)
self._data["subdomains"] = subdomains
self._data["not_subdomains"] = not_subdomains
for k, new_k in _renamed_props.items():
if k in kwargs:
self.logger.warning(
"%s in config is deprecated; use %s instead" % (
k,
new_k
)
)
self._data[new_k] = kwargs.pop(k)
if kwargs:
raise TypeError("__init__() got unexpected keyword arguments %r" % (tuple(kwargs),))
def __setattr__(self, key, value):
if not key[0] == "_":
self._data[key] = value
else:
self.__dict__[key] = value
@property
def logger(self):
logger = logging.getLogger(self._logger_name)
logger.setLevel(self._data["log_level"].upper())
return logger
def update(self, override):
"""Load an overrides dict to override config values"""
override = override.copy()
for k in self._default:
if k in override:
self._set_override(k, override.pop(k))
for k, new_k in _renamed_props.items():
if k in override:
self.logger.warning(
"%s in config is deprecated; use %s instead" % (
k,
new_k
)
)
self._set_override(new_k, override.pop(k))
if override:
k = next(iter(override))
raise KeyError("unknown config override '%s'" % k)
def _set_override(self, k, v):
old_v = self |
Haynie-Research-and-Development/jarvis | deps/lib/python3.4/site-packages/pywink/devices/robot.py | Python | gpl-2.0 | 782 | 0 | from pywink.devices.base import WinkDevice
class WinkRobot(WinkDevice):
"""
Represents a Wink robot.
"""
def __init__(self, device_state_as_json, api_interface):
| super(WinkRobot, self).__init__(device_state_as_json, api_interface)
self._available = True
self._capability = "fired"
self._unit = None
def state(self):
return self._last_reading.get(self.capability(), False)
def available(self):
"""
Robots are virtual therefo | re they don't have a connection status
always return True.
"""
return self._available
def unit(self):
# Robots are a boolean sensor, they have no unit.
return self._unit
def capability(self):
return self._capability
|
Perkville/django-tastypie | tests/profilingtests/urls.py | Python | bsd-3-clause | 252 | 0 | from django.conf.urls import include, url
from tastypie.api import Api
from .resources import NoteResourc | e, UserResource
api = Api()
api.register(NoteResource())
api.register(UserResource())
|
urlpatterns = [
url(r'^api/', include(api.urls)),
]
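
# Editor's note (illustrative): with tastypie's default api_name ("v1"), the
# include above exposes /api/v1/<resource_name>/ endpoints for each resource
# registered on the Api instance.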
|
suqinhuang/tp-qemu | qemu/tests/cpu_device_hotplug.py | Python | gpl-2.0 | 4,072 | 0.000246 | import logging
import re
import time
from autotest.client.shared import error
from virttest import utils_misc
@error.context_aware
def run(test, params, env):
"""
    Runs vCPU hotplug tests based on CPU device.
"""
def hotplug(vm, current_cpus, total_cpus, vcpu_threads):
for cpu in range(current_cpus, total_cpus):
error.context("hot-pluging vCPU %s" % cpu, logging.info)
vm.hotplug_vcpu(cpu_id=cpu, plug_command=hotplug_cmd)
time.sleep(0.1)
time.sleep(5)
def hotunplug(vm, current_cpus, total_cpus, vcpu_threads):
for cpu in range(current_cpus, total_cpus):
error.context("hot-unpluging vCPU %s" % cpu, logging.info)
vm.hotplug_vcpu(cpu_id=cpu, plug_command=unplug_cmd, unplug="yes")
time.sleep(0.1)
# Need more time to unplug, so sleeping more than hotplug.
time.sleep(10)
def verify(vm, total_cpus):
output = vm.monitor.send_args_cmd("info cpus")
logging.debug("Output of info CPUs:\n%s", output)
cpu_regexp = re.compile("CPU #(\d+)")
total_cpus_monitor = len(cpu_regexp.findall(output))
if total_cpus_monitor != total_cpus:
raise error.TestFail("Monitor reports %s CPUs, when VM should have"
" %s" % (total_cpus_monitor, total_cpus))
error.context("hotplugging finished, let's wait a few sec and"
" check CPUs quantity in guest.", logging.info)
if not utils_misc.wait_for(lambda: utils_misc.check_if_vm_vcpu_match(
total_cpus, vm),
60 + total_cpus, first=10,
| step=5.0, text="retry later"):
raise error | .TestFail("CPU quantity mismatch cmd after hotplug !")
error.context("rebooting the vm and check CPU quantity !",
logging.info)
session = vm.reboot()
if not utils_misc.check_if_vm_vcpu_match(total_cpus, vm):
raise error.TestFail("CPU quantity mismatch cmd after hotplug "
"and reboot !")
error.context("boot the vm, with '-smp X,maxcpus=Y' option,"
"thus allow hotplug vcpu", logging.info)
vm = env.get_vm(params["main_vm"])
vm.verify_alive()
timeout = int(params.get("login_timeout", 360))
session = vm.wait_for_login(timeout=timeout)
n_cpus_add = int(params.get("n_cpus_add", 1))
n_cpus_remove = int(params.get("n_cpus_remove", 1))
maxcpus = int(params.get("maxcpus", 240))
current_cpus = int(params.get("smp", 2))
onoff_iterations = int(params.get("onoff_iterations", 20))
hotplug_cmd = params.get("cpu_hotplug_cmd", "")
unplug_cmd = params.get("cpu_hotunplug_cmd", "")
vcpu_cores = int(params.get("vcpu_cores", 1))
vcpu_threads = int(params.get("vcpu_threads", 1))
cpu_model = params.get("cpu_model", "host")
unplug = params.get("unplug", "no")
total_cpus = current_cpus
if unplug == "yes":
n_cpus_add = n_cpus_remove
hotplug_cmd = hotplug_cmd.replace("CPU_MODEL", cpu_model)
if (n_cpus_add * vcpu_threads) + current_cpus > maxcpus:
logging.warn("CPU quantity more than maxcpus, set it to %s", maxcpus)
total_cpus = maxcpus
else:
total_cpus = current_cpus + (n_cpus_add * vcpu_threads)
logging.info("current_cpus=%s, total_cpus=%s", current_cpus, total_cpus)
error.context("check if CPUs in guest matches qemu cmd "
"before hot-plug", logging.info)
if not utils_misc.check_if_vm_vcpu_match(current_cpus, vm):
raise error.TestError("CPU quantity mismatch cmd before hotplug !")
hotplug(vm, current_cpus, total_cpus, vcpu_threads)
verify(vm, total_cpus)
if unplug == "yes":
hotunplug(vm, current_cpus, total_cpus, vcpu_threads)
total_cpus = total_cpus - (n_cpus_remove * vcpu_threads)
if total_cpus <= 0:
total_cpus = current_cpus
verify(vm, total_cpus)
|
Juanlu001/CBC.Solve | cbc/common/utils.py | Python | gpl-3.0 | 1,493 | 0.008707 | "This module provides a set of common utility functions."
__author__ = "Anders Logg"
__copyright__ = "Copyright (C) 2009 Simula Research Laboratory and %s" % __author__
__license__ = "GNU GPL Version 3 or any later version"
from math import ceil
from numpy import linspace
from dolfin import PeriodicBC, error, warning
def is_periodic(bcs):
| "Check if boundary conditions are periodic"
return all(isinstance(bc, PeriodicBC) for bc in bcs)
def missing_function(function):
"Write an informative error message when function has not been overloaded"
error("The function %s() has not been specified. Please provide a specification of this function.")
def timestep_range(T, dt):
"""Return a matching time step range for given end time and time |
step. Note that the time step may be adjusted so that it matches
the given end time."""
# Compute range
ds = dt
n = ceil(T / dt)
t_range = linspace(0, T, n + 1)[1:]
dt = t_range[0]
# Warn about changing time step
if ds != dt:
warning("Changing time step from %g to %g" % (ds, dt))
return dt, t_range
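# Worked example (illustrative): timestep_range(1.0, 0.3) computes
# n = ceil(1.0 / 0.3) = 4, so t_range = [0.25, 0.5, 0.75, 1.0] and the
# returned dt is 0.25, after a warning about the adjusted time step.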
def timestep_range_cfl(problem, mesh):
"""Return a sensible default time step and time step range based
on an approximate CFL condition."""
# Get problem parameters
T = problem.end_time()
dt = problem.time_step()
# Set time step based on mesh if not specified
if dt is None:
dt = 0.25*mesh.hmin()
return timestep_range(T, dt)
|
ShaperTools/openhtf | openhtf/core/test_descriptor.py | Python | apache-2.0 | 18,718 | 0.006037 | # Copyright 2014 Google Inc. All Rights Reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests in OpenHTF.
Tests are the main entry point for OpenHTF tests. In its simplest form, a
test is a series of Phases that are executed by the OpenHTF framework.
"""
import argparse
import collections
import logging
import os
import sys
import textwrap
import threading
from types import LambdaType
import uuid
import weakref
import colorama
import mutablerecords
from openhtf import util
from openhtf.core import phase_descriptor
from openhtf.core import phase_executor
from openhtf.core import phase_group
from openhtf.core import test_executor
from openhtf.core import test_record
from openhtf.util import conf
from openhtf.util import console_output
from openhtf.util import logs
import six
_LOG = logging.getLogger(__name__)
conf.declare('capture_source', description=textwrap.dedent(
'''Whether to capture the source of phases and the test module. This
defaults to False since this potentially reads many files and makes large
string copies.
Set to 'true' if you want to capture your test's source.'''),
default_value=False)
# TODO(arsharma): Deprecate this configuration after removing the old teardown
# specification.
conf.declare('teardown_timeout_s', default_value=30, description=
'Default timeout (in seconds) for test teardown functions; '
'this option is deprecated and only applies to the deprecated '
'Test level teardown function.')
cla | ss UnrecognizedTestUidError(Exception):
"""Raised when information is requested about an unknown Test UID."""
class InvalidTestPhaseError(Exception):
"""Raised when an invalid method is decorated."""
class InvalidTestStateError(Exception):
"""Raised when an operation is attempted in an invalid state."""
def create_arg_parser(add_help=False):
"""Creates an argparse.ArgumentParser for parsing command line flags.
If you want to add arguments, create your own wit | h this as a parent:
>>> parser = argparse.ArgumentParser(
'My args title', parents=[openhtf.create_arg_parser()])
>>> parser.parse_args()
Args:
add_help: boolean option passed through to arg parser.
Returns:
an `argparse.ArgumentParser`
"""
parser = argparse.ArgumentParser(
'OpenHTF-based testing',
parents=[
conf.ARG_PARSER,
console_output.ARG_PARSER,
logs.ARG_PARSER,
phase_executor.ARG_PARSER,
],
add_help=add_help)
parser.add_argument(
'--config-help', action='store_true',
help='Instead of executing the test, simply print all available config '
'keys and their description strings.')
return parser
class Test(object):
"""An object that represents an OpenHTF test.
Example:
def PhaseOne(test):
# Integrate more widgets
def PhaseTwo(test):
# Analyze widget integration status
Test(PhaseOne, PhaseTwo).execute()
Note that Test() objects *must* be created in the main thread, but can be
.execute()'d in a separate thread.
"""
TEST_INSTANCES = weakref.WeakValueDictionary()
HANDLED_SIGINT_ONCE = False
def __init__(self, *phases, **metadata):
# Some sanity checks on special metadata keys we automatically fill in.
if 'config' in metadata:
raise KeyError(
'Invalid metadata key "config", it will be automatically populated.')
self.created_time_millis = util.time_millis()
self.last_run_time_millis = None
self._test_options = TestOptions()
self._lock = threading.Lock()
self._executor = None
self._test_desc = TestDescriptor(
phases, test_record.CodeInfo.uncaptured(), metadata)
if conf.capture_source:
# First, we copy the phases with the real CodeInfo for them.
group = self._test_desc.phase_group.load_code_info()
# Then we replace the TestDescriptor with one that stores the test
# module's CodeInfo as well as our newly copied phases.
code_info = test_record.CodeInfo.for_module_from_stack(levels_up=2)
self._test_desc = self._test_desc._replace(
code_info=code_info, phase_group=group)
# Make sure configure() gets called at least once before Execute(). The
# user might call configure() again to override options, but we don't want
# to force them to if they want to use defaults. For default values, see
# the class definition of TestOptions.
if 'test_name' in metadata:
# Allow legacy metadata key for specifying test name.
self.configure(name=metadata['test_name'])
else:
self.configure()
@classmethod
def from_uid(cls, test_uid):
"""Get Test by UID.
Args:
test_uid: uuid for desired test.
Returns:
Test object for given by UID.
Raises:
UnrecognizedTestUidError: If the test_uid is not recognized.
"""
test = cls.TEST_INSTANCES.get(test_uid)
if not test:
raise UnrecognizedTestUidError('Test UID %s not recognized' % test_uid)
return test
@property
def uid(self):
if self._executor is not None:
return self._executor.uid
def make_uid(self):
"""Returns the next test execution's UID.
This identifier must be unique but trackable across invocations of
execute(). Therefore, it's made of four parts separated by ':'
* Process-specific (decided on process start up)
* Test descriptor-specific (decided on descriptor creation)
* Execution-specific (decided on test start)
"""
return '%s:%s:%s:%s' % (os.getpid(), self.descriptor.uid,
uuid.uuid4().hex[:16], util.time_millis())
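    # Example UID with illustrative values:
    #   '1234:5:0123456789abcdef:1600000000000'
    # i.e. <pid>:<descriptor uid>:<16 hex chars>:<epoch millis>.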
@property
def descriptor(self):
"""Static data about this test, does not change across Execute() calls."""
return self._test_desc
@property
def state(self):
"""Transient state info about the currently executing test, or None."""
with self._lock:
if self._executor:
return self._executor.test_state
def get_option(self, option):
return getattr(self._test_options, option)
def add_output_callbacks(self, *callbacks):
"""Add the given function as an output module to this test."""
self._test_options.output_callbacks.extend(callbacks)
def configure(self, **kwargs):
"""Update test-wide configuration options. See TestOptions for docs."""
# These internally ensure they are safe to call multiple times with no weird
# side effects.
known_args, _ = create_arg_parser(add_help=True).parse_known_args()
if known_args.config_help:
sys.stdout.write(conf.help_text)
sys.exit(0)
logs.configure_logging()
for key, value in six.iteritems(kwargs):
setattr(self._test_options, key, value)
@classmethod
def handle_sig_int(cls, *_):
if cls.TEST_INSTANCES:
_LOG.error('Received SIGINT, stopping all tests.')
for test in cls.TEST_INSTANCES.values():
test.abort_from_sig_int()
if not cls.HANDLED_SIGINT_ONCE:
cls.HANDLED_SIGINT_ONCE = True
# Emilio 2018-09-21: Raising this KeyboardInterrupt caused a traceback to be shown on-screen after posting the
# test to the database. There's no point.
# raise KeyboardInterrupt
# Otherwise, does not raise KeyboardInterrupt to ensure that the tests are
# cleaned up.
def abort_from_sig_int(self):
"""Abort test execution abruptly, only in response to SIGINT."""
with self._lock:
_LOG.error('Aborting %s due to SIGINT', self)
if self._executor:
# TestState str()'s nicely to a descriptive string, so let's log that
# just for good measure.
_LOG.error('Test state: %s', self._execut |
amolborcar/learnpythonthehardway | ex28.py | Python | mit | 231 | 0.004329 | '''
1. True y
2. False y
3. False y
4. True y
5 | . True y
6. False n
7. False y
8. True y
9. False y
10. | False y
11. True y
12. False y
13. True y
14. False y
15. False y
16. True n
17. True y
18. True y
19. False y
20. False y
'''
|
team23/django_backend | django_backend/templatetags/django_backend_query_tags.py | Python | bsd-3-clause | 4,480 | 0.001116 | from django import template
from django.utils.datastructures import MultiValueDict
from django.utils.html import conditional_escape as escape
from django.utils.encoding import smart_unicode, smart_str
from django.template.base import token_kwargs
import urllib
register = template.Library()
class QueryStringNode(template.Node):
def __init__(self, variables, extends, varname):
self.variables = variables
self.extends = extends
self.varname = varname
def render(self, context):
varname = self.varname
try:
extends = None
if self.extends:
extends = self.extends.resolve(context)
variables = MultiValueDict()
if self.variables:
for key in self.variables:
for value in self.variables.getlist(key):
resolved_value = value.resolve(context)
if isinstance(resolved_value, (list, tuple, set)):
variables.setlist(key, resolved_value)
else:
variables.setlist(key, [resolved_value])
except template.VariableDoesNotExist:
return ''
result = MultiValueDict()
if extends:
if isinstance(extends, MultiValueDict):
result = extends.copy()
else:
for key in extends:
result[key] = extends[key]
if variables:
for key in variables:
value = variables.getlist(key)
if value == [None]:
if key in result:
del result[key]
else:
result.setlist(key, variables.getlist(key))
def _result_to_tuples():
for key in result:
for value in result.getlist(key):
yield (key, value)
result = smart_unicode(
urllib.urlencode([(smart_str(k), smart_str(v)) for k, v in _result_to_tuples()]))
if varname:
context[varname] = result
return ''
return escape(result)
@register.tag
def query_string(parser, token):
'''
{% query_string foo='bar' %}
{% query_string foo='bar' extends request.GET %}
{% query_string foo='bar' extends request.GET as query %}
{% query_string foo='bar' extends request.GET as query dict %}
{% query_string extends request.GET %}
{% query_string foo='bar' as query dict %}
{% query_string foo='bar' as query extends request.GET %}
'''
tokens = token.split_contents()
tag_name = tokens[0]
values = tokens[1 | :]
variables = MultiValueDict()
extends = None
varname = None
variable | s_finished = False
try:
i = 0
num_values = len(values)
while i < num_values:
if values[i] == 'extends':
variables_finished = True
extends = parser.compile_filter(values[i + 1])
i += 2
continue
if values[i] == 'as':
variables_finished = True
varname = values[i + 1]
i += 2
continue
if variables_finished:
raise template.TemplateSyntaxError(u'%r\'s parameters seem to be messed up, you mixed extra variables into the remainder.' % tag_name)
parsed_variable = token_kwargs([values[i]], parser)
if not parsed_variable:
raise template.TemplateSyntaxError(u'%r\'s parameters seem to be messed up, some variable could not be parsed.' % tag_name)
for k, v in parsed_variable.iteritems():
variables.appendlist(k, v)
i += 1
except IndexError:
raise template.TemplateSyntaxError(u'%r\'s parameters seem to be messed up, really bad.' % tag_name)
return QueryStringNode(variables, extends, varname)
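# Illustrative expansion (assumed request.GET of {'page': '2'}; the key order
# of the urlencoded output may vary):
#
#   {% query_string foo='bar' extends request.GET %}  ->  "page=2&foo=bar"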
@register.filter
def append_to_params(params, value):
return [v for v in params] + [value]
@register.filter
def remove_from_params(params, value):
return [v for v in params if v != value]
@register.filter
def remove_param(params, key):
'''
{% query_string extends request.GET|remove_param:"page" %}
'''
# If it's an empty object (like None or ''), return it unaltered.
if not params:
return params
params = params.copy()
if key in params:
del params[key]
return params
|
CDSherrill/psi4 | tests/pytests/test_mp2.py | Python | lgpl-3.0 | 21,931 | 0.010214 | import pytest
from .utils import *
import psi4
pytestmark = [pytest.mark.quick, pytest.mark.mp2]
_ref_h2o_ccpvdz = {
'df': {
'HF TOTAL ENERGY': -76.0167614256151865,
'MP2 SAME-SPIN CORRELATION ENERGY': -0.0527406422061238,
'MP2 OPPOSITE-SPIN CORRELATION ENERGY': -0.1562926850310142,
'MP2 CORRELATION ENERGY': -0.2090333272371381,
'MP2 TOTAL ENERGY': -76.2257947528523232,
'SCS-MP2 CORRELATION ENERGY': -0.2051314361059251,
'SCS-MP2 TOTAL ENERGY': -76.2218928617211162,
},
'conv': {
'HF TOTAL ENERGY': -76.01678947133706,
'MP2 SAME-SPIN CORRELATION ENERGY': -0.05268120425816,
'MP2 OPPOSITE-SPIN CORRELATION ENERGY': -0.15637564436589,
'MP2 CORRELATION ENERGY': -0.20905684862405,
'MP2 TOTAL ENERGY': -76.22584631996111,
'SCS-MP2 CORRELATION ENERGY': -0.20521117465845,
'SCS-MP2 TOTAL ENERGY': -76.22200064599551,
},
} # yapf: disable
for mp2type in ['df', 'conv']:
_ref_h2o_ccpvdz[mp2type]['SCF TOTAL ENERGY'] = _ref_h2o_ccpvdz[mp2type]['HF TOTAL ENERGY']
_ref_h2o_ccpvdz[mp2type]['CURRENT REFERENCE ENERGY'] = _ref_h2o_ccpvdz[mp2type]['HF TOTAL ENERGY']
_ref_h2o_ccpvdz[mp2type]['5050SCS-MP2 CORRELATION ENERGY'] = (
0.5 * (_ref_h2o_ccpvdz[mp2type]['MP2 SAME-SPIN CORRELATION ENERGY'] +
_ref_h2o_ccpvdz[mp2type]['MP2 OPPOSITE-SPIN CORRELATION ENERGY']))
_ref_h2o_ccpvdz[mp2type]['5050SCS-MP2 TOTAL ENERGY'] = _ref_h2o_ccpvdz[mp2type][
'5050SCS-MP2 CORRELATION ENERGY'] + _ref_h2o_ccpvdz[mp2type]['HF TOTAL ENERGY']
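# Worked example (illustrative, 'df' block): the 5050 correlation energy is
# 0.5 * (-0.0527406422061238 + -0.1562926850310142) = -0.104516663618569.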
@pytest.mark.parametrize("inp", [
pytest.param({'name': 'Mp2', 'custom': 'SCS-MP2', 'options': {'mp2_type': 'df'}}, id='mp2 (df)'),
pytest.param({'name': 'Mp2', 'custom': 'MP2', 'options': {'mp2_type': 'conv'}}, id='mp2 (conv)'),
pytest.param({'name': 'Mp2', 'custom': 'SCS-MP2', 'options': {'mp2_type': 'df', 'mp2_os_scale': 1.2, 'mp2_ss_scale': 0.33333333}}, id='explicit scs mp2 (df)'),
pytest.param({'name': 'Mp2', 'custom': 'SCS-MP2', 'options': {'mp2_type': 'conv', 'os_scale': 1.2, 'ss_scale': 0.33333333}}, id='explicit scs mp2 (conv)'),
pytest.param({'name': 'Mp2', 'custom': '5050SCS-MP2', 'options': {'mp2_type': 'df', 'mp2_os_scale': 0.5, 'mp2_ss_scale': 0.5}}, id='user-def scs mp2 (df)'),
pytest.param({'name': 'Mp2', 'custom': '5050SCS-MP2', 'options': {'mp2_type': 'conv', 'os_scale': 0.5, 'ss_scale': 0.5}}, id='user-def scs mp2 (conv)'),
]) # yapf: disable
def test_scsmp2(inp):
"""Formerly known as dfmp2-4"""
h2o = psi4.geometry("""
O
H 1 1.0
H 1 1.0 2 90.0
""")
psi4.set_options({'basis': 'cc-pvdz'})
psi4.set_options(inp['options'])
ene, wfn = psi4.energy(inp['name'], return_wfn=True)
ref_block = _ref_h2o_ccpvdz[inp['options']['mp2_type']]
#ref_corl = ref_block[inp['pv'] + ' CORRELATION ENERGY']
#ref_tot = ref_block[inp['pv'] + ' TOTAL ENERGY']
ref_corl = ref_block['MP2 CORRELATION ENERGY']
ref_tot = ref_block['MP2 TOTAL ENERGY']
ref_custom_corl = ref_block[inp['custom'] + ' CORRELATION ENERGY']
ref_custom_tot = ref_block[inp['custom'] + ' TOTAL ENERGY']
for obj in [psi4.core, wfn]:
for pv in [
'HF TOTAL ENERGY', 'SCF TOTAL ENERGY', 'MP2 SAME-SPIN CORRELATION ENERGY',
'MP2 OPPOSITE-SPIN CORRELATION ENERGY', 'MP2 CORRELATION ENERGY', 'MP2 TOTAL ENERGY',
'SCS-MP2 CORRELATION ENERGY', 'SCS-MP2 TOTAL ENERGY', 'CURRENT REFERENCE ENERGY'
]:
| assert compare_values(ref_bl | ock[pv], obj.variable(pv), 5, pv)
if any((x in inp['options'] for x in ['os_scale', 'ss_scale', 'mp2_os_scale', 'mp2_ss_scale'])):
assert compare_values(ref_custom_corl, obj.variable('CUSTOM SCS-MP2 CORRELATION ENERGY'), 5,
'custom scsmp2 corl')
assert compare_values(ref_custom_tot, obj.variable('CUSTOM SCS-MP2 TOTAL ENERGY'), 5, 'custom scsmp2 ')
assert compare_values(ref_corl, obj.variable('CURRENT CORRELATION ENERGY'), 5, 'current corl')
assert compare_values(ref_tot, obj.variable('CURRENT ENERGY'), 5, 'current')
assert compare_values(ref_tot, ene, 5, 'return')
assert compare_values(ref_tot, wfn.energy(), 5, 'wfn')
@pytest.fixture
def clsd_open_pmols():
mols = {
'hf':
psi4.core.Molecule.from_string("""
H
F 1 0.917
"""),
'bh_h2p':
psi4.core.Molecule.from_string("""
1 2
B 0.10369114 0.00000000 0.00000000
H -1.13269886 0.00000000 0.00000000
H 3.00000000 0.37149000 0.00000000
H 3.00000000 -0.37149000 0.00000000
"""),
}
return mols
# yapf: disable
_ref_module = {scftype: {ref: {frz: {mp2type: {} for mp2type in ['conv', 'df', 'cd']} for frz in ['true', 'false']} for ref in ['rhf', 'uhf', 'rohf']} for scftype in ['pk', 'df']}
_ref_module['df']['rhf']['HF TOTAL ENERGY'] = -100.019400605629
_ref_module['df']['uhf']['HF TOTAL ENERGY'] = -25.945130559147
_ref_module['df']['rohf']['HF TOTAL ENERGY'] = -25.943606522029
_ref_module['pk']['rhf']['HF TOTAL ENERGY'] = -100.01941126902270
_ref_module['pk']['uhf']['HF TOTAL ENERGY'] = -25.94513842869638
#_ref_module['pk']['rohf']['HF TOTAL ENERGY'] =
# <<< scf DF, fc >>>
_ref_module['df']['rhf']['true']['conv']['MP2 CORRELATION ENERGY'] = -0.201612517228
_ref_module['df']['rhf']['true']['conv']['MP2 TOTAL ENERGY'] = -100.221013122857
_ref_module['df']['rhf']['true']['df']['MP2 CORRELATION ENERGY'] = -0.201610660387
_ref_module['df']['rhf']['true']['df']['MP2 TOTAL ENERGY'] = -100.221011266016
_ref_module['df']['rhf']['true']['df']['MP2 TOTAL GRADIENT'] = psi4.core.Matrix.from_list( # dfmp2 findif-5 fc df+df
[[ 0.00000000000000, 0.00000000000000, 0.00314716362539],
[ 0.00000000000000, 0.00000000000000, -0.00314716362539]])
_ref_module['df']['rhf']['true']['cd']['MP2 CORRELATION ENERGY'] = -0.201609396752
_ref_module['df']['rhf']['true']['cd']['MP2 TOTAL ENERGY'] = -100.221010002381
_ref_module['df']['uhf']['true']['conv']['MP2 CORRELATION ENERGY'] = -0.058421122206
_ref_module['df']['uhf']['true']['conv']['MP2 TOTAL ENERGY'] = -26.003551681354
_ref_module['df']['uhf']['true']['df']['MP2 CORRELATION ENERGY'] = -0.058390006825
_ref_module['df']['uhf']['true']['df']['MP2 TOTAL ENERGY'] = -26.003520565972
_ref_module['df']['uhf']['true']['df']['MP2 TOTAL GRADIENT'] = psi4.core.Matrix.from_list( # dfmp2 findif-5 fc df+df
[[ 0.00000000000000, 0.00000000000000, 0.01231996225662],
[ 0.00000000000000, 0.00000000000000, -0.01186374280678],
[ 0.00000000000000, 0.01031743020277, -0.00022810972492],
[ 0.00000000000000, -0.01031743020277, -0.00022810972492]])
_ref_module['df']['uhf']['true']['cd']['MP2 CORRELATION ENERGY'] = -0.058409837177
_ref_module['df']['uhf']['true']['cd']['MP2 TOTAL ENERGY'] = -26.003540396324
_ref_module['df']['rohf']['true']['conv']['MP2 CORRELATION ENERGY'] = -0.060939211739
_ref_module['df']['rohf']['true']['conv']['MP2 TOTAL ENERGY'] = -26.004545733768
_ref_module['df']['rohf']['true']['df']['MP2 CORRELATION ENERGY'] = -0.059372748391
_ref_module['df']['rohf']['true']['df']['MP2 TOTAL ENERGY'] = -26.002979270420
#_ref_module['df']['rohf']['true']['df']['MP2 TOTAL GRADIENT'] = psi4.core.Matrix.from_list(
_ref_module['df']['rohf']['true']['cd']['MP2 CORRELATION ENERGY'] = -0.059393510962
_ref_module['df']['rohf']['true']['cd']['MP2 TOTAL ENERGY'] = -26.003000032991
# <<< scf DF, nfc >>>
#_ref_module['df']['rhf']['false']['conv']['MP2 CORRELATION ENERGY'] =
#_ref_m |
faylau/oVirt3.3WebAPITest | src/TestData/DataCenter/ITC01010401_UpdateUninitializedDC.py | Python | apache-2.0 | 1,565 | 0.004496 | #encoding:utf-8
__authors__ = ['"Liu Fei" <fei.liu@cs2c.com.cn>']
__version__ = "V0.1"
'''
# ChangeLog:
#--------------------------------------------------------------------- | ------------
# Version Date Desc | Author
#---------------------------------------------------------------------------------
# V0.1            2014/10/24         Initial version                           Liu Fei
#---------------------------------------------------------------------------------
'''
'''---------------------------------------------------------------------------------
@PreData
---------------------------------------------------------------------------------'''
pre_dc_name = 'DC-ITC01010401-1'
pre_dc_info = '''
<data_center>
<name>%s</name>
<local>true</local>
<version minor="1" major="3"/>
</data_center>
''' % pre_dc_name
'''---------------------------------------------------------------------------------
@note: TestData
---------------------------------------------------------------------------------'''
test_dc_name = 'DC-ITC01010401-2'
test_dc_info = '''
<data_center>
<name>%s</name>
<local>false</local>
<version minor="3" major="3"/>
</data_center>
''' % test_dc_name
'''---------------------------------------------------------------------------------
@note: ExpectedResult
---------------------------------------------------------------------------------'''
expected_status_code = 200
expected_info = '''
''' |
OpenGenus/cosmos | scripts/global-metadata.py | Python | gpl-3.0 | 1,198 | 0 | import json
import pathlib
import collections
avoid_extensions = [
"",
# ".md",
# ".png",
# ".csv",
# ".class",
# ".data",
# ".in",
# ".jpeg",
# ".jpg",
# ".out",
# ".textclipping",
# ".properties",
# ".txt",
# ".sbt",
]
avoid_dirs = ["project", "test", "img", | "image", "images"]
global_metadata = collections.defaultdict(dict)
original_paths = collections.defaultdict(str)
for path in pathlib.Path(__file__).parents[1].glob(
"scripts/metadata/code/**/**/*"):
if (path.suffix
and not any(elem in list(path.parts) | for elem in avoid_dirs)
and path.suffix.lower() not in avoid_extensions):
original_paths[path.parts[-2]] = "/".join(path.parts[:-2])
for algo in original_paths:
filename = pathlib.Path("{}/{}/data.json".format(original_paths[algo],
algo))
with open(filename) as fh:
existing_data = json.load(fh)
global_metadata[original_paths[algo].split('/')[-2]][algo] = existing_data
filename = pathlib.Path("scripts/global_metadata.json")
json_dump = json.dumps(global_metadata, indent=2)
filename.write_text(json_dump)
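# Resulting layout of global_metadata.json (illustrative):
# {"<category>": {"<algorithm>": {...contents of its data.json...}, ...}, ...}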
|
x007007007/pyScreenBrightness | src/pyScreenBrightness/base.py | Python | mit | 1,127 | 0.002662 | # -*- coding:utf-8 -*-
import abc
import platform
from UserList import UserList
class Monitor(object):
@abc.abstractmethod
def current(self):
pass
@abc.abstractmethod
def percent(self, range):
pass
@abc.abstractmethod
def reset(self):
pass
@abc.abstractmethod
def max(self):
pass
@abc.abstractmethod
def m | in(self):
pass
class Monitors(UserList):
@abc.abstractmethod
def percent(self, range):
pass
@abc.abstractmethod
def reset(self):
pass
@abc.abstractmethod
def max(self):
pass
@abc.abstractmethod
def min(self):
pass
def get_monitors():
if platform.system() == "Windows":
from .driver_win_wmi import WinWMIMonitors
return WinWMIMoni | tors()
elif platform.system() == "Darwin":
from .driver_mac import MacMonitors
return MacMonitors()
elif platform.system() == "Linux":
from .driver_linux import LinuxMonitors
return LinuxMonitors()
else:
raise OSError()
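# Illustrative usage sketch (assumed, based on the abstract API above):
#
#   monitors = get_monitors()
#   monitors.percent(50)   # hypothetical call: set all monitors to 50%
#   monitors.reset()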
|
roadhead/satchmo | satchmo/payment/modules/protx/views.py | Python | bsd-3-clause | 3,443 | 0.009875 | """Protx checkout custom views"""
from django.utils.translation import ugettext as _
from satchmo.configuration | import config_get_group
from satchmo.payment.views import payship, confirm
from django.shortcuts import render_to_response
from django.template import RequestContext
from django import http
import logging
log = logging.getLogger('protx. | views')
def pay_ship_info(request):
return payship.credit_pay_ship_info(request, config_get_group('PAYMENT_PROTX'), template="checkout/protx/pay_ship.html")
def confirm_info(request, template='checkout/protx/confirm.html', extra_context={}):
payment_module = config_get_group('PAYMENT_PROTX')
controller = confirm.ConfirmController(request, payment_module)
controller.templates['CONFIRM'] = template
controller.extra_context = extra_context
controller.onForm = secure3d_form_handler
controller.confirm()
return controller.response
def confirm_secure3d(request, secure3d_template='checkout/secure3d_form.html',
confirm_template='checkout/confirm.html', extra_context={}):
"""Handles confirming an order and processing the charges when secured by secure3d.
"""
payment_module = config_get_group('PAYMENT_PROTX')
controller = confirm.ConfirmController(request, payment_module, extra_context=extra_context)
controller.template['CONFIRM'] = confirm_template
if not controller.sanity_check():
return controller.response
auth3d = request.session.get('3D', None)
if not auth3d:
controller.processorMessage = _('3D Secure transaction expired. Please try again.')
else:
if request.method == "POST":
returnMD = request.POST.get('MD', None)
if not returnMD:
template = payment_module.lookup_template(secure3d_template)
ctx ={'order': controller.order, 'auth': auth3d }
return render_to_response(template, ctx, RequestContext(request))
elif returnMD == auth3d['MD']:
pares = request.POST.get('PaRes', None)
controller.processor.prepareData(controller.order)
controller.processor.prepareData3d(returnMD, pares)
if controller.process():
return controller.onSuccess(controller)
else:
controller.processorMessage = _('3D Secure transaction was not approved by payment gateway. Please contact us.')
else:
            template = payment_module.lookup_template(secure3d_template)
ctx =RequestContext(request, {
'order': controller.order, 'auth': auth3d
})
return render_to_response(template, ctx)
return secure3d_form_handler(controller)
def secure3d_form_handler(controller):
"""At the confirmation step, protx may ask for a secure3d authentication. This method
    catches that and, if so, redirects to that step; otherwise it renders the form as normal"""
if controller.processorReasonCode == '3DAUTH':
log.debug('caught secure 3D request for order #%i, putting 3D into session as %s',
controller.order.id, controller.processorReasonCode)
redirectUrl = controller.lookup_url('satchmo_checkout-secure3d')
controller.processor.response['TermUrl'] = redirectUrl
        # NOTE: assumes ConfirmController keeps a reference to the request.
        controller.request.session['3D'] = controller.processorReasonCode
return http.HttpResponseRedirect(redirectUrl)
return controller._onForm(controller)
|
our-city-app/oca-backend | src/rogerthat/bizz/jobs/__init__.py | Python | apache-2.0 | 17,649 | 0.00289 | # -*- coding: utf-8 -*-
# Copyright 2020 Green Valley Belgium NV
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @@license_version:1.7@@
from __future__ import unicode_literals
from datetime import datetime
import logging
from types import NoneType
from google.appengine.ext import ndb, deferred, db
from google.appengine.ext.ndb.query import Cursor
from typing import Optional, List, Union, Tuple
from mcfw.rpc import returns, arguments
from rogerthat.bizz.communities.communities import get_community
from rogerthat.bizz.jobs.matching import rebuild_matches_check_current
from rogerthat.bizz.jobs.notifications import calculate_next_reminder
from rogerthat.bizz.jobs.translations import localize as localize_jobs
from rogerthat.capi.jobs import newJobs
from rogerthat.consts import JOBS_WORKER_QUEUE
from rogerthat.dal.mobile import get_mobile_key_by_account
from rogerthat.dal.profile import get_user_profile
from rogerthat.models import Nd | bUserProfile
from rogerthat.models.jobs import JobOffer, JobMatchingCriteria, JobMatchingCriteriaNotifications, JobMatch, \
JobMatchStatus, JobNotificationSchedule, JobOfferSourceType
from rogerthat.rpc import users
from rogerthat.rpc.models import RpcCAPI | Call, RpcException
from rogerthat.rpc.rpc import mapping, logError, CAPI_KEYWORD_ARG_PRIORITY, \
PRIORITY_HIGH
from rogerthat.service.api.messaging import add_chat_members
from rogerthat.to.jobs import GetJobsResponseTO, JobOfferTO, NewJobsResponseTO, \
NewJobsRequestTO, SaveJobsCriteriaResponseTO, GetJobsCriteriaResponseTO, \
JobKeyLabelTO, JobCriteriaLocationTO, JobCriteriaNotificationsTO, JobCriteriaGeoLocationTO, \
SaveJobsCriteriaRequestTO, JobOfferChatActionTO, JobOfferOpenActionTO, GetJobChatInfoResponseTO, JobChatAnonymousTO, \
CreateJobChatResponseTO, CreateJobChatRequestTO, JobsInfoTO, JobOfferProviderTO
from rogerthat.translations import localize
from rogerthat.utils import now, get_epoch_from_datetime
from rogerthat.utils.location import coordinates_to_city
from solutions.common.jobs.models import JobSolicitation
TAG_JOB_CHAT = '__rt__.jobs_chat'
CONTRACT_TYPES = [
'contract_type_001',
'contract_type_002',
'contract_type_003',
'contract_type_004',
'contract_type_005',
'contract_type_006',
'contract_type_007',
]
JOB_DOMAINS = [
'job_domain_001',
'job_domain_002',
'job_domain_003',
'job_domain_004',
'job_domain_005',
'job_domain_006',
'job_domain_007',
'job_domain_008',
'job_domain_009',
'job_domain_010',
'job_domain_011',
'job_domain_012',
'job_domain_013',
'job_domain_014',
'job_domain_015',
'job_domain_016',
'job_domain_017',
'job_domain_018',
'job_domain_019',
'job_domain_020',
'job_domain_021',
'job_domain_022',
'job_domain_023',
'job_domain_024',
]
def get_job_criteria(app_user):
# type: (users.User) -> GetJobsCriteriaResponseTO
user_profile = get_user_profile(app_user)
response = GetJobsCriteriaResponseTO()
response.location = JobCriteriaLocationTO()
response.location.address = None
response.location.geo = None
response.location.distance = 20000 # 20 Km
response.contract_types = []
response.job_domains = []
response.keywords = []
response.notifications = JobCriteriaNotificationsTO()
response.notifications.timezone = None
response.notifications.how_often = JobNotificationSchedule.NEVER
response.notifications.delivery_day = 'monday'
response.notifications.delivery_time = 64800 # 18:00
job_criteria = JobMatchingCriteria.create_key(app_user).get() # type: JobMatchingCriteria
for contract_type in CONTRACT_TYPES:
to = JobKeyLabelTO()
to.key = contract_type
to.label = localize_jobs(user_profile.language, contract_type)
to.enabled = contract_type in job_criteria.contract_types if job_criteria else False
response.contract_types.append(to)
response.contract_types.sort(key=lambda item: item.label)
for domain in JOB_DOMAINS:
to = JobKeyLabelTO()
to.key = domain
to.label = localize_jobs(user_profile.language, domain)
to.enabled = domain in job_criteria.job_domains if job_criteria else False
response.job_domains.append(to)
response.job_domains.sort(key=lambda item: item.label)
if job_criteria:
response.active = job_criteria.active
response.location = JobCriteriaLocationTO()
response.location.address = job_criteria.address
response.location.geo = JobCriteriaGeoLocationTO()
response.location.geo.latitude = job_criteria.geo_location.lat
response.location.geo.longitude = job_criteria.geo_location.lon
response.location.distance = job_criteria.distance
response.keywords = job_criteria.keywords
if job_criteria.notifications:
response.notifications.how_often = job_criteria.notifications.how_often
if job_criteria.notifications.delivery_day:
response.notifications.delivery_day = job_criteria.notifications.delivery_day
if job_criteria.notifications.delivery_time:
response.notifications.delivery_time = job_criteria.notifications.delivery_time
else:
        response.active = True  # first use: no saved criteria yet, default to active
return response
@returns(SaveJobsCriteriaResponseTO)
@arguments(app_user=users.User, request=SaveJobsCriteriaRequestTO)
def save_job_criteria(app_user, request):
# type: (users.User, SaveJobsCriteriaRequestTO) -> SaveJobsCriteriaResponseTO
job_criteria_key = JobMatchingCriteria.create_key(app_user)
job_criteria = job_criteria_key.get() # type: JobMatchingCriteria
new_job_profile = not job_criteria
if new_job_profile:
if not request.criteria:
return SaveJobsCriteriaResponseTO(active=False, new_profile=new_job_profile)
job_criteria = JobMatchingCriteria(key=job_criteria_key)
job_criteria.last_load_request = datetime.utcnow()
job_criteria.demo = get_community(get_user_profile(app_user).community_id).demo
original_job_criteria = None
else:
original_job_criteria = job_criteria.to_dict(exclude=['notifications', 'active'])
notifications = None
job_criteria.active = request.active
if request.criteria:
location = request.criteria.location
notifications = request.criteria.notifications
if location.geo:
job_criteria.geo_location = ndb.GeoPt(location.geo.latitude, location.geo.longitude)
if location.address:
job_criteria.address = location.address
else:
job_criteria.address = coordinates_to_city(job_criteria.geo_location.lat,
job_criteria.geo_location.lon)
job_criteria.distance = location.distance
job_criteria.contract_types = sorted(request.criteria.contract_types)
job_criteria.job_domains = sorted(request.criteria.job_domains)
job_criteria.keywords = sorted(request.criteria.keywords)
if not job_criteria.job_domains:
raise RpcException('at_least_one_job_domain_required', app_user)
if not job_criteria.contract_types:
raise RpcException('at_least_one_contract_type_required', app_user)
updated_criteria = job_criteria.to_dict(exclude=['notifications', 'active'])
should_build_matches = original_job_criteria != updated_criteria
should_calculate_reminder = should_build_matches
should_clear_notifications = should_build_matches
og_notifications = job_criteria.notifications and job_c |
anupkdas-nus/global_synapses | pyNN-dispackgaes/nest/standardmodels/synapses.py | Python | gpl-3.0 | 7,616 | 0.003283 | """
Synapse Dynamics classes for nest
:copyright: Copyright 2006-2016 by the PyNN team, see AUTHORS.
:license: CeCILL, see LICENSE for details.
"""
import nest
from pyNN.standardmodels import synapses, build_translations
from pyNN.nest.synapses import NESTSynapseMixin
import logging
from ..conversion import make_sli_compatible
logger = logging.getLogger("PyNN")
class StaticSynapse(synapses.StaticSynapse, NESTSynapseMixin):
translations = build_translations(
('weight', 'weight', 1000.0),
('delay', 'delay')
)
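    # Illustrative reading of the tuple above: a PyNN weight of 0.004 (uS)
    # translates to a NEST weight of 0.004 * 1000.0 = 4.0 (nS).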
nest_name = 'static_synapse'
class STDPMechanism(synapses.STDPMechanism, NESTSynapseMixin):
"""Specification of STDP models."""
base_translations = build_translations(
('weight', 'weight', 1000.0), # nA->pA, uS->nS
('delay', 'delay'),
('dendritic_delay_fraction', 'dendritic_delay_fraction')
) # will be extended by translations from timing_dependence, etc.
def __init__(self, timing_dependence=None, weight_dependence=None,
voltage_dependence=None, dendritic_delay_fraction=1.0,
weight=0.0, delay=None):
if dendritic_delay_fraction != 1:
raise ValueError("NEST does not currently support axonal delays: "
"for the purpose of STDP calculations all delays "
"are assu | med to be dendr | itic.")
# could perhaps support axonal delays using parrot neurons?
super(STDPMechanism, self).__init__(timing_dependence, weight_dependence,
voltage_dependence, dendritic_delay_fraction,
weight, delay)
def _get_nest_synapse_model(self):
base_model = self.possible_models
if isinstance(base_model, set):
logger.warning("Several STDP models are available for these connections:")
logger.warning(", ".join(model for model in base_model))
base_model = list(base_model)[0]
logger.warning("By default, %s is used" % base_model)
available_models = nest.Models(mtype='synapses')
if base_model not in available_models:
raise ValueError("Synapse dynamics model '%s' not a valid NEST synapse model. "
"Possible models in your NEST build are: %s" % (base_model, available_models))
# Defaults must be simple floats, so we use the NEST defaults
# for any inhomogeneous parameters, and set the inhomogeneous values
# later
synapse_defaults = {}
for name, value in self.native_parameters.items():
if value.is_homogeneous:
value.shape = (1,)
synapse_defaults[name] = value.evaluate(simplify=True)
synapse_defaults.pop("dendritic_delay_fraction")
synapse_defaults.pop("w_min_always_zero_in_NEST")
# Tau_minus is a parameter of the post-synaptic cell, not of the connection
synapse_defaults.pop("tau_minus")
synapse_defaults = make_sli_compatible(synapse_defaults)
nest.SetDefaults(base_model + '_lbl', synapse_defaults)
return base_model + '_lbl'
class TsodyksMarkramSynapse(synapses.TsodyksMarkramSynapse, NESTSynapseMixin):
__doc__ = synapses.TsodyksMarkramSynapse.__doc__
translations = build_translations(
('weight', 'weight', 1000.0),
('delay', 'delay'),
('U', 'U'),
('tau_rec', 'tau_rec'),
('tau_facil', 'tau_fac')
)
nest_name = 'tsodyks_synapse'
class SimpleStochasticSynapse(synapses.SimpleStochasticSynapse, NESTSynapseMixin):
translations = build_translations(
('weight', 'weight', 1000.0),
('delay', 'delay'),
('p', 'p'),
)
nest_name = 'simple_stochastic_synapse'
class StochasticTsodyksMarkramSynapse(synapses.StochasticTsodyksMarkramSynapse, NESTSynapseMixin):
translations = build_translations(
('weight', 'weight', 1000.0),
('delay', 'delay'),
('U', 'U'),
('tau_rec', 'tau_rec'),
('tau_facil', 'tau_fac')
)
nest_name = 'stochastic_stp_synapse'
class MultiQuantalSynapse(synapses.MultiQuantalSynapse, NESTSynapseMixin):
translations = build_translations(
('weight', 'weight', 1000.0),
('delay', 'delay'),
('U', 'U'),
('n', 'n'),
('tau_rec', 'tau_rec'),
('tau_facil', 'tau_fac')
)
nest_name = 'quantal_stp_synapse'
class AdditiveWeightDependence(synapses.AdditiveWeightDependence):
__doc__ = synapses.AdditiveWeightDependence.__doc__
translations = build_translations(
('w_max', 'Wmax', 1000.0), # unit conversion
('w_min', 'w_min_always_zero_in_NEST'),
)
possible_models = set(['stdp_synapse']) #,'stdp_synapse_hom'])
extra_parameters = {
'mu_plus': 0.0,
'mu_minus': 0.0
}
def __init__(self, w_min=0.0, w_max=1.0):
if w_min != 0:
raise Exception("Non-zero minimum weight is not supported by NEST.")
synapses.AdditiveWeightDependence.__init__(self, w_min, w_max)
class MultiplicativeWeightDependence(synapses.MultiplicativeWeightDependence):
__doc__ = synapses.MultiplicativeWeightDependence.__doc__
translations = build_translations(
('w_max', 'Wmax', 1000.0), # unit conversion
('w_min', 'w_min_always_zero_in_NEST'),
)
possible_models = set(['stdp_synapse']) #,'stdp_synapse_hom'])
extra_parameters = {
'mu_plus': 1.0,
'mu_minus': 1.0
}
def __init__(self, w_min=0.0, w_max=1.0):
if w_min != 0:
raise Exception("Non-zero minimum weight is not supported by NEST.")
synapses.MultiplicativeWeightDependence.__init__(self, w_min, w_max)
class AdditivePotentiationMultiplicativeDepression(synapses.AdditivePotentiationMultiplicativeDepression):
__doc__ = synapses.AdditivePotentiationMultiplicativeDepression.__doc__
translations = build_translations(
('w_max', 'Wmax', 1000.0), # unit conversion
('w_min', 'w_min_always_zero_in_NEST'),
)
possible_models = set(['stdp_synapse']) #,'stdp_synapse_hom'])
extra_parameters = {
'mu_plus': 0.0,
'mu_minus': 1.0
}
def __init__(self, w_min=0.0, w_max=1.0):
if w_min != 0:
raise Exception("Non-zero minimum weight is not supported by NEST.")
synapses.AdditivePotentiationMultiplicativeDepression.__init__(self, w_min, w_max)
class GutigWeightDependence(synapses.GutigWeightDependence):
__doc__ = synapses.GutigWeightDependence.__doc__
translations = build_translations(
('w_max', 'Wmax', 1000.0), # unit conversion
('w_min', 'w_min_always_zero_in_NEST'),
('mu_plus', 'mu_plus'),
('mu_minus', 'mu_minus'),
)
possible_models = set(['stdp_synapse']) #,'stdp_synapse_hom'])
def __init__(self, w_min=0.0, w_max=1.0, mu_plus=0.5, mu_minus=0.5):
if w_min != 0:
raise Exception("Non-zero minimum weight is not supported by NEST.")
synapses.GutigWeightDependence.__init__(self, w_min, w_max)
class SpikePairRule(synapses.SpikePairRule):
__doc__ = synapses.SpikePairRule.__doc__
translations = build_translations(
('tau_plus', 'tau_plus'),
('tau_minus', 'tau_minus'), # defined in post-synaptic neuron
('A_plus', 'lambda'),
('A_minus', 'alpha', 'A_minus/A_plus', 'alpha*lambda'),
)
possible_models = set(['stdp_synapse']) #,'stdp_synapse_hom'])
|
pinballwizard/phone | manage.py | Python | lgpl-3.0 | 248 | 0 | #!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "phone.settings")
| from django.core.management import execute_from_command_l | ine
execute_from_command_line(sys.argv)
|
missionpinball/mpf_mc | mpfmc/tests/test_Assets.py | Python | mit | 13,153 | 0.000228 | import time
from mpfmc.tests.MpfMcTestCase import MpfMcTestCase
class TestAssets(MpfMcTestCase):
def get_machine_path(self):
return 'tests/machine_files/assets_and_image'
def get_config_file(self):
return 'test_asset_loading.yaml'
def test_machine_wide_asset_loading(self):
# test that the images asset class gets built correctly
self.assertTrue(hasattr(self.mc, 'images'))
self.assertTrue(self.mc.asset_manager._asset_classes)
self.assertEqual(self.mc.asset_manager._asset_classes[0].path_string, 'images')
# tests that assets are registered as expected with various conditions
# /images folder
self.assertIn('image1', self.mc.images) # .gif
self.assertIn('image2', self.mc.images) # .jpg
self.assertIn('image3', self.mc.images) # .png
# test subfolders listed in assets:images machine-wide config folders
self.assertIn('image4', self.mc.images) # /images/preload
self.assertIn('image5', self.mc.images) # /images/on_demand
# test images from subfolder not listed in assets:images
self.assertIn('image11', self.mc.images) # /images/custom1
# test subfolder under another subfolder listed in assets:images
self.assertIn('image14', self.mc.images) # /images/preload/subfolder
# test images from the images: section that have names configured to be
# different from their file names
self.assertIn('image_12_new_name', self.mc.images) # image12.png
# custom1/image13.png
self.assertIn('image_13_new_name', self.mc.images)
# test that the images that were renamed were not also loaded based on
# their original names
self.assertNotIn('image12', self.mc.images)
self.assertNotIn('image13', self.mc.images)
# test that config dicts are merged and/or overwritten properly
# test custom k/v pair from default config based on the folder the
# asset was in
self.assertEqual(self.mc.images['image4'].config['test_key'],
'test_value')
self.assertEqual(self.mc.images['image14'].config['test_key'],
'test_value')
# test custom k/v pair from asset entry in the images: section
self.assertEqual(self.mc.images['image3'].config['test_key'],
'test_value_override3')
# same as above, but test that it also works when the asset name is
# different from the file name
self.assertEqual(
self.mc.images['image_12_new_name'].config['test_key'],
'test_value_override12')
# Test that mode assets were loaded properly
self.assertIn('image6', self.mc.images)
self.assertIn('image7', self.mc.images)
self.assertIn('image8', self.mc.images)
self.assertIn('image9', self.mc.images)
self.assertIn('image10', self.mc.images)
# Make sure all the assets are loaded. Wait if not
while (self.mc.asset_manager.num_assets_to_load <
self.mc.asset_manager.num_assets_loaded):
time.sleep(.1)
self.advance_time()
# Need to wait a bit since the loading was a separate thread
self.advance_time(.1)
# Make sure the ones that should have loaded on startup actually loaded
self.assertTrue(self.mc.images['image1'].loaded)
self.assertFalse(self.mc.images['image1'].loading)
self.assertFalse(self.mc.images['image1'].unloading)
self.assertTrue(self.mc.images['image2'].loaded)
self.assertFalse(self.mc.images['image2'].loading)
self.assertFalse(self.mc.images['image2'].unloading)
self.assertTrue(self.mc.images['image3'].loaded)
self.assertFalse(self.mc.images['image3'].loading)
self.assertFalse(self.mc.images['image3'].unloading)
self.assertTrue(self.mc.images['image8'].loaded)
self.assertFalse(self.mc.images['image8'].loading)
self.assertFalse(self.mc.images['image8'].unloading)
self.assertTrue(self.mc.images['image2'].loaded)
self.assertFalse(self.mc.images['image2'].loading)
self.assertFalse(self.mc.images['image2'].unloading)
self.assertTrue(self.mc.images['image4'].loaded)
self.assertFalse(self.mc.images['image4'].loading)
self.assertFalse(self.mc.images['image4'].unloading)
self.assertTrue(self.mc.images['image7'].loaded)
self.assertFalse(self.mc.images['image7'].loading)
self.assertFalse(self.mc.images['image7'].unloading)
self.assertTrue(self.mc.images['image11'].loaded)
self.assertFalse(self.mc.images['image11'].loading)
self.assertFalse(self.mc.images['image11'].unloading)
self.assertTrue(self.mc.images['image_12_new_name'].loaded)
self.assertFalse(self.mc.images['image_12_new_name'].loading)
self.assertFalse(self.mc.images['image_12_new | _name'].unloading | )
self.assertTrue(self.mc.images['image_13_new_name'].loaded)
self.assertFalse(self.mc.images['image_13_new_name'].loading)
self.assertFalse(self.mc.images['image_13_new_name'].unloading)
# Make sure the ones that should not have loaded on startup didn't load
self.assertFalse(self.mc.images['image5'].loaded)
self.assertFalse(self.mc.images['image5'].loading)
self.assertFalse(self.mc.images['image5'].unloading)
self.assertFalse(self.mc.images['image9'].loaded)
self.assertFalse(self.mc.images['image9'].loading)
self.assertFalse(self.mc.images['image9'].unloading)
self.assertFalse(self.mc.images['image10'].loaded)
self.assertFalse(self.mc.images['image10'].loading)
self.assertFalse(self.mc.images['image10'].unloading)
self.assertFalse(self.mc.images['image6'].loaded)
self.assertFalse(self.mc.images['image6'].loading)
self.assertFalse(self.mc.images['image6'].unloading)
# Start the mode and make sure those assets load
self.mc.modes['mode1'].start()
self.advance_time()
# Give it a second to load. This file is tiny, so it shouldn't take
# this long
time.sleep(.001)
self.advance_time(.1)
for x in range(10):
if not self.mc.images['image9'].loaded or not self.mc.images['image6'].loaded:
time.sleep(.1)
self.advance_time(.1)
self.assertTrue(self.mc.images['image9'].loaded)
self.assertFalse(self.mc.images['image9'].loading)
self.assertFalse(self.mc.images['image9'].unloading)
self.assertTrue(self.mc.images['image6'].loaded)
self.assertFalse(self.mc.images['image6'].loading)
self.assertFalse(self.mc.images['image6'].unloading)
# test mode stop which should unload those assets
self.mc.modes['mode1'].stop()
for x in range(10):
if self.mc.images['image9'].loaded:
self.assertTrue(self.mc.images['image9'].unloading)
self.advance_time(.1)
self.assertFalse(self.mc.images['image9'].loaded)
self.assertFalse(self.mc.images['image9'].loading)
self.assertFalse(self.mc.images['image9'].unloading)
def test_random_asset_group(self):
# three assets, no weights
# make sure the asset group was created
self.assertIn('group1', self.mc.images)
# make sure the randomness is working. To test this, we request the
# asset 10,000 times and then count the results and assume that each
        # should be 3,333 +- 500 just to make sure the test never fails spuriously.
res = list()
for x in range(10000):
res.append(self.mc.images['group1'].image)
self.assertAlmostEqual(3333, res.count(self.mc.images['image1']),
delta=500)
self.assertAlmostEqual(3333, res.count(self.mc.images['image2']),
delta=500)
self.assertAlmostEqual(3333, res.count(self.mc.images['image3']),
delta=50 |
MonicaHsu/truvaluation | venv/lib/python2.7/uuid.py | Python | mit | 21,828 | 0.00197 | r"""UUID objects (universally unique identifiers) according to RFC 4122.
This module provides immutable UUID objects (class UUID) and the functions
uuid1(), uuid3(), uuid4(), uuid5() for generating version 1, 3, 4, and 5
UUIDs as specified in RFC 4122.
If all you want is a unique ID, you should probably call uuid1() or uuid4().
Note that uuid1() may compromise privacy since it creates a UUID containing
the computer's network address. uuid4() creates a random UUID.
Typical usage:
>>> import uuid
# make a UUID based on the host ID and current time
>>> uuid.uuid1()
UUID('a8098c1a-f86e-11da-bd1a-00112444be1e')
# make a UUID using an MD5 hash of a namespace UUID and a name
>>> uuid.uuid3(uuid.NAMESPACE_DNS, 'python.org')
UUID('6fa459ea-ee8a-3ca4-894e-db77e160355e')
# make a random UUID
>>> uuid.uuid4()
UUID('16fd2706-8baf-433b-82eb-8c7fada847da')
# make a UUID using a SHA-1 hash of a namespace UUID and a name
>>> uuid.uuid5(uuid.NAMESPACE_DNS, 'python.org')
UUID('886313e1-3b8a-5372-9b90-0c9aee199e5d')
# make a UUID from a string of hex digits (braces and hyphens ignored)
>>> x = uuid.UUID('{00010203-0405-0607-0809-0a0b0c0d0e0f}')
# convert a UUID to a string of hex digits in standard form
>>> str(x)
'00010203-0405-0607-0809-0a0b0c0d0e0f'
# get the raw 16 bytes of the UUID
>>> x.bytes
'\x00\x01\x02\x03\x04\x05\x06\x07\x08\t\n\x0b\x0c\r\x0e\x0f'
# make a UUID from a 16-byte string
>>> uuid.UUID(bytes=x.bytes)
UUID('00010203-0405-0607-0809-0a0b0c0d0e0f')
"""
__author__ = 'Ka-Ping Yee <ping@zesty.ca>'
RESERVED_NCS, RFC_4122, RESERVED_MICROSOFT, RESERVED_FUTURE = [
'reserved for NCS compatibility', 'specified in RFC 4122',
'reserved for Microsoft compatibility', 'reserved for future definition']
class UUID(object):
"""Instances of the UUID class represent UUIDs as specified in RFC 4122.
UUID objects are immutable, hashable, and usable as dictionary keys.
Converting a UUID to a string with str() yields something in the form
'12345678-1234-1234-1234-123456789abc'. The UUID constructor accepts
five possible forms: a similar string of hexadecimal digits, or a tuple
of six integer fields (with 32-bit, 16-bit, 16-bit, 8-bit, 8-bit, and
48-bit values respectively) as an argument named 'fields', or a string
of 16 bytes (with all the integer fields in big-endian order) as an
argument named 'bytes', or a string of 16 bytes (with the first three
fields in little-endian order) as an argument named 'bytes_le', or a
single 128-bit integer as an argument named 'int'.
UUIDs have these read-only attributes:
bytes the UUID as a 16-byte string (containing the six
integer fields in big-endian byte order)
bytes_le the UUID as a 16-byte string (with time_low, time_mid,
and time_hi_version in little-endian byte order)
fields a tuple of the six integer fields of the UUID,
which are also available as six individual attributes
and two derived attributes:
time_low the first 32 bits of the UUID
time_mid the next 16 bits of the UUID
time_hi_version the next 16 bits of the UUID
clock_seq_hi_variant the next 8 bits of the UUID
clock_seq_low the next 8 bits of the UUID
node the last 48 bits of the UUID
time the 60-bit timestamp
clock_seq the 14-bit sequence number
hex the UUID as a 32-character hexadecimal string
int the UUID as a 128-bit integer
urn the UUID as a URN as specified in RFC 4122
variant the UUID variant (one of the constants RESERVED_NCS,
RFC_4122, RESERVED_MICROSOFT, or RESERVED_FUTURE)
version the UUID version number (1 through 5, meaningful only
when the variant is RFC_4122)
"""
def __init__(self, hex=None, bytes=None, bytes_le=None, fields=None,
int=None, version=None):
r"""Create a UUID from either a string of 32 hexadecimal digits,
a string of 16 bytes as the 'bytes' argument, a string of 16 bytes
in little-endian order as the 'bytes_le' argument, a tuple of six
integers (32-bit time_low, 16-bit time_mid, 16-bit time_hi_version,
8-bit clock_seq_hi_variant, 8-bit clock_seq_low, 48-bit node) as
the 'fields' argument, or a single 128-bit integer as the 'int'
argument. When a string of hex digits is given, curly braces,
hyphens, and a URN prefix are all optional. For example, these
expressions all yield the same UUID:
UUID('{12345678-1234-5678-1234-567812345678}')
UUID('12345678123456781234567812345678')
UUID('urn:uuid:12345678-1234-5678-1234-567812345678')
UUID(bytes='\x12\x34\x56\x78'*4)
UUID(bytes_le='\x78\x56\x34\x12\x34\x12\x78\x56' +
'\x12\x34\x56\x78\x12\x34\x56\x78')
UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678))
UUID(int=0x12345678123456781234567812345678)
Exactly one of 'hex', 'bytes', 'bytes_le', 'fields', or 'int' must
be given. The 'version' argument is optional; if given, the resulting
UUID will have its variant and version set according to RFC 4122,
overriding the given 'hex', 'bytes', 'bytes_le', 'fields', or 'int'.
"""
if [hex, bytes, bytes_le, fields, int].count(None) != 4:
raise TypeError('need one of hex, bytes, bytes_le, fields, or int')
if hex is not None:
hex = hex.replace('urn:', '').replace('uuid:', '')
hex = hex.strip('{}').replace('-', '')
if len(hex) != 32:
| raise ValueError('badly formed hexadecimal | UUID string')
int = long(hex, 16)
if bytes_le is not None:
if len(bytes_le) != 16:
raise ValueError('bytes_le is not a 16-char string')
bytes = (bytes_le[3] + bytes_le[2] + bytes_le[1] + bytes_le[0] +
bytes_le[5] + bytes_le[4] + bytes_le[7] + bytes_le[6] +
bytes_le[8:])
if bytes is not None:
if len(bytes) != 16:
raise ValueError('bytes is not a 16-char string')
int = long(('%02x'*16) % tuple(map(ord, bytes)), 16)
if fields is not None:
if len(fields) != 6:
raise ValueError('fields is not a 6-tuple')
(time_low, time_mid, time_hi_version,
clock_seq_hi_variant, clock_seq_low, node) = fields
if not 0 <= time_low < 1<<32L:
raise ValueError('field 1 out of range (need a 32-bit value)')
if not 0 <= time_mid < 1<<16L:
raise ValueError('field 2 out of range (need a 16-bit value)')
if not 0 <= time_hi_version < 1<<16L:
raise ValueError('field 3 out of range (need a 16-bit value)')
if not 0 <= clock_seq_hi_variant < 1<<8L:
raise ValueError('field 4 out of range (need an 8-bit value)')
if not 0 <= clock_seq_low < 1<<8L:
raise ValueError('field 5 out of range (need an 8-bit value)')
if not 0 <= node < 1<<48L:
raise ValueError('field 6 out of range (need a 48-bit value)')
clock_seq = (clock_seq_hi_variant << 8L) | clock_seq_low
int = ((time_low << 96L) | (time_mid << 80L) |
(time_hi_version << 64L) | (clock_seq << 48L) | node)
if int is not None:
if not 0 <= int < 1<<128L:
raise ValueError('int is out of range (need a 128-bit value)')
if version is not None:
if not 1 <= version <= 5:
raise ValueError('illegal version number')
# Set the variant to RFC 4122.
int &= ~(0xc000 << 48L)
int |= 0x8000 << 48L |
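# A quick sanity check of the constructor forms documented above; a minimal
# sketch assuming the standard-library uuid module:
from uuid import UUID
u = UUID('{12345678-1234-5678-1234-567812345678}')
assert u == UUID(hex='12345678123456781234567812345678')
assert u == UUID(int=0x12345678123456781234567812345678)
assert u == UUID(fields=(0x12345678, 0x1234, 0x5678, 0x12, 0x34, 0x567812345678))
assert u.urn == 'urn:uuid:12345678-1234-5678-1234-567812345678'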
XKNX/xknx | xknx/devices/cover.py | Python | mit | 13,841 | 0.000506 | """
Module for managing a cover via KNX.
It provides functionality for
* moving the cover up/down or to a specific position
* reading the current state from the KNX bus.
The Cover device will also predict the current position while travelling.
"""
from __future__ import annotations
import logging
from typing import TYPE_CHECKING, Any, Iterator
from xknx.remote_value import (
GroupAddressesType,
RemoteValue,
RemoteValueScaling,
RemoteValueStep,
RemoteValueSwitch,
RemoteValueUpDown,
)
from .device import Device, DeviceCallbackType
from .travelcalculator import TravelCalculator, TravelStatus
if TYPE_CHECKING:
from xknx.telegram import Telegram
from xknx.xknx import XKNX
logger = logging.getLogger("xknx.log")
class Cover(Device):
"""Class for managing a cover."""
DEFAULT_TRAVEL_TIME_DOWN = 22
DEFAULT_TRAVEL_TIME_UP = 22
def __init__(
self,
xknx: XKNX,
name: str,
group_address_long: GroupAddressesType | None = None,
group_address_short: GroupAddressesType | None = None,
group_address_stop: GroupAddressesType | None = None,
group_address_position: GroupAddressesType | None = None,
group_address_position_state: GroupAddressesType | None = None,
group_address_angle: GroupAddressesType | None = None,
group_address_angle_state: GroupAddressesType | None = None,
group_address_locked_state: GroupAddressesType | None = None,
sync_state: bool | int | float | str = True,
travel_time_down: float = DEFAULT_TRAVEL_TIME_DOWN,
travel_time_up: float = DEFAULT_TRAVEL_TIME_UP,
invert_position: bool = False,
invert_angle: bool = False,
device_updated_cb: DeviceCallbackType | None = None,
):
"""Initialize Cover class."""
super().__init__(xknx, | name, device_updated_cb)
# self.after_update for position changes is called after updating the
# travelcalculator (in process_group_write and set_*) - angle changes
# are updated from RemoteValue objects
self.updown = RemoteValueUpDown(
xknx,
group_address_long,
device_name=self.name,
after_update_cb=None,
invert=invert_position,
)
self.st | ep = RemoteValueStep(
xknx,
group_address_short,
device_name=self.name,
after_update_cb=self.after_update,
invert=invert_position,
)
self.stop_ = RemoteValueSwitch(
xknx,
group_address=group_address_stop,
sync_state=False,
device_name=self.name,
after_update_cb=None,
)
position_range_from = 100 if invert_position else 0
position_range_to = 0 if invert_position else 100
self.position_current = RemoteValueScaling(
xknx,
group_address_state=group_address_position_state,
sync_state=sync_state,
device_name=self.name,
feature_name="Position",
after_update_cb=self._current_position_from_rv,
range_from=position_range_from,
range_to=position_range_to,
)
self.position_target = RemoteValueScaling(
xknx,
group_address=group_address_position,
device_name=self.name,
feature_name="Target position",
after_update_cb=self._target_position_from_rv,
range_from=position_range_from,
range_to=position_range_to,
)
angle_range_from = 100 if invert_angle else 0
angle_range_to = 0 if invert_angle else 100
self.angle = RemoteValueScaling(
xknx,
group_address_angle,
group_address_angle_state,
sync_state=sync_state,
device_name=self.name,
feature_name="Tilt angle",
after_update_cb=self.after_update,
range_from=angle_range_from,
range_to=angle_range_to,
)
self.locked = RemoteValueSwitch(
xknx,
group_address_state=group_address_locked_state,
sync_state=sync_state,
device_name=self.name,
feature_name="Locked",
after_update_cb=self.after_update,
)
self.travel_time_down = travel_time_down
self.travel_time_up = travel_time_up
self.travelcalculator = TravelCalculator(travel_time_down, travel_time_up)
self.travel_direction_tilt: TravelStatus | None = None
def _iter_remote_values(self) -> Iterator[RemoteValue[Any, Any]]:
"""Iterate the devices RemoteValue classes."""
yield self.updown
yield self.step
yield self.stop_
yield self.position_current
yield self.position_target
yield self.angle
yield self.locked
async def set_down(self) -> None:
"""Move cover down."""
if self.updown.writable:
await self.updown.down()
self.travelcalculator.start_travel_down()
self.travel_direction_tilt = None
await self.after_update()
elif self.position_target.writable:
await self.position_target.set(self.travelcalculator.position_closed)
async def set_up(self) -> None:
"""Move cover up."""
if self.updown.writable:
await self.updown.up()
self.travelcalculator.start_travel_up()
self.travel_direction_tilt = None
await self.after_update()
elif self.position_target.writable:
await self.position_target.set(self.travelcalculator.position_open)
async def set_short_down(self) -> None:
"""Move cover short down."""
await self.step.increase()
async def set_short_up(self) -> None:
"""Move cover short up."""
await self.step.decrease()
async def stop(self) -> None:
"""Stop cover."""
if self.stop_.writable:
await self.stop_.on()
elif self.step.writable:
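            # No dedicated stop address: most KNX actuators stop a moving drive
            # on a step telegram, so send a step in the current travel direction.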
if TravelStatus.DIRECTION_UP in (
self.travelcalculator.travel_direction,
self.travel_direction_tilt,
):
await self.step.decrease()
elif TravelStatus.DIRECTION_DOWN in (
self.travelcalculator.travel_direction,
self.travel_direction_tilt,
):
await self.step.increase()
else:
logger.warning("Stop not supported for device %s", self.get_name())
return
self.travelcalculator.stop()
self.travel_direction_tilt = None
await self.after_update()
async def set_position(self, position: int) -> None:
"""Move cover to a desginated postion."""
if not self.position_target.writable:
# No direct positioning group address defined
# fully open or close is always possible even if current position is not known
current_position = self.current_position()
if current_position is None:
if position == self.travelcalculator.position_open:
await self.updown.up()
elif position == self.travelcalculator.position_closed:
await self.updown.down()
else:
logger.warning(
"Current position unknown. Initialize cover by moving to end position."
)
return
elif position < current_position:
await self.updown.up()
elif position > current_position:
await self.updown.down()
self.travelcalculator.start_travel(position)
await self.after_update()
else:
await self.position_target.set(position)
async def _target_position_from_rv(self) -> None:
"""Update the target postion from RemoteValue (Callback)."""
new_target = self.position_target.value
if new_target is not None:
self.travelcalculator.start_travel(new_target)
await self.after_update()
|
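# A hedged usage sketch for the Cover class above; group addresses and the
# device name are placeholders, and a reachable KNX interface is assumed:
import asyncio
from xknx import XKNX
from xknx.devices import Cover
async def main() -> None:
    xknx = XKNX()
    await xknx.start()
    cover = Cover(
        xknx,
        "Living room blinds",
        group_address_long="1/4/0",
        group_address_stop="1/4/1",
        group_address_position="1/4/2",
        group_address_position_state="1/4/3",
    )
    await cover.set_position(50)  # position_target is writable, so this sends a scaling telegram
    await xknx.stop()
asyncio.run(main())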
JulyJ/MindBot | mindbot/command/news/canadanews.py | Python | mit | 1,352 | 0.002219 | """
This module serves commands to retrieve Canadian news.
Powered by http://www.statcan.gc.ca/, it shows the four most recent news items.
Updated daily.
"""
from requests import get, RequestException, status_codes
from ..commandbase import CommandBase
| class CanadaStatsCommand(CommandBase):
name = '/canadastat'
    help_text = ' - Daily articles reviewing open Statistics Canada data.'
disable_web_page_preview = 'false'
NEWS_TEXT = ('[{response[title]}](http://www.statcan.gc.ca{response[photo]})\n'
| '{response[summary]}\n'
'{response[date]}, [Read More...](http://www.statcan.gc.ca{response[link]})')
def __call__(self, *args, **kwargs):
super().__call__(*args, **kwargs)
json = self.get_json()
if json:
for article in json['daily']['article']:
self.send_telegram_message(self.NEWS_TEXT.format(response=article))
else:
            self.send_telegram_message('No news was retrieved.')
def get_json(self):
url = 'http://www.statcan.gc.ca/sites/json/daily-banner-eng.json'
try:
response = get(url)
except RequestException as e:
self._logger.debug('RequestException {}'.format(e))
return
if response.status_code == status_codes.codes.ok:
return response.json()
|
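# The command above assumes the endpoint returns JSON shaped roughly like
# this; a hypothetical minimal payload inferred from the keys it reads:
sample = {
    "daily": {
        "article": [
            {
                "title": "Labour Force Survey",
                "photo": "/example/image-eng.jpg",
                "summary": "Employment was little changed.",
                "date": "2017-04-07",
                "link": "/example/article-eng.htm",
            }
        ]
    }
}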
SmithChart/twitter-printer | dbConnector.py | Python | gpl-2.0 | 6,036 | 0.012922 | #!/usr/bin/env python3
import os
import sqlite3
import time
import random
import hashlib
import unittest
import uuid
class WrongDbFileError(Exception):
    def __init__(self, message):
        super().__init__(message)
        self.message = message
class InvalidPubMethodError(Exception):
    def __init__(self, message):
        super().__init__(message)
        self.message = message
class DbConnector(object):
def __init__(self,dbFile):
self.conn = False
self.dbFile = None
random.seed()
db_filename = dbFile
schema_filename = 'wurstDB.sql'
db_is_new = not os.path.exists(db_filename)
with sqlite3.connect(db_filename) as self.conn:
if db_is_new:
with open(schema_filename, 'rt') as f:
schema = f.read()
self.conn.executescript(schema)
self.conn.commit()
self.dbFile = dbFile
def close(self):
if self.conn:
self.conn.close()
self.conn = False
self.dbFile = None
def getEan(self,volume,pubMethod):
dateCreate = int(time.time())
c = self.conn.cursor()
ean = ""
for i in range(0,11):
ean += str(random.randrange(0,10))
c.execute("SELECT valid FROM pubMethod WHERE pubMethod = ?",[str(pubMethod)])
valid = False
for val in c.fetchall():
val, = val
if val == 1:
valid = True
if not valid:
raise InvalidPubMethodError("%s is not a valid PubMethod"%pubMethod)
c.execute("INSERT INTO wurst(code, valid, used, dateGenerated, pubMethod) VALUES (?,?,?,?,?)",[str(ean),volume,0,dateCreate,str(pubMethod)])
self.conn.commit()
return ean
def getCode(self,volume,pubMethod):
dateCreate = int(time.time())
c = self.conn.cursor()
m = hashlib.sha256()
m.update(str(random.random()).encode())
code = m.hexdigest()
c.execute("SELECT valid FROM pubMethod WHERE pubMethod = ?",[str(pubMethod)])
valid = False
for val in c.fetchall():
val, = val
if val == 1:
valid = True
if not valid:
raise InvalidPubMethodError("%s is not a valid PubM | ethod"%pubMethod)
c.execute("INSERT INTO wurst(code, valid, used, dateGenerated, pubMethod) VALUES (?,?,?,?,?)",[str(code),volume,0,dateCreate,str(pubMethod)])
| self.conn.commit()
return code
def useCode(self,code):
dateUse = int(time.time())
c = self.conn.cursor()
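        # A code is redeemable only while used < valid (its remaining volume)
        # and its pubMethod has not been blacklisted (pubMethod.valid != 0).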
c.execute("SELECT wurst.valid,used FROM wurst INNER JOIN pubMethod ON pubMethod.pubMethod = wurst.pubMethod WHERE code == ? and used < wurst.valid and pubMethod.valid != 0",[str(code)])
ret = False
for row in c.fetchall():
valid, used = row
c.execute("UPDATE wurst SET used = used + 1, dateUsed = ? WHERE code == ? and used < valid",[dateUse,str(code)])
ret = True
self.conn.commit()
return ret
def addPubMethod(self,pubMethod):
c = self.conn.cursor()
c.execute("INSERT OR IGNORE INTO pubMethod(pubMethod) VALUES (?)",[str(pubMethod)])
self.conn.commit()
def blackList(self,pubMethod):
c = self.conn.cursor()
c.execute("UPDATE OR IGNORE pubMethod SET valid = 0 WHERE pubMethod == ?",[str(pubMethod)])
self.conn.commit()
def enablePubMethod(self,pubMethod):
c = self.conn.cursor()
c.execute("UPDATE OR IGNORE pubMethod SET valid = 1 WHERE pubMethod == ?",[str(pubMethod)])
self.conn.commit()
class TestDbConnector(unittest.TestCase):
def setUp(self):
self.dbFileName = "/tmp/"+str(uuid.uuid4())
def test_init(self):
db1 = DbConnector(self.dbFileName)
self.assertIsNotNone(DbConnector(self.dbFileName), 'DB connection failed')
db1.close()
def test_close(self):
db2 = DbConnector(self.dbFileName)
db2.close()
self.assertEqual(db2.conn , False, 'DB connection is not closed')
def test_getCode(self):
db1 = DbConnector(self.dbFileName)
db1.addPubMethod('test')
db1.getCode(1,'test')
with self.assertRaises(InvalidPubMethodError) as e:
db1.getCode(1,'test3')
db1.close()
def test_getEan(self):
db1 = DbConnector(self.dbFileName)
db1.addPubMethod('test')
        self.assertEqual(len(db1.getEan(1, 'test')), 11, 'EAN should be 11 digits long')
with self.assertRaises(InvalidPubMethodError) as e:
db1.getCode(1,'test3')
db1.close()
def test_useCode(self):
db1 = DbConnector(self.dbFileName)
db1.addPubMethod('test')
c = db1.getCode(2,'test')
c2 = db1.getCode(1,'test')
t = db1.useCode('1')
self.assertEqual(t , False, 'did accept unknown code')
t = db1.useCode(c)
self.assertEqual(t , True, 'did not accept known code')
t = db1.useCode(c)
self.assertEqual(t , True, 'did accept volume of 2 only once')
t = db1.useCode(c)
self.assertEqual(t , False, 'did accept volume of 2 too often')
t = db1.useCode(c2)
self.assertEqual(t , True, 'code was affected by another code')
db1.close()
def test_blacklist(self):
db1 = DbConnector(self.dbFileName)
db1.addPubMethod('test')
db1.addPubMethod('test2')
c = db1.getCode(2,'test')
c2 = db1.getCode(1,'test2')
db1.blackList('test')
t = db1.useCode(c)
self.assertEqual(t , False, 'blacklisted pubMethods are accepted')
t = db1.useCode(c2)
self.assertEqual(t , True, 'not blacklisted pubMethods are not accepted')
c = db1.conn.cursor()
c.execute("UPDATE pubMethod SET valid = 1 WHERE pubMethod == ?",['test'])
db1.conn.commit()
db1.close()
def tearDown(self):
os.remove(self.dbFileName)
if __name__ == "__main__":
unittest.main(verbosity=2)
|
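# wurstDB.sql ships separately; a hypothetical reconstruction of its schema,
# inferred from the columns the queries above touch, could look like this:
SCHEMA_SKETCH = """
CREATE TABLE IF NOT EXISTS pubMethod (
    pubMethod TEXT PRIMARY KEY,
    valid     INTEGER DEFAULT 1
);
CREATE TABLE IF NOT EXISTS wurst (
    code          TEXT PRIMARY KEY,
    valid         INTEGER,
    used          INTEGER DEFAULT 0,
    dateGenerated INTEGER,
    dateUsed      INTEGER,
    pubMethod     TEXT REFERENCES pubMethod(pubMethod)
);
"""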