| repo_name (string, 5-100 chars) | ref (string, 12-67 chars) | path (string, 4-244 chars) | copies (string, 1-8 chars) | content (string, 0-1.05M chars, nullable ⌀) |
|---|---|---|---|---|
| val2k/linux | refs/heads/master | tools/perf/scripts/python/check-perf-trace.py | 1997 |
# perf script event handlers, generated by perf script -g python
# (c) 2010, Tom Zanussi <tzanussi@gmail.com>
# Licensed under the terms of the GNU GPL License version 2
#
# This script tests basic functionality such as flag and symbol
# strings, common_xxx() calls back into perf, begin, end, unhandled
# events, etc. Basically, if this script runs successfully and
# displays expected results, Python scripting support should be ok.
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from Core import *
from perf_trace_context import *
unhandled = autodict()
def trace_begin():
print "trace_begin"
pass
def trace_end():
print_unhandled()
def irq__softirq_entry(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, vec):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "vec=%s\n" % \
(symbol_str("irq__softirq_entry", "vec", vec)),
def kmem__kmalloc(event_name, context, common_cpu,
common_secs, common_nsecs, common_pid, common_comm,
common_callchain, call_site, ptr, bytes_req, bytes_alloc,
gfp_flags):
print_header(event_name, common_cpu, common_secs, common_nsecs,
common_pid, common_comm)
print_uncommon(context)
print "call_site=%u, ptr=%u, bytes_req=%u, " \
"bytes_alloc=%u, gfp_flags=%s\n" % \
(call_site, ptr, bytes_req, bytes_alloc,
flag_str("kmem__kmalloc", "gfp_flags", gfp_flags)),
def trace_unhandled(event_name, context, event_fields_dict):
try:
unhandled[event_name] += 1
except TypeError:
unhandled[event_name] = 1
def print_header(event_name, cpu, secs, nsecs, pid, comm):
print "%-20s %5u %05u.%09u %8u %-20s " % \
(event_name, cpu, secs, nsecs, pid, comm),
# print trace fields not included in handler args
def print_uncommon(context):
print "common_preempt_count=%d, common_flags=%s, common_lock_depth=%d, " \
% (common_pc(context), trace_flag_str(common_flags(context)), \
common_lock_depth(context))
def print_unhandled():
keys = unhandled.keys()
if not keys:
return
print "\nunhandled events:\n\n",
print "%-40s %10s\n" % ("event", "count"),
print "%-40s %10s\n" % ("----------------------------------------", \
"-----------"),
for event_name in keys:
print "%-40s %10d\n" % (event_name, unhandled[event_name])
|
| wlyeow/pysolclient | refs/heads/master | test/test_simple.py | 1 |
from pysolclient import *
from pysolclient import _defaultEventCallback
from ctypes import *
import time
import pprint
def test_destination_eq():
c = Destination()
d = Destination()
c.setDest('c')
d.setDest('d')
assert c != d
d.setDest('c')
assert c == d
c.destType = Destination.QUEUE
assert c != d
d.destType = Destination.QUEUE
assert c == d
def test_connect_non_blocking():
context = Context()
sprops = SessionProperties(\
HOST='localhost',
VPN_NAME='default',
USERNAME='default',
CONNECT_BLOCKING=PROP_DISABLE_VAL)
session = Session(context, sprops)
assert session.connect() == ReturnCode.IN_PROGRESS
class TestDirectMessages:
@classmethod
def setup_class(cls):
cls.context = Context()
@classmethod
def teardown_class(cls):
del cls.context
def setup(self):
self.sprops = SessionProperties(\
HOST='localhost',
VPN_NAME='default',
USERNAME='default')
class SeqNumData:
def __init__(self):
self.last = None
self.noSeqNum = 0
self.missing = []
self.discard = []
self.received = 0
def test_sequence_numbers(self):
self.sprops.GENERATE_SEQUENCE_NUMBER = PROP_ENABLE_VAL
topic = 'nosetest/direct/test_seq_num'
messages = 10000
wait_timeout = 10
# setup rx
def rxMsg(session_p, msg_p, user_p):
udata = cast(user_p, POINTER(py_object)).contents.value
udata.received += 1
msg = Message(msg_p)
m = msg.getSeqNum()
if m is None:
udata.noSeqNum += 1
return CALLBACK_TAKE_MSG
if msg.isDiscardIndicated():
udata.discard.append(m)
if udata.last is None:
udata.last = m
return CALLBACK_TAKE_MSG
if udata.last + 1 != m:
udata.missing.extend(range(udata.last + 1, m))
udata.last = m
return CALLBACK_TAKE_MSG
# setup funcInfo
data = self.SeqNumData()
D = py_object(data)
funcInfo = SessionFuncInfo()
funcInfo.rxMsgInfo.callback_p = MSG_CALLBACK_TYPE(rxMsg)
funcInfo.rxMsgInfo.user_p = cast(byref(D), c_void_p)
funcInfo.eventInfo.callback_p = EVENT_CALLBACK_TYPE(_defaultEventCallback)
session = Session(self.context, self.sprops, funcInfo)
session.connect()
session.topicSubscribe(topic)
msg = Message()
err = msg.applyProps(Dest=Destination(topic),
Delivery=Message.DELIVERY_MODE_DIRECT)
if err:
for n, e in err.items():
print('Cannot set {}:\t{}: {}'.format(n, type(e).__name__, str(e)))
if e.trace: print(e.trace)
raise RuntimeError('Cannot continue')
for _ in range(messages):
session.sendMsg(msg)
while wait_timeout > 0:
if data.received == messages: break
time.sleep(0.1)
wait_timeout -= 0.1
session.disconnect()
pprint.pprint(data.__dict__)
assert data.received == messages
|
| migueldvb/django-guardian | refs/heads/master | guardian/templatetags/guardian_tags.py | 31 |
"""
``django-guardian`` template tags. To use in a template just put the following
*load* tag inside a template::
{% load guardian_tags %}
"""
from __future__ import unicode_literals
from django import template
from django.contrib.auth.models import Group, AnonymousUser
from django.template import get_library
from django.template import InvalidTemplateLibrary
from django.template.defaulttags import LoadNode
from guardian.compat import get_user_model
from guardian.exceptions import NotUserNorGroup
from guardian.core import ObjectPermissionChecker
register = template.Library()
@register.tag
def friendly_load(parser, token):
'''
Tries to load a custom template tag set. Non-existing tag libraries
are ignored.
This means that, if used in conjunction with ``if_has_tag``, you can try to
load the comments template tag library to enable comments even if the
comments framework is not installed.
For example::
{% load friendly_loader %}
{% friendly_load comments webdesign %}
{% if_has_tag render_comment_list %}
{% render_comment_list for obj %}
{% else %}
{% if_has_tag lorem %}
{% lorem %}
{% endif_has_tag %}
{% endif_has_tag %}
'''
bits = token.contents.split()
for taglib in bits[1:]:
try:
lib = get_library(taglib)
parser.add_library(lib)
except InvalidTemplateLibrary:
pass
return LoadNode()
class ObjectPermissionsNode(template.Node):
def __init__(self, for_whom, obj, context_var):
self.for_whom = template.Variable(for_whom)
self.obj = template.Variable(obj)
self.context_var = context_var
def render(self, context):
for_whom = self.for_whom.resolve(context)
if isinstance(for_whom, get_user_model()):
self.user = for_whom
self.group = None
elif isinstance(for_whom, AnonymousUser):
self.user = get_user_model().get_anonymous()
self.group = None
elif isinstance(for_whom, Group):
self.user = None
self.group = for_whom
else:
raise NotUserNorGroup("User or Group instance required (got %s)"
% for_whom.__class__)
obj = self.obj.resolve(context)
check = ObjectPermissionChecker(for_whom)
perms = check.get_perms(obj)
context[self.context_var] = perms
return ''
@register.tag
def get_obj_perms(parser, token):
"""
Returns a list of permissions (as ``codename`` strings) for a given
``user``/``group`` and ``obj`` (Model instance).
Parses ``get_obj_perms`` tag which should be in format::
{% get_obj_perms user/group for obj as "context_var" %}
.. note::
Make sure that you set and use those permissions in the same template
block (``{% block %}``).
Example of usage (assuming ``flatpage`` and ``perm`` objects are
available from *context*)::
{% get_obj_perms request.user for flatpage as "flatpage_perms" %}
{% if "delete_flatpage" in flatpage_perms %}
<a href="/pages/delete?target={{ flatpage.url }}">Remove page</a>
{% endif %}
.. note::
Please remember that superusers will always get the full list of permissions
for a given object.
"""
bits = token.split_contents()
format = '{% get_obj_perms user/group for obj as "context_var" %}'
if len(bits) != 6 or bits[2] != 'for' or bits[4] != 'as':
raise template.TemplateSyntaxError("get_obj_perms tag should be in "
"format: %s" % format)
for_whom = bits[1]
obj = bits[3]
context_var = bits[5]
if context_var[0] != context_var[-1] or context_var[0] not in ('"', "'"):
raise template.TemplateSyntaxError("get_obj_perms tag's context_var "
"argument should be in quotes")
context_var = context_var[1:-1]
return ObjectPermissionsNode(for_whom, obj, context_var)
|
| envi-idl/envipyarc | refs/heads/master | envipyarc/templates/envivector.py | 1 |
"""
Maps the ENVI Task data type to a GPTool datatype
"""
from __future__ import absolute_import
from string import Template
from envipyarclib.gptool.parameter.template import Template as ParamTemplate
class ENVIVECTOR(ParamTemplate):
"""
Class template for the datatype
"""
def get_parameter(self, task_param):
if task_param['direction'].upper() == 'OUTPUT':
return Template('''
$name = arcpy.Parameter(
displayName="$displayName",
name="$name",
datatype="$dataType",
parameterType="$paramType",
direction="$direction",
multiValue=$multiValue
)
''')
# Return the input template
return Template('''
$name = arcpy.Parameter(
displayName="$displayName",
name="$name",
datatype=["$dataType","GPString"],
parameterType="$paramType",
direction="$direction",
multiValue=$multiValue
)
''')
def parameter_names(self, task_param):
return [Template('$name')]
def default_value(self):
return Template('''
${name}.value = "$defaultValue"
''')
def update_parameter(self):
return Template('')
def pre_execute(self):
return Template('''
path = str(parameters[self.i${name}].value)
input_params['${name}'] = {'url': path,
'factory':'URLVector'
}
''')
def post_execute(self):
return Template('''
if '${name}' in task_results:
parameters[self.i${name}].value = task_results['${name}']['url']
''')
def template():
"""Returns the template object."""
return ENVIVECTOR('DEShapefile')
|
| lewisamarshall/emigrate | refs/heads/master | setup.py | 1 |
#!/usr/bin/python
import sys
if sys.version_info < (3,):
from emigrate.__version__ import __version__
else:
__version__ = open('emigrate/__version__.py').read().strip().split("'")[-2]
from setuptools import setup, find_packages
try:
import pypandoc
long_description = pypandoc.convert('README.md', 'rst')
except Exception:
long_description = None
setup(name='emigrate',
version=__version__,
author='Lewis A. Marshall',
author_email='lewis.a.marshall@gmail.com',
url="https://github.com/lewisamarshall/emigrate",
classifiers=[
"Programming Language :: Python",
"Development Status :: 3 - Alpha",
"Environment :: Console",
"Intended Audience :: Science/Research",
"Operating System :: OS Independent",
"Topic :: Software Development :: Libraries :: Python Modules",
"Topic :: Scientific/Engineering :: Chemistry",
],
use_2to3=True,
description='A package for simulating electrophoresis.',
packages=find_packages(),
long_description=long_description,
requires=['numpy', 'scipy', 'ionize', 'h5py', 'click', 'matplotlib'],
entry_points={'console_scripts': ['emigrate = emigrate.__main__:main']},
test_suite="emigrate.tests",
)
|
| tribut/vdirsyncer | refs/heads/master | tests/storage/dav/__init__.py | 1 |
# -*- coding: utf-8 -*-
import os
import requests
import requests.exceptions
from tests import assert_item_equals
import vdirsyncer.exceptions as exceptions
from vdirsyncer.storage.base import Item
from .. import StorageTests
dav_server = os.environ.get('DAV_SERVER', '').strip() or 'radicale'
def _get_server_mixin(server_name):
from . import __name__ as base
x = __import__('{}.servers.{}'.format(base, server_name), fromlist=[''])
return x.ServerMixin
ServerMixin = _get_server_mixin(dav_server)
class DavStorageTests(ServerMixin, StorageTests):
dav_server = dav_server
def test_dav_broken_item(self, s):
item = Item(u'HAHA:YES')
try:
s.upload(item)
except (exceptions.Error, requests.exceptions.HTTPError):
pass
assert not list(s.list())
def test_dav_empty_get_multi_performance(self, s, monkeypatch):
def breakdown(*a, **kw):
raise AssertionError('Expected not to be called.')
monkeypatch.setattr('requests.sessions.Session.request', breakdown)
assert list(s.get_multi([])) == []
def test_dav_unicode_href(self, s, get_item, monkeypatch):
if self.dav_server != 'radicale':
# Radicale is unable to deal with unicode hrefs
monkeypatch.setattr(s, '_get_href',
lambda item: item.ident + s.fileext)
item = get_item(uid=u'lolätvdirsynceröü град сатану')
href, etag = s.upload(item)
item2, etag2 = s.get(href)
assert_item_equals(item, item2)
|
| swt30/beets | refs/heads/master | beetsplug/edit.py | 7 |
# This file is part of beets.
# Copyright 2016
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Open metadata information in a text editor to let the user edit it.
"""
from __future__ import division, absolute_import, print_function
from beets import plugins
from beets import util
from beets import ui
from beets.dbcore import types
from beets.importer import action
from beets.ui.commands import _do_query, PromptChoice
from copy import deepcopy
import subprocess
import yaml
from tempfile import NamedTemporaryFile
import os
# These "safe" types can avoid the format/parse cycle that most fields go
# through: they are safe to edit with native YAML types.
SAFE_TYPES = (types.Float, types.Integer, types.Boolean)
class ParseError(Exception):
"""The modified file is unreadable. The user should be offered a chance to
fix the error.
"""
def edit(filename, log):
"""Open `filename` in a text editor.
"""
cmd = util.shlex_split(util.editor_command())
cmd.append(filename)
log.debug(u'invoking editor command: {!r}', cmd)
try:
subprocess.call(cmd)
except OSError as exc:
raise ui.UserError(u'could not run editor command {!r}: {}'.format(
cmd[0], exc
))
def dump(arg):
"""Dump a sequence of dictionaries as YAML for editing.
"""
return yaml.safe_dump_all(
arg,
allow_unicode=True,
default_flow_style=False,
)
def load(s):
"""Read a sequence of YAML documents back to a list of dictionaries
with string keys.
Can raise a `ParseError`.
"""
try:
out = []
for d in yaml.load_all(s):
if not isinstance(d, dict):
raise ParseError(
u'each entry must be a dictionary; found {}'.format(
type(d).__name__
)
)
# Convert all keys to strings. They started out as strings,
# but the user may have inadvertently messed this up.
out.append({unicode(k): v for k, v in d.items()})
except yaml.YAMLError as e:
raise ParseError(u'invalid YAML: {}'.format(e))
return out
def _safe_value(obj, key, value):
"""Check whether the `value` is safe to represent in YAML and trust as
returned from parsed YAML.
This ensures that values do not change their type when the user edits their
YAML representation.
"""
typ = obj._type(key)
return isinstance(typ, SAFE_TYPES) and isinstance(value, typ.model_type)
def flatten(obj, fields):
"""Represent `obj`, a `dbcore.Model` object, as a dictionary for
serialization. Only include the given `fields` if provided;
otherwise, include everything.
The resulting dictionary's keys are strings and the values are
safely YAML-serializable types.
"""
# Format each value.
d = {}
for key in obj.keys():
value = obj[key]
if _safe_value(obj, key, value):
# A safe value that is faithfully representable in YAML.
d[key] = value
else:
# A value that should be edited as a string.
d[key] = obj.formatted()[key]
# Possibly filter field names.
if fields:
return {k: v for k, v in d.items() if k in fields}
else:
return d
def apply_(obj, data):
"""Set the fields of a `dbcore.Model` object according to a
dictionary.
This is the opposite of `flatten`. The `data` dictionary should have
strings as values.
"""
for key, value in data.items():
if _safe_value(obj, key, value):
# A safe value *stayed* represented as a safe type. Assign it
# directly.
obj[key] = value
else:
# Either the field was stringified originally or the user changed
# it from a safe type to an unsafe one. Parse it as a string.
obj.set_parse(key, unicode(value))
class EditPlugin(plugins.BeetsPlugin):
def __init__(self):
super(EditPlugin, self).__init__()
self.config.add({
# The default fields to edit.
'albumfields': 'album albumartist',
'itemfields': 'track title artist album',
# Silently ignore any changes to these fields.
'ignore_fields': 'id path',
})
self.register_listener('before_choose_candidate',
self.before_choose_candidate_listener)
def commands(self):
edit_command = ui.Subcommand(
'edit',
help=u'interactively edit metadata'
)
edit_command.parser.add_option(
u'-f', u'--field',
metavar='FIELD',
action='append',
help=u'edit this field also',
)
edit_command.parser.add_option(
u'--all',
action='store_true', dest='all',
help=u'edit all fields',
)
edit_command.parser.add_album_option()
edit_command.func = self._edit_command
return [edit_command]
def _edit_command(self, lib, opts, args):
"""The CLI command function for the `beet edit` command.
"""
# Get the objects to edit.
query = ui.decargs(args)
items, albums = _do_query(lib, query, opts.album, False)
objs = albums if opts.album else items
if not objs:
ui.print_(u'Nothing to edit.')
return
# Get the fields to edit.
if opts.all:
fields = None
else:
fields = self._get_fields(opts.album, opts.field)
self.edit(opts.album, objs, fields)
def _get_fields(self, album, extra):
"""Get the set of fields to edit.
"""
# Start with the configured base fields.
if album:
fields = self.config['albumfields'].as_str_seq()
else:
fields = self.config['itemfields'].as_str_seq()
# Add the requested extra fields.
if extra:
fields += extra
# Ensure we always have the `id` field for identification.
fields.append('id')
return set(fields)
def edit(self, album, objs, fields):
"""The core editor function.
- `album`: A flag indicating whether we're editing Items or Albums.
- `objs`: The `Item`s or `Album`s to edit.
- `fields`: The set of field names to edit (or None to edit
everything).
"""
# Present the YAML to the user and let her change it.
success = self.edit_objects(objs, fields)
# Save the new data.
if success:
self.save_changes(objs)
def edit_objects(self, objs, fields):
"""Dump a set of Model objects to a file as text, ask the user
to edit it, and apply any changes to the objects.
Return a boolean indicating whether the edit succeeded.
"""
# Get the content to edit as raw data structures.
old_data = [flatten(o, fields) for o in objs]
# Set up a temporary file with the initial data for editing.
new = NamedTemporaryFile(suffix='.yaml', delete=False)
old_str = dump(old_data)
new.write(old_str)
new.close()
# Loop until we have parseable data and the user confirms.
try:
while True:
# Ask the user to edit the data.
edit(new.name, self._log)
# Read the data back after editing and check whether anything
# changed.
with open(new.name) as f:
new_str = f.read()
if new_str == old_str:
ui.print_(u"No changes; aborting.")
return False
# Parse the updated data.
try:
new_data = load(new_str)
except ParseError as e:
ui.print_(u"Could not read data: {}".format(e))
if ui.input_yn(u"Edit again to fix? (Y/n)", True):
continue
else:
return False
# Show the changes.
# If the objects are not on the DB yet, we need a copy of their
# original state for show_model_changes.
objs_old = [deepcopy(obj) if not obj._db else None
for obj in objs]
self.apply_data(objs, old_data, new_data)
changed = False
for obj, obj_old in zip(objs, objs_old):
changed |= ui.show_model_changes(obj, obj_old)
if not changed:
ui.print_(u'No changes to apply.')
return False
# Confirm the changes.
choice = ui.input_options(
(u'continue Editing', u'apply', u'cancel')
)
if choice == u'a': # Apply.
return True
elif choice == u'c': # Cancel.
return False
elif choice == u'e': # Keep editing.
# Reset the temporary changes to the objects.
for obj in objs:
obj.read()
continue
# Remove the temporary file before returning.
finally:
os.remove(new.name)
def apply_data(self, objs, old_data, new_data):
"""Take potentially-updated data and apply it to a set of Model
objects.
The objects are not written back to the database, so the changes
are temporary.
"""
if len(old_data) != len(new_data):
self._log.warn(u'number of objects changed from {} to {}',
len(old_data), len(new_data))
obj_by_id = {o.id: o for o in objs}
ignore_fields = self.config['ignore_fields'].as_str_seq()
for old_dict, new_dict in zip(old_data, new_data):
# Prohibit any changes to forbidden fields to avoid
# clobbering `id` and such by mistake.
forbidden = False
for key in ignore_fields:
if old_dict.get(key) != new_dict.get(key):
self._log.warn(u'ignoring object whose {} changed', key)
forbidden = True
break
if forbidden:
continue
id_ = int(old_dict['id'])
apply_(obj_by_id[id_], new_dict)
def save_changes(self, objs):
"""Save a list of updated Model objects to the database.
"""
# Save to the database and possibly write tags.
for ob in objs:
if ob._dirty:
self._log.debug(u'saving changes to {}', ob)
ob.try_sync(ui.should_write(), ui.should_move())
# Methods for interactive importer execution.
def before_choose_candidate_listener(self, session, task):
"""Append an "Edit" choice and an "edit Candidates" choice (if
there are candidates) to the interactive importer prompt.
"""
choices = [PromptChoice('d', 'eDit', self.importer_edit)]
if task.candidates:
choices.append(PromptChoice('c', 'edit Candidates',
self.importer_edit_candidate))
return choices
def importer_edit(self, session, task):
"""Callback for invoking the functionality during an interactive
import session on the *original* item tags.
"""
# Assign temporary ids to the Items.
for i, obj in enumerate(task.items):
obj.id = i + 1
# Present the YAML to the user and let her change it.
fields = self._get_fields(album=False, extra=[])
success = self.edit_objects(task.items, fields)
# Remove temporary ids.
for obj in task.items:
obj.id = None
# Save the new data.
if success:
# Return action.RETAG, which makes the importer write the tags
# to the files if needed without re-applying metadata.
return action.RETAG
else:
# Edit cancelled / no edits made. Revert changes.
for obj in task.items:
obj.read()
def importer_edit_candidate(self, session, task):
"""Callback for invoking the functionality during an interactive
import session on a *candidate*. The candidate's metadata is
applied to the original items.
"""
# Prompt the user for a candidate.
sel = ui.input_options([], numrange=(1, len(task.candidates)))
# Force applying the candidate on the items.
task.match = task.candidates[sel - 1]
task.apply_metadata()
return self.importer_edit(session, task)
|
| jackrzhang/zulip | refs/heads/master | zerver/lib/events.py | 1 |
# See https://zulip.readthedocs.io/en/latest/subsystems/events-system.html for
# high-level documentation on how this system works.
import copy
import ujson
from collections import defaultdict
from django.utils.translation import ugettext as _
from django.conf import settings
from importlib import import_module
from typing import (
cast, Any, Callable, Dict, Iterable, List, Optional, Sequence, Set, Tuple, Union
)
session_engine = import_module(settings.SESSION_ENGINE)
from zerver.lib.alert_words import user_alert_words
from zerver.lib.attachments import user_attachments
from zerver.lib.avatar import avatar_url, get_avatar_field
from zerver.lib.bot_config import load_bot_config_template
from zerver.lib.hotspots import get_next_hotspots
from zerver.lib.integrations import EMBEDDED_BOTS
from zerver.lib.message import (
aggregate_unread_data,
apply_unread_message_event,
get_raw_unread_data,
get_starred_message_ids,
)
from zerver.lib.narrow import check_supported_events_narrow_filter
from zerver.lib.push_notifications import push_notifications_enabled
from zerver.lib.soft_deactivation import maybe_catch_up_soft_deactivated_user
from zerver.lib.realm_icon import realm_icon_url
from zerver.lib.request import JsonableError
from zerver.lib.topic import TOPIC_NAME
from zerver.lib.topic_mutes import get_topic_mutes
from zerver.lib.actions import (
validate_user_access_to_subscribers_helper,
do_get_streams, get_default_streams_for_realm,
gather_subscriptions_helper, get_cross_realm_dicts,
get_status_dict, streams_to_dicts_sorted,
default_stream_groups_to_dicts_sorted,
get_owned_bot_dicts,
get_available_notification_sounds,
)
from zerver.lib.user_groups import user_groups_in_realm_serialized
from zerver.tornado.event_queue import request_event_queue, get_user_events
from zerver.models import Client, Message, Realm, UserPresence, UserProfile, CustomProfileFieldValue, \
get_user_profile_by_id, \
get_realm_user_dicts, realm_filters_for_realm, get_user,\
custom_profile_fields_for_realm, get_realm_domains, \
get_default_stream_groups, CustomProfileField, Stream
from zproject.backends import email_auth_enabled, password_auth_enabled
from version import ZULIP_VERSION
def get_raw_user_data(realm_id: int, client_gravatar: bool) -> Dict[int, Dict[str, str]]:
user_dicts = get_realm_user_dicts(realm_id)
# TODO: Consider optimizing this query away with caching.
custom_profile_field_values = CustomProfileFieldValue.objects.filter(user_profile__realm_id=realm_id)
profiles_by_user_id = defaultdict(dict) # type: Dict[int, Dict[str, Any]]
for profile_field in custom_profile_field_values:
user_id = profile_field.user_profile_id
profiles_by_user_id[user_id][profile_field.field_id] = profile_field.value
def user_data(row: Dict[str, Any]) -> Dict[str, Any]:
avatar_url = get_avatar_field(
user_id=row['id'],
realm_id=realm_id,
email=row['email'],
avatar_source=row['avatar_source'],
avatar_version=row['avatar_version'],
medium=False,
client_gravatar=client_gravatar,
)
is_admin = row['is_realm_admin']
is_guest = row['is_guest']
is_bot = row['is_bot']
# This format should align with get_cross_realm_dicts() and notify_created_user
result = dict(
email=row['email'],
user_id=row['id'],
avatar_url=avatar_url,
is_admin=is_admin,
is_guest=is_guest,
is_bot=is_bot,
full_name=row['full_name'],
timezone=row['timezone'],
is_active = row['is_active'],
date_joined = row['date_joined'].isoformat(),
)
if not is_bot:
result['profile_data'] = profiles_by_user_id.get(row['id'], {})
return result
return {
row['id']: user_data(row)
for row in user_dicts
}
def always_want(msg_type: str) -> bool:
'''
This function is used as a helper in
fetch_initial_state_data, when the user passes
in None for event_types, and we want to fetch
info for every event type. Defining this at module
level makes it easier to mock.
'''
return True
# Fetch initial data. When event_types is not specified, clients want
# all event types. Whenever you add new code to this function, you
# should also add corresponding events for changes in the data
# structures and new code to apply_events (and add a test in EventsRegisterTest).
def fetch_initial_state_data(user_profile: UserProfile,
event_types: Optional[Iterable[str]],
queue_id: str, client_gravatar: bool,
include_subscribers: bool = True) -> Dict[str, Any]:
state = {'queue_id': queue_id} # type: Dict[str, Any]
realm = user_profile.realm
if event_types is None:
# return True always
want = always_want # type: Callable[[str], bool]
else:
want = set(event_types).__contains__
if want('alert_words'):
state['alert_words'] = user_alert_words(user_profile)
if want('custom_profile_fields'):
fields = custom_profile_fields_for_realm(realm.id)
state['custom_profile_fields'] = [f.as_dict() for f in fields]
state['custom_profile_field_types'] = CustomProfileField.FIELD_TYPE_CHOICES_DICT
if want('hotspots'):
state['hotspots'] = get_next_hotspots(user_profile)
if want('message'):
# The client should use get_messages() to fetch messages
# starting with the max_message_id. They will get messages
# newer than that ID via get_events()
messages = Message.objects.filter(usermessage__user_profile=user_profile).order_by('-id')[:1]
if messages:
state['max_message_id'] = messages[0].id
else:
state['max_message_id'] = -1
if want('muted_topics'):
state['muted_topics'] = get_topic_mutes(user_profile)
if want('pointer'):
state['pointer'] = user_profile.pointer
if want('presence'):
state['presences'] = get_status_dict(user_profile)
if want('realm'):
for property_name in Realm.property_types:
state['realm_' + property_name] = getattr(realm, property_name)
# Most state is handled via the property_types framework;
# these manual entries are for those realm settings that don't
# fit into that framework.
state['realm_authentication_methods'] = realm.authentication_methods_dict()
state['realm_allow_message_editing'] = realm.allow_message_editing
state['realm_allow_community_topic_editing'] = realm.allow_community_topic_editing
state['realm_allow_message_deleting'] = realm.allow_message_deleting
state['realm_message_content_edit_limit_seconds'] = realm.message_content_edit_limit_seconds
state['realm_message_content_delete_limit_seconds'] = realm.message_content_delete_limit_seconds
state['realm_icon_url'] = realm_icon_url(realm)
state['realm_icon_source'] = realm.icon_source
state['max_icon_file_size'] = settings.MAX_ICON_FILE_SIZE
state['realm_bot_domain'] = realm.get_bot_domain()
state['realm_uri'] = realm.uri
state['realm_available_video_chat_providers'] = realm.VIDEO_CHAT_PROVIDERS
state['realm_presence_disabled'] = realm.presence_disabled
state['realm_digest_emails_enabled'] = realm.digest_emails_enabled and settings.SEND_DIGEST_EMAILS
state['realm_is_zephyr_mirror_realm'] = realm.is_zephyr_mirror_realm
state['realm_email_auth_enabled'] = email_auth_enabled(realm)
state['realm_password_auth_enabled'] = password_auth_enabled(realm)
state['realm_push_notifications_enabled'] = push_notifications_enabled()
if realm.notifications_stream and not realm.notifications_stream.deactivated:
notifications_stream = realm.notifications_stream
state['realm_notifications_stream_id'] = notifications_stream.id
else:
state['realm_notifications_stream_id'] = -1
signup_notifications_stream = realm.get_signup_notifications_stream()
if signup_notifications_stream:
state['realm_signup_notifications_stream_id'] = signup_notifications_stream.id
else:
state['realm_signup_notifications_stream_id'] = -1
if want('realm_domains'):
state['realm_domains'] = get_realm_domains(realm)
if want('realm_emoji'):
state['realm_emoji'] = realm.get_emoji()
if want('realm_filters'):
state['realm_filters'] = realm_filters_for_realm(realm.id)
if want('realm_user_groups'):
state['realm_user_groups'] = user_groups_in_realm_serialized(realm)
if want('realm_user'):
state['raw_users'] = get_raw_user_data(
realm_id=realm.id,
client_gravatar=client_gravatar,
)
# For the user's own avatar URL, we force
# client_gravatar=False, since that saves some unnecessary
# client-side code for handling medium-size avatars. See #8253
# for details.
state['avatar_source'] = user_profile.avatar_source
state['avatar_url_medium'] = avatar_url(
user_profile,
medium=True,
client_gravatar=False,
)
state['avatar_url'] = avatar_url(
user_profile,
medium=False,
client_gravatar=False,
)
state['can_create_streams'] = user_profile.can_create_streams()
state['can_subscribe_other_users'] = user_profile.can_subscribe_other_users()
state['cross_realm_bots'] = list(get_cross_realm_dicts())
state['is_admin'] = user_profile.is_realm_admin
state['is_guest'] = user_profile.is_guest
state['user_id'] = user_profile.id
state['enter_sends'] = user_profile.enter_sends
state['email'] = user_profile.email
state['delivery_email'] = user_profile.delivery_email
state['full_name'] = user_profile.full_name
if want('realm_bot'):
state['realm_bots'] = get_owned_bot_dicts(user_profile)
# This does not yet have an apply_event counterpart, since currently,
# new entries for EMBEDDED_BOTS can only be added directly in the codebase.
if want('realm_embedded_bots'):
realm_embedded_bots = []
for bot in EMBEDDED_BOTS:
realm_embedded_bots.append({'name': bot.name,
'config': load_bot_config_template(bot.name)})
state['realm_embedded_bots'] = realm_embedded_bots
if want('subscription'):
subscriptions, unsubscribed, never_subscribed = gather_subscriptions_helper(
user_profile, include_subscribers=include_subscribers)
state['subscriptions'] = subscriptions
state['unsubscribed'] = unsubscribed
state['never_subscribed'] = never_subscribed
if want('update_message_flags') and want('message'):
# Keeping unread_msgs updated requires both message flag updates and
# message updates. This is due to the fact that new messages will not
# generate a flag update so we need to use the flags field in the
# message event.
state['raw_unread_msgs'] = get_raw_unread_data(user_profile)
if want('starred_messages'):
state['starred_messages'] = get_starred_message_ids(user_profile)
if want('stream'):
state['streams'] = do_get_streams(user_profile)
state['stream_name_max_length'] = Stream.MAX_NAME_LENGTH
state['stream_description_max_length'] = Stream.MAX_DESCRIPTION_LENGTH
if want('default_streams'):
state['realm_default_streams'] = streams_to_dicts_sorted(
get_default_streams_for_realm(realm.id))
if want('default_stream_groups'):
state['realm_default_stream_groups'] = default_stream_groups_to_dicts_sorted(
get_default_stream_groups(realm))
if want('update_display_settings'):
for prop in UserProfile.property_types:
state[prop] = getattr(user_profile, prop)
state['emojiset_choices'] = user_profile.emojiset_choices()
if want('update_global_notifications'):
for notification in UserProfile.notification_setting_types:
state[notification] = getattr(user_profile, notification)
state['available_notification_sounds'] = get_available_notification_sounds()
if want('zulip_version'):
state['zulip_version'] = ZULIP_VERSION
return state
def remove_message_id_from_unread_mgs(state: Dict[str, Dict[str, Any]],
message_id: int) -> None:
raw_unread = state['raw_unread_msgs']
for key in ['pm_dict', 'stream_dict', 'huddle_dict']:
raw_unread[key].pop(message_id, None)
raw_unread['unmuted_stream_msgs'].discard(message_id)
raw_unread['mentions'].discard(message_id)
def apply_events(state: Dict[str, Any], events: Iterable[Dict[str, Any]],
user_profile: UserProfile, client_gravatar: bool,
include_subscribers: bool = True,
fetch_event_types: Optional[Iterable[str]] = None) -> None:
for event in events:
if fetch_event_types is not None and event['type'] not in fetch_event_types:
# TODO: continuing here is not, most precisely, correct.
# In theory, an event of one type, e.g. `realm_user`,
# could modify state that doesn't come from that
# `fetch_event_types` value, e.g. the `our_person` part of
# that code path. But it should be extremely rare, and
# fixing that will require a nontrivial refactor of
# `apply_event`. For now, be careful in your choice of
# `fetch_event_types`.
continue
apply_event(state, event, user_profile, client_gravatar, include_subscribers)
def apply_event(state: Dict[str, Any],
event: Dict[str, Any],
user_profile: UserProfile,
client_gravatar: bool,
include_subscribers: bool) -> None:
if event['type'] == "message":
state['max_message_id'] = max(state['max_message_id'], event['message']['id'])
if 'raw_unread_msgs' in state:
apply_unread_message_event(
user_profile,
state['raw_unread_msgs'],
event['message'],
event['flags'],
)
elif event['type'] == "hotspots":
state['hotspots'] = event['hotspots']
elif event['type'] == "custom_profile_fields":
state['custom_profile_fields'] = event['fields']
elif event['type'] == "pointer":
state['pointer'] = max(state['pointer'], event['pointer'])
elif event['type'] == "realm_user":
person = event['person']
person_user_id = person['user_id']
if event['op'] == "add":
person = copy.deepcopy(person)
if client_gravatar:
if 'gravatar.com' in person['avatar_url']:
person['avatar_url'] = None
person['is_active'] = True
if not person['is_bot']:
person['profile_data'] = {}
state['raw_users'][person_user_id] = person
elif event['op'] == "remove":
state['raw_users'][person_user_id]['is_active'] = False
elif event['op'] == 'update':
is_me = (person_user_id == user_profile.id)
if is_me:
if ('avatar_url' in person and 'avatar_url' in state):
state['avatar_source'] = person['avatar_source']
state['avatar_url'] = person['avatar_url']
state['avatar_url_medium'] = person['avatar_url_medium']
for field in ['is_admin', 'delivery_email', 'email', 'full_name']:
if field in person and field in state:
state[field] = person[field]
# In the unlikely event that the current user
# just changed to/from being an admin, we need
# to add/remove the data on all bots in the
# realm. This is ugly and probably better
# solved by removing the all-realm-bots data
# given to admin users from this flow.
if ('is_admin' in person and 'realm_bots' in state):
prev_state = state['raw_users'][user_profile.id]
was_admin = prev_state['is_admin']
now_admin = person['is_admin']
if was_admin and not now_admin:
state['realm_bots'] = []
if not was_admin and now_admin:
state['realm_bots'] = get_owned_bot_dicts(user_profile)
if client_gravatar and 'avatar_url' in person:
# Respect the client_gravatar setting in the `users` data.
if 'gravatar.com' in person['avatar_url']:
person['avatar_url'] = None
person['avatar_url_medium'] = None
if person_user_id in state['raw_users']:
p = state['raw_users'][person_user_id]
for field in p:
if field in person:
p[field] = person[field]
if 'custom_profile_field' in person:
custom_field_id = person['custom_profile_field']['id']
custom_field_new_value = person['custom_profile_field']['value']
p['profile_data'][custom_field_id] = custom_field_new_value
elif event['type'] == 'realm_bot':
if event['op'] == 'add':
state['realm_bots'].append(event['bot'])
if event['op'] == 'remove':
email = event['bot']['email']
for bot in state['realm_bots']:
if bot['email'] == email:
bot['is_active'] = False
if event['op'] == 'delete':
state['realm_bots'] = [item for item
in state['realm_bots'] if item['email'] != event['bot']['email']]
if event['op'] == 'update':
for bot in state['realm_bots']:
if bot['email'] == event['bot']['email']:
if 'owner_id' in event['bot']:
bot['owner'] = get_user_profile_by_id(event['bot']['owner_id']).email
else:
bot.update(event['bot'])
elif event['type'] == 'stream':
if event['op'] == 'create':
for stream in event['streams']:
if not stream['invite_only']:
stream_data = copy.deepcopy(stream)
if include_subscribers:
stream_data['subscribers'] = []
stream_data['stream_weekly_traffic'] = None
stream_data['is_old_stream'] = False
stream_data['is_announcement_only'] = False
# Add stream to never_subscribed (if not invite_only)
state['never_subscribed'].append(stream_data)
state['streams'].append(stream)
state['streams'].sort(key=lambda elt: elt["name"])
if event['op'] == 'delete':
deleted_stream_ids = {stream['stream_id'] for stream in event['streams']}
state['streams'] = [s for s in state['streams'] if s['stream_id'] not in deleted_stream_ids]
state['never_subscribed'] = [stream for stream in state['never_subscribed'] if
stream['stream_id'] not in deleted_stream_ids]
if event['op'] == 'update':
# For legacy reasons, we call stream data 'subscriptions' in
# the state var here, for the benefit of the JS code.
for obj in state['subscriptions']:
if obj['name'].lower() == event['name'].lower():
obj[event['property']] = event['value']
# Also update the pure streams data
for stream in state['streams']:
if stream['name'].lower() == event['name'].lower():
prop = event['property']
if prop in stream:
stream[prop] = event['value']
elif event['op'] == "occupy":
state['streams'] += event['streams']
elif event['op'] == "vacate":
stream_ids = [s["stream_id"] for s in event['streams']]
state['streams'] = [s for s in state['streams'] if s["stream_id"] not in stream_ids]
elif event['type'] == 'default_streams':
state['realm_default_streams'] = event['default_streams']
elif event['type'] == 'default_stream_groups':
state['realm_default_stream_groups'] = event['default_stream_groups']
elif event['type'] == 'realm':
if event['op'] == "update":
field = 'realm_' + event['property']
state[field] = event['value']
# Tricky interaction: Whether we can create streams can get changed here.
if (field in ['realm_create_stream_by_admins_only',
'realm_waiting_period_threshold']) and 'can_create_streams' in state:
state['can_create_streams'] = user_profile.can_create_streams()
state['can_subscribe_other_users'] = user_profile.can_subscribe_other_users()
elif event['op'] == "update_dict":
for key, value in event['data'].items():
state['realm_' + key] = value
# It's a bit messy, but this is where we need to
# update the state for whether password authentication
# is enabled on this server.
if key == 'authentication_methods':
state['realm_password_auth_enabled'] = (value['Email'] or value['LDAP'])
state['realm_email_auth_enabled'] = value['Email']
elif event['type'] == "subscription":
if not include_subscribers and event['op'] in ['peer_add', 'peer_remove']:
return
if event['op'] in ["add"]:
if not include_subscribers:
# Avoid letting 'subscribers' entries end up in the list
for i, sub in enumerate(event['subscriptions']):
event['subscriptions'][i] = copy.deepcopy(event['subscriptions'][i])
del event['subscriptions'][i]['subscribers']
def name(sub: Dict[str, Any]) -> str:
return sub['name'].lower()
if event['op'] == "add":
added_names = set(map(name, event["subscriptions"]))
was_added = lambda s: name(s) in added_names
# add the new subscriptions
state['subscriptions'] += event['subscriptions']
# remove them from unsubscribed if they had been there
state['unsubscribed'] = [s for s in state['unsubscribed'] if not was_added(s)]
# remove them from never_subscribed if they had been there
state['never_subscribed'] = [s for s in state['never_subscribed'] if not was_added(s)]
elif event['op'] == "remove":
removed_names = set(map(name, event["subscriptions"]))
was_removed = lambda s: name(s) in removed_names
# Find the subs we are affecting.
removed_subs = list(filter(was_removed, state['subscriptions']))
# Remove our user from the subscribers of the removed subscriptions.
if include_subscribers:
for sub in removed_subs:
sub['subscribers'] = [id for id in sub['subscribers'] if id != user_profile.id]
# We must effectively copy the removed subscriptions from subscriptions to
# unsubscribed, since we only have the name in our data structure.
state['unsubscribed'] += removed_subs
# Now filter out the removed subscriptions from subscriptions.
state['subscriptions'] = [s for s in state['subscriptions'] if not was_removed(s)]
elif event['op'] == 'update':
for sub in state['subscriptions']:
if sub['name'].lower() == event['name'].lower():
sub[event['property']] = event['value']
elif event['op'] == 'peer_add':
user_id = event['user_id']
for sub in state['subscriptions']:
if (sub['name'] in event['subscriptions'] and
user_id not in sub['subscribers']):
sub['subscribers'].append(user_id)
for sub in state['never_subscribed']:
if (sub['name'] in event['subscriptions'] and
user_id not in sub['subscribers']):
sub['subscribers'].append(user_id)
elif event['op'] == 'peer_remove':
user_id = event['user_id']
for sub in state['subscriptions']:
if (sub['name'] in event['subscriptions'] and
user_id in sub['subscribers']):
sub['subscribers'].remove(user_id)
elif event['type'] == "presence":
# TODO: Add user_id to presence update events / state format!
presence_user_profile = get_user(event['email'], user_profile.realm)
state['presences'][event['email']] = UserPresence.get_status_dict_by_user(
presence_user_profile)[event['email']]
elif event['type'] == "update_message":
# We don't return messages in /register, so we don't need to
# do anything for content updates, but we may need to update
# the unread_msgs data if the topic of an unread message changed.
if TOPIC_NAME in event:
stream_dict = state['raw_unread_msgs']['stream_dict']
topic = event[TOPIC_NAME]
for message_id in event['message_ids']:
if message_id in stream_dict:
stream_dict[message_id]['topic'] = topic
elif event['type'] == "delete_message":
max_message = Message.objects.filter(
usermessage__user_profile=user_profile).order_by('-id').first()
if max_message:
state['max_message_id'] = max_message.id
else:
state['max_message_id'] = -1
remove_id = event['message_id']
remove_message_id_from_unread_mgs(state, remove_id)
elif event['type'] == "reaction":
# The client will get the message with the reactions directly
pass
elif event['type'] == "submessage":
# The client will get submessages with their messages
pass
elif event['type'] == 'typing':
# Typing notification events are transient and thus ignored
pass
elif event['type'] == "attachment":
# Attachment events are just for updating the "uploads" UI;
# they are not sent directly.
pass
elif event['type'] == "update_message_flags":
# We don't return messages in `/register`, so most flags we
# can ignore, but we do need to update the unread_msgs data if
# unread state is changed.
if event['flag'] == 'read' and event['operation'] == 'add':
for remove_id in event['messages']:
remove_message_id_from_unread_mgs(state, remove_id)
if event['flag'] == 'starred' and event['operation'] == 'add':
state['starred_messages'] += event['messages']
if event['flag'] == 'starred' and event['operation'] == 'remove':
state['starred_messages'] = [message for message in state['starred_messages']
if not (message in event['messages'])]
elif event['type'] == "realm_domains":
if event['op'] == 'add':
state['realm_domains'].append(event['realm_domain'])
elif event['op'] == 'change':
for realm_domain in state['realm_domains']:
if realm_domain['domain'] == event['realm_domain']['domain']:
realm_domain['allow_subdomains'] = event['realm_domain']['allow_subdomains']
elif event['op'] == 'remove':
state['realm_domains'] = [realm_domain for realm_domain in state['realm_domains']
if realm_domain['domain'] != event['domain']]
elif event['type'] == "realm_emoji":
state['realm_emoji'] = event['realm_emoji']
elif event['type'] == "alert_words":
state['alert_words'] = event['alert_words']
elif event['type'] == "muted_topics":
state['muted_topics'] = event["muted_topics"]
elif event['type'] == "realm_filters":
state['realm_filters'] = event["realm_filters"]
elif event['type'] == "update_display_settings":
assert event['setting_name'] in UserProfile.property_types
state[event['setting_name']] = event['setting']
elif event['type'] == "update_global_notifications":
assert event['notification_name'] in UserProfile.notification_setting_types
state[event['notification_name']] = event['setting']
elif event['type'] == "invites_changed":
pass
elif event['type'] == "user_group":
if event['op'] == 'add':
state['realm_user_groups'].append(event['group'])
state['realm_user_groups'].sort(key=lambda group: group['id'])
elif event['op'] == 'update':
for user_group in state['realm_user_groups']:
if user_group['id'] == event['group_id']:
user_group.update(event['data'])
elif event['op'] == 'add_members':
for user_group in state['realm_user_groups']:
if user_group['id'] == event['group_id']:
user_group['members'].extend(event['user_ids'])
user_group['members'].sort()
elif event['op'] == 'remove_members':
for user_group in state['realm_user_groups']:
if user_group['id'] == event['group_id']:
members = set(user_group['members'])
user_group['members'] = list(members - set(event['user_ids']))
user_group['members'].sort()
elif event['op'] == 'remove':
state['realm_user_groups'] = [ug for ug in state['realm_user_groups']
if ug['id'] != event['group_id']]
else:
raise AssertionError("Unexpected event type %s" % (event['type'],))
def do_events_register(user_profile: UserProfile, user_client: Client,
apply_markdown: bool = True,
client_gravatar: bool = False,
event_types: Optional[Iterable[str]] = None,
queue_lifespan_secs: int = 0,
all_public_streams: bool = False,
include_subscribers: bool = True,
narrow: Iterable[Sequence[str]] = [],
fetch_event_types: Optional[Iterable[str]] = None) -> Dict[str, Any]:
# Technically we don't need to check this here because
# build_narrow_filter will check it, but it's nicer from an error
# handling perspective to do it before contacting Tornado
check_supported_events_narrow_filter(narrow)
# Note that we pass event_types, not fetch_event_types here, since
# that's what controls which future events are sent.
queue_id = request_event_queue(user_profile, user_client, apply_markdown, client_gravatar,
queue_lifespan_secs, event_types, all_public_streams,
narrow=narrow)
if queue_id is None:
raise JsonableError(_("Could not allocate event queue"))
if fetch_event_types is not None:
event_types_set = set(fetch_event_types) # type: Optional[Set[str]]
elif event_types is not None:
event_types_set = set(event_types)
else:
event_types_set = None
# Fill up the UserMessage rows if a soft-deactivated user has returned
maybe_catch_up_soft_deactivated_user(user_profile)
ret = fetch_initial_state_data(user_profile, event_types_set, queue_id,
client_gravatar=client_gravatar,
include_subscribers=include_subscribers)
# Apply events that came in while we were fetching initial data
events = get_user_events(user_profile, queue_id, -1)
apply_events(ret, events, user_profile, include_subscribers=include_subscribers,
client_gravatar=client_gravatar,
fetch_event_types=fetch_event_types)
post_process_state(ret)
if len(events) > 0:
ret['last_event_id'] = events[-1]['id']
else:
ret['last_event_id'] = -1
return ret
def post_process_state(ret: Dict[str, Any]) -> None:
'''
NOTE:
Below is an example of post-processing initial state data AFTER we
apply events. For large payloads like `unread_msgs`, it's helpful
to have an intermediate data structure that is easy to manipulate
with O(1)-type operations as we apply events.
Then, only at the end, we put it in the form that's more appropriate
for clients.
'''
if 'raw_unread_msgs' in ret:
ret['unread_msgs'] = aggregate_unread_data(ret['raw_unread_msgs'])
del ret['raw_unread_msgs']
'''
See the note above; the same technique applies below.
'''
if 'raw_users' in ret:
user_dicts = list(ret['raw_users'].values())
ret['realm_users'] = [d for d in user_dicts if d['is_active']]
ret['realm_non_active_users'] = [d for d in user_dicts if not d['is_active']]
'''
Be aware that we do intentional aliasing in the below code.
We can now safely remove the `is_active` field from all the
dicts that got partitioned into the two lists above.
We remove the field because it's already implied, and sending
it to clients makes clients prone to bugs where they "trust"
the field but don't actually update in live updates. It also
wastes bandwidth.
'''
for d in user_dicts:
d.pop('is_active')
del ret['raw_users']
|
| demonchild2112/travis-test | refs/heads/master | grr/server/grr_response_server/prometheus_stats_collector_test.py | 2 |
#!/usr/bin/env python
"""Tests for PrometheusStatsCollector."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from absl import app
from grr_response_core.stats import stats_test_utils
from grr_response_server import prometheus_stats_collector
from grr.test_lib import test_lib
class PrometheusStatsCollectorTest(stats_test_utils.StatsCollectorTest):
def _CreateStatsCollector(self):
return prometheus_stats_collector.PrometheusStatsCollector()
def main(argv):
test_lib.main(argv)
if __name__ == "__main__":
app.run(main)
|
| opennode/nodeconductor-assembly-waldur | refs/heads/develop | src/waldur_openstack/openstack/tests/test_floating_ips.py | 1 |
from rest_framework import status, test
from .. import models
from . import factories, fixtures
class FloatingIPListRetrieveTestCase(test.APITransactionTestCase):
def setUp(self):
self.fixture = fixtures.OpenStackFixture()
self.active_ip = factories.FloatingIPFactory(
runtime_state='ACTIVE', service_project_link=self.fixture.openstack_spl
)
self.down_ip = factories.FloatingIPFactory(
runtime_state='DOWN', service_project_link=self.fixture.openstack_spl
)
self.other_ip = factories.FloatingIPFactory(runtime_state='UNDEFINED')
def test_floating_ip_list_can_be_filtered_by_project(self):
data = {
'project': self.fixture.project.uuid.hex,
}
# when
self.client.force_authenticate(self.fixture.staff)
response = self.client.get(factories.FloatingIPFactory.get_list_url(), data)
# then
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_ip_uuids = [ip['uuid'] for ip in response.data]
expected_ip_uuids = [ip.uuid.hex for ip in (self.active_ip, self.down_ip)]
self.assertEqual(sorted(response_ip_uuids), sorted(expected_ip_uuids))
def test_floating_ip_list_can_be_filtered_by_service(self):
data = {
'service_uuid': self.fixture.openstack_service.uuid.hex,
}
# when
self.client.force_authenticate(self.fixture.staff)
response = self.client.get(factories.FloatingIPFactory.get_list_url(), data)
# then
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_ip_uuids = [ip['uuid'] for ip in response.data]
expected_ip_uuids = [ip.uuid.hex for ip in (self.active_ip, self.down_ip)]
self.assertEqual(sorted(response_ip_uuids), sorted(expected_ip_uuids))
def test_floating_ip_list_can_be_filtered_by_status(self):
data = {
'runtime_state': 'ACTIVE',
}
# when
self.client.force_authenticate(self.fixture.staff)
response = self.client.get(factories.FloatingIPFactory.get_list_url(), data)
# then
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_ip_uuids = [ip['uuid'] for ip in response.data]
expected_ip_uuids = [self.active_ip.uuid.hex]
self.assertEqual(response_ip_uuids, expected_ip_uuids)
def test_admin_receive_only_ips_from_his_project(self):
# when
self.client.force_authenticate(self.fixture.admin)
response = self.client.get(factories.FloatingIPFactory.get_list_url())
# then
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_ip_uuids = [ip['uuid'] for ip in response.data]
expected_ip_uuids = [ip.uuid.hex for ip in (self.active_ip, self.down_ip)]
self.assertEqual(sorted(response_ip_uuids), sorted(expected_ip_uuids))
def test_owner_receive_only_ips_from_his_customer(self):
# when
self.client.force_authenticate(self.fixture.owner)
response = self.client.get(factories.FloatingIPFactory.get_list_url())
# then
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_ip_uuids = [ip['uuid'] for ip in response.data]
expected_ip_uuids = [ip.uuid.hex for ip in (self.active_ip, self.down_ip)]
self.assertEqual(sorted(response_ip_uuids), sorted(expected_ip_uuids))
def test_regular_user_does_not_receive_any_ips(self):
# when
self.client.force_authenticate(self.fixture.user)
response = self.client.get(factories.FloatingIPFactory.get_list_url())
# then
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_ip_uuids = [ip['uuid'] for ip in response.data]
expected_ip_uuids = []
self.assertEqual(response_ip_uuids, expected_ip_uuids)
def test_admin_can_retrieve_floating_ip_from_his_project(self):
# when
self.client.force_authenticate(self.fixture.admin)
response = self.client.get(factories.FloatingIPFactory.get_url(self.active_ip))
# then
self.assertEqual(response.status_code, status.HTTP_200_OK)
self.assertEqual(response.data['uuid'], self.active_ip.uuid.hex)
def test_owner_can_not_retrieve_floating_ip_not_from_his_customer(self):
# when
self.client.force_authenticate(self.fixture.owner)
response = self.client.get(factories.FloatingIPFactory.get_url(self.other_ip))
# then
self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)
def test_floating_ip_metadata(self):
self.active_ip.state = models.FloatingIP.States.OK
self.active_ip.save()
url = factories.FloatingIPFactory.get_url(self.active_ip)
self.client.force_authenticate(self.fixture.staff)
response = self.client.options(url)
actions = dict(response.data['actions'])
self.assertEqual(
actions,
{
"destroy": {
"title": "Destroy",
"url": url,
"enabled": True,
"reason": None,
"destructive": True,
"type": "button",
"method": "DELETE",
},
"pull": {
"title": "Pull",
"url": url + "pull/",
"enabled": True,
"reason": None,
"destructive": False,
"type": "button",
"method": "POST",
},
},
)
|
| codefisher/web_games | refs/heads/master | jeopardy/templatetags/jeopardy.py | 1 |
from django import template
register = template.Library()
@register.inclusion_tag('jeopardy/cell.html', takes_context=True)
def jeopardy_game_cell(context):
game = context['game']
topic = context['topic']
point = context['point']
questions_done = context['questions_done']
return {
"done": [topic.pk, point.pk] in questions_done,
"game": game,
"topic": topic,
"point": point
}
|
| MichaelNedzelsky/intellij-community | refs/heads/master | python/testData/inspections/unusedImport/subpackageInInitPy/package1/__init__.py | 83 |
from .module_b import ClassB
__all__ = module_b.__all__[:]
|
endlessm/chromium-browser
|
refs/heads/master
|
components/flags_ui/PRESUBMIT.py
|
21
|
# Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
def _CommonChecks(input_api, output_api):
results = []
try:
import sys
old_sys_path = sys.path[:]
cwd = input_api.PresubmitLocalPath()
sys.path += [input_api.os_path.join(cwd, '..', '..', 'tools')]
import web_dev_style.presubmit_support
results += web_dev_style.presubmit_support.CheckStyle(input_api, output_api)
finally:
sys.path = old_sys_path
return results
def CheckChangeOnUpload(input_api, output_api):
return _CommonChecks(input_api, output_api)
def CheckChangeOnCommit(input_api, output_api):
return _CommonChecks(input_api, output_api)
|
JIoJIaJIu/servo
|
refs/heads/master
|
tests/wpt/web-platform-tests/XMLHttpRequest/resources/chunked.py
|
219
|
def main(request, response):
chunks = ["First chunk\r\n",
"Second chunk\r\n",
"Yet another (third) chunk\r\n",
"Yet another (fourth) chunk\r\n",
]
    response.headers.set("Transfer-Encoding", "chunked")
    response.headers.set("Trailer", "X-Test-Me")
    response.headers.set("Content-Type", "text/plain")
response.write_status_headers()
for value in chunks:
response.writer.write("%x\r\n" % len(value))
response.writer.write(value)
response.writer.write("\r\n")
response.writer.write("0\r\n")
response.writer.write("X-Test-Me: Trailer header value\r\n\r\n")
|
mingderwang/angr
|
refs/heads/master
|
tests/test_explorer.py
|
10
|
import nose
import angr
import os
location = str(os.path.join(os.path.dirname(os.path.realpath(__file__)), '../../binaries/tests'))
def test_xpl():
p = angr.Project(os.path.join(location, "x86_64/all"))
pltaddr = p.loader.main_bin.get_call_stub_addr("printf")
nose.tools.assert_equal(pltaddr, 0x400560)
a = p.surveyors.Explorer(find=(0x400560,), num_find=4)
a.run()
nose.tools.assert_equal(len(a.found), 4)
if __name__ == '__main__':
test_xpl()
|
allotria/intellij-community
|
refs/heads/master
|
python/testData/dotNet/expected.skeleton.Deep.py
|
79
|
# encoding: utf-8
# module SingleNameSpace.Some.Deep calls itself Deep
# from SingleNameSpace, Version=1.0.0.0, Culture=neutral, PublicKeyToken=null
# by generatorXXX
# no doc
# no imports
# no functions
# classes
class WeHaveClass(object):
""" WeHaveClass() """
MyClass = None
|
t-wissmann/qutebrowser
|
refs/heads/master
|
tests/unit/mainwindow/statusbar/test_percentage.py
|
2
|
# vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2020 Florian Bruhin (The Compiler) <mail@qutebrowser.org>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Test Percentage widget."""
import pytest
from qutebrowser.mainwindow.statusbar.percentage import Percentage
@pytest.fixture
def percentage(qtbot):
"""Fixture providing a Percentage widget."""
widget = Percentage()
# Force immediate update of percentage widget
widget._set_text.set_delay(-1)
qtbot.add_widget(widget)
return widget
@pytest.mark.parametrize('y, raw, expected', [
(0, False, '[top]'),
(100, False, '[bot]'),
(75, False, '[75%]'),
(25, False, '[25%]'),
(5, False, '[05%]'),
(None, False, '[???]'),
(0, True, '[top]'),
(100, True, '[bot]'),
(75, True, '[75]'),
(25, True, '[25]'),
(5, True, '[05]'),
(None, True, '[???]'),
])
def test_percentage_text(percentage, y, raw, expected):
"""Test text displayed by the widget based on the y position of a page.
Args:
y: y position of the page as an int in the range [0, 100].
parametrized.
        raw: if True, the raw scroll position is shown without a '%' sign.
             parametrized.
        expected: expected text given y position. parametrized.
"""
if raw:
percentage.set_raw()
percentage.set_perc(x=None, y=y)
assert percentage.text() == expected
def test_tab_change(percentage, fake_web_tab):
"""Make sure the percentage gets changed correctly when switching tabs."""
percentage.set_perc(x=None, y=10)
tab = fake_web_tab(scroll_pos_perc=(0, 20))
percentage.on_tab_changed(tab)
assert percentage.text() == '[20%]'
|
jcasner/nupic
|
refs/heads/master
|
tests/unit/nupic/encoders/adaptivescalar_test.py
|
28
|
#!/usr/bin/env python
# ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
import tempfile
import unittest
import numpy
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.encoders.adaptivescalar import AdaptiveScalarEncoder
from nupic.encoders.base import defaultDtype
try:
import capnp
except ImportError:
capnp = None
if capnp:
from nupic.encoders.adaptivescalar_capnp import AdaptiveScalarEncoderProto
class AdaptiveScalarTest(unittest.TestCase):
"""Tests for AdaptiveScalarEncoder"""
def setUp(self):
# forced: it's strongly recommended to use w>=21, in the example we force
    # skip the check for readability
self._l = AdaptiveScalarEncoder(name="scalar", n=14, w=5, minval=1,
maxval=10, periodic=False, forced=True)
def testMissingValues(self):
"""missing values"""
# forced: it's strongly recommended to use w>=21, in the example we force
    # skip the check for readability.
mv = AdaptiveScalarEncoder(name="mv", n=14, w=3, minval=1, maxval=8,
periodic=False, forced=True)
empty = mv.encode(SENTINEL_VALUE_FOR_MISSING_DATA)
self.assertEqual(empty.sum(), 0)
def testNonPeriodicEncoderMinMaxSpec(self):
"""Non-periodic encoder, min and max specified"""
self.assertTrue(numpy.array_equal(
self._l.encode(1),
numpy.array([1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
dtype=defaultDtype)))
self.assertTrue(numpy.array_equal(
self._l.encode(2),
numpy.array([0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0],
dtype=defaultDtype)))
self.assertTrue(numpy.array_equal(
self._l.encode(10),
numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1],
dtype=defaultDtype)))
def testTopDownDecode(self):
"""Test the input description generation and topDown decoding"""
l = self._l
v = l.minval
while v < l.maxval:
output = l.encode(v)
decoded = l.decode(output)
(fieldsDict, _) = decoded
self.assertEqual(len(fieldsDict), 1)
(ranges, _) = fieldsDict.values()[0]
self.assertEqual(len(ranges), 1)
(rangeMin, rangeMax) = ranges[0]
self.assertEqual(rangeMin, rangeMax)
self.assertLess(abs(rangeMin - v), l.resolution)
topDown = l.topDownCompute(output)[0]
self.assertLessEqual(abs(topDown.value - v), l.resolution)
# Test bucket support
bucketIndices = l.getBucketIndices(v)
topDown = l.getBucketInfo(bucketIndices)[0]
self.assertLessEqual(abs(topDown.value - v), l.resolution / 2)
self.assertEqual(topDown.value, l.getBucketValues()[bucketIndices[0]])
self.assertEqual(topDown.scalar, topDown.value)
self.assertTrue(numpy.array_equal(topDown.encoding, output))
# Next value
v += l.resolution / 4
def testFillHoles(self):
"""Make sure we can fill in holes"""
    l = self._l
decoded = l.decode(numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 1, 1]))
(fieldsDict, _) = decoded
self.assertEqual(len(fieldsDict), 1)
(ranges, _) = fieldsDict.values()[0]
self.assertEqual(len(ranges), 1)
self.assertSequenceEqual(ranges[0], [10, 10])
decoded = l.decode(numpy.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0, 1]))
(fieldsDict, _) = decoded
self.assertEqual(len(fieldsDict), 1)
(ranges, _) = fieldsDict.values()[0]
self.assertEqual(len(ranges), 1)
self.assertSequenceEqual(ranges[0], [10, 10])
def testNonPeriodicEncoderMinMaxNotSpec(self):
"""Non-periodic encoder, min and max not specified"""
l = AdaptiveScalarEncoder(name="scalar", n=14, w=5, minval=None,
maxval=None, periodic=False, forced=True)
def _verify(v, encoded, expV=None):
if expV is None:
expV = v
self.assertTrue(numpy.array_equal(
l.encode(v),
numpy.array(encoded, dtype=defaultDtype)))
self.assertLessEqual(
abs(l.getBucketInfo(l.getBucketIndices(v))[0].value - expV),
l.resolution/2)
def _verifyNot(v, encoded):
self.assertFalse(numpy.array_equal(
l.encode(v), numpy.array(encoded, dtype=defaultDtype)))
_verify(1, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
_verify(2, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
_verify(10, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
_verify(3, [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0])
_verify(-9, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
_verify(-8, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
_verify(-7, [0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0])
_verify(-6, [0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0])
_verify(-5, [0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0])
_verify(0, [0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
_verify(8, [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0])
_verify(8, [0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 0])
_verify(10, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
_verify(11, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
_verify(12, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
_verify(13, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
_verify(14, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
_verify(15, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
    # Test switching learning off
l = AdaptiveScalarEncoder(name="scalar", n=14, w=5, minval=1, maxval=10,
periodic=False, forced=True)
_verify(1, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
_verify(10, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
_verify(20, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
_verify(10, [0, 0, 0, 0, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0])
l.setLearning(False)
_verify(30, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1], expV=20)
_verify(20, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
_verify(-10, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], expV=1)
_verify(-1, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0], expV=1)
l.setLearning(True)
_verify(30, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
_verifyNot(20, [0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1])
_verify(-10, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
_verifyNot(-1, [1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0])
def testSetFieldStats(self):
"""Test setting the min and max using setFieldStats"""
def _dumpParams(enc):
return (enc.n, enc.w, enc.minval, enc.maxval, enc.resolution,
enc._learningEnabled, enc.recordNum,
enc.radius, enc.rangeInternal, enc.padding, enc.nInternal)
sfs = AdaptiveScalarEncoder(name='scalar', n=14, w=5, minval=1, maxval=10,
periodic=False, forced=True)
reg = AdaptiveScalarEncoder(name='scalar', n=14, w=5, minval=1, maxval=100,
periodic=False, forced=True)
self.assertNotEqual(_dumpParams(sfs), _dumpParams(reg),
("Params should not be equal, since the two encoders "
"were instantiated with different values."))
# set the min and the max using sFS to 1,100 respectively.
sfs.setFieldStats("this", {"this":{"min":1, "max":100}})
#Now the parameters for both should be the same
self.assertEqual(_dumpParams(sfs), _dumpParams(reg),
("Params should now be equal, but they are not. sFS "
"should be equivalent to initialization."))
@unittest.skipUnless(
capnp, "pycapnp is not installed, skipping serialization test.")
def testReadWrite(self):
originalValue = self._l.encode(1)
proto1 = AdaptiveScalarEncoderProto.new_message()
self._l.write(proto1)
# Write the proto to a temp file and read it back into a new proto
with tempfile.TemporaryFile() as f:
proto1.write(f)
f.seek(0)
proto2 = AdaptiveScalarEncoderProto.read(f)
encoder = AdaptiveScalarEncoder.read(proto2)
self.assertIsInstance(encoder, AdaptiveScalarEncoder)
self.assertEqual(encoder.recordNum, self._l.recordNum)
self.assertDictEqual(encoder.slidingWindow.__dict__,
self._l.slidingWindow.__dict__)
self.assertEqual(encoder.w, self._l.w)
self.assertEqual(encoder.minval, self._l.minval)
self.assertEqual(encoder.maxval, self._l.maxval)
self.assertEqual(encoder.periodic, self._l.periodic)
self.assertEqual(encoder.n, self._l.n)
self.assertEqual(encoder.radius, self._l.radius)
self.assertEqual(encoder.resolution, self._l.resolution)
self.assertEqual(encoder.name, self._l.name)
self.assertEqual(encoder.verbosity, self._l.verbosity)
self.assertEqual(encoder.clipInput, self._l.clipInput)
self.assertTrue(numpy.array_equal(encoder.encode(1), originalValue))
self.assertEqual(self._l.decode(encoder.encode(1)),
encoder.decode(self._l.encode(1)))
# Feed in a new value and ensure the encodings match
result1 = self._l.encode(7)
result2 = encoder.encode(7)
self.assertTrue(numpy.array_equal(result1, result2))
if __name__ == '__main__':
unittest.main()
|
kenshay/ImageScript
|
refs/heads/master
|
ProgramData/SystemFiles/Python/Lib/site-packages/django/contrib/contenttypes/fields.py
|
36
|
from __future__ import unicode_literals
from collections import defaultdict
from django.contrib.contenttypes.models import ContentType
from django.core import checks
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import DEFAULT_DB_ALIAS, models, router, transaction
from django.db.models import DO_NOTHING
from django.db.models.base import ModelBase, make_foreign_order_accessors
from django.db.models.fields.related import (
ForeignObject, ForeignObjectRel, ReverseManyToOneDescriptor,
lazy_related_operation,
)
from django.db.models.query_utils import PathInfo
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.functional import cached_property
@python_2_unicode_compatible
class GenericForeignKey(object):
"""
Provide a generic many-to-one relation through the ``content_type`` and
``object_id`` fields.
This class also doubles as an accessor to the related object (similar to
ForwardManyToOneDescriptor) by adding itself as a model attribute.
"""
# Field flags
auto_created = False
concrete = False
editable = False
hidden = False
is_relation = True
many_to_many = False
many_to_one = True
one_to_many = False
one_to_one = False
related_model = None
remote_field = None
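    # Illustrative sketch only (model and field names below are hypothetical, not
    # part of this module): a GenericForeignKey is normally declared alongside its
    # two backing columns, and assigning to it fills in both of them::
    #
    #     class TaggedItem(models.Model):
    #         content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
    #         object_id = models.PositiveIntegerField()
    #         content_object = GenericForeignKey('content_type', 'object_id')
    #
    #     item = TaggedItem(content_object=some_model_instance)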
def __init__(self, ct_field='content_type', fk_field='object_id', for_concrete_model=True):
self.ct_field = ct_field
self.fk_field = fk_field
self.for_concrete_model = for_concrete_model
self.editable = False
self.rel = None
self.column = None
def contribute_to_class(self, cls, name, **kwargs):
self.name = name
self.model = cls
self.cache_attr = "_%s_cache" % name
cls._meta.add_field(self, private=True)
setattr(cls, name, self)
def get_filter_kwargs_for_object(self, obj):
"""See corresponding method on Field"""
return {
self.fk_field: getattr(obj, self.fk_field),
self.ct_field: getattr(obj, self.ct_field),
}
def get_forward_related_filter(self, obj):
"""See corresponding method on RelatedField"""
return {
self.fk_field: obj.pk,
self.ct_field: ContentType.objects.get_for_model(obj).pk,
}
def __str__(self):
model = self.model
app = model._meta.app_label
return '%s.%s.%s' % (app, model._meta.object_name, self.name)
def check(self, **kwargs):
errors = []
errors.extend(self._check_field_name())
errors.extend(self._check_object_id_field())
errors.extend(self._check_content_type_field())
return errors
def _check_field_name(self):
if self.name.endswith("_"):
return [
checks.Error(
'Field names must not end with an underscore.',
obj=self,
id='fields.E001',
)
]
else:
return []
def _check_object_id_field(self):
try:
self.model._meta.get_field(self.fk_field)
except FieldDoesNotExist:
return [
checks.Error(
"The GenericForeignKey object ID references the non-existent field '%s'." % self.fk_field,
obj=self,
id='contenttypes.E001',
)
]
else:
return []
def _check_content_type_field(self):
"""
Check if field named `field_name` in model `model` exists and is a
valid content_type field (is a ForeignKey to ContentType).
"""
try:
field = self.model._meta.get_field(self.ct_field)
except FieldDoesNotExist:
return [
checks.Error(
"The GenericForeignKey content type references the non-existent field '%s.%s'." % (
self.model._meta.object_name, self.ct_field
),
obj=self,
id='contenttypes.E002',
)
]
else:
if not isinstance(field, models.ForeignKey):
return [
checks.Error(
"'%s.%s' is not a ForeignKey." % (
self.model._meta.object_name, self.ct_field
),
hint=(
"GenericForeignKeys must use a ForeignKey to "
"'contenttypes.ContentType' as the 'content_type' field."
),
obj=self,
id='contenttypes.E003',
)
]
elif field.remote_field.model != ContentType:
return [
checks.Error(
"'%s.%s' is not a ForeignKey to 'contenttypes.ContentType'." % (
self.model._meta.object_name, self.ct_field
),
hint=(
"GenericForeignKeys must use a ForeignKey to "
"'contenttypes.ContentType' as the 'content_type' field."
),
obj=self,
id='contenttypes.E004',
)
]
else:
return []
def get_content_type(self, obj=None, id=None, using=None):
if obj is not None:
return ContentType.objects.db_manager(obj._state.db).get_for_model(
obj, for_concrete_model=self.for_concrete_model)
elif id is not None:
return ContentType.objects.db_manager(using).get_for_id(id)
else:
# This should never happen. I love comments like this, don't you?
raise Exception("Impossible arguments to GFK.get_content_type!")
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is not None:
raise ValueError("Custom queryset can't be used for this lookup.")
# For efficiency, group the instances by content type and then do one
# query per model
fk_dict = defaultdict(set)
# We need one instance for each group in order to get the right db:
instance_dict = {}
ct_attname = self.model._meta.get_field(self.ct_field).get_attname()
for instance in instances:
# We avoid looking for values if either ct_id or fkey value is None
ct_id = getattr(instance, ct_attname)
if ct_id is not None:
fk_val = getattr(instance, self.fk_field)
if fk_val is not None:
fk_dict[ct_id].add(fk_val)
instance_dict[ct_id] = instance
ret_val = []
for ct_id, fkeys in fk_dict.items():
instance = instance_dict[ct_id]
ct = self.get_content_type(id=ct_id, using=instance._state.db)
ret_val.extend(ct.get_all_objects_for_this_type(pk__in=fkeys))
# For doing the join in Python, we have to match both the FK val and the
# content type, so we use a callable that returns a (fk, class) pair.
def gfk_key(obj):
ct_id = getattr(obj, ct_attname)
if ct_id is None:
return None
else:
model = self.get_content_type(id=ct_id,
using=obj._state.db).model_class()
return (model._meta.pk.get_prep_value(getattr(obj, self.fk_field)),
model)
return (ret_val,
lambda obj: (obj._get_pk_val(), obj.__class__),
gfk_key,
True,
self.name)
def is_cached(self, instance):
return hasattr(instance, self.cache_attr)
def __get__(self, instance, cls=None):
if instance is None:
return self
# Don't use getattr(instance, self.ct_field) here because that might
# reload the same ContentType over and over (#5570). Instead, get the
# content type ID here, and later when the actual instance is needed,
# use ContentType.objects.get_for_id(), which has a global cache.
f = self.model._meta.get_field(self.ct_field)
ct_id = getattr(instance, f.get_attname(), None)
pk_val = getattr(instance, self.fk_field)
try:
rel_obj = getattr(instance, self.cache_attr)
except AttributeError:
rel_obj = None
else:
if rel_obj and (ct_id != self.get_content_type(obj=rel_obj, using=instance._state.db).id or
rel_obj._meta.pk.to_python(pk_val) != rel_obj._get_pk_val()):
rel_obj = None
if rel_obj is not None:
return rel_obj
if ct_id is not None:
ct = self.get_content_type(id=ct_id, using=instance._state.db)
try:
rel_obj = ct.get_object_for_this_type(pk=pk_val)
except ObjectDoesNotExist:
pass
setattr(instance, self.cache_attr, rel_obj)
return rel_obj
def __set__(self, instance, value):
ct = None
fk = None
if value is not None:
ct = self.get_content_type(obj=value)
fk = value._get_pk_val()
setattr(instance, self.ct_field, ct)
setattr(instance, self.fk_field, fk)
setattr(instance, self.cache_attr, value)
class GenericRel(ForeignObjectRel):
"""
Used by GenericRelation to store information about the relation.
"""
def __init__(self, field, to, related_name=None, related_query_name=None, limit_choices_to=None):
super(GenericRel, self).__init__(
field, to,
related_name=related_query_name or '+',
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
on_delete=DO_NOTHING,
)
class GenericRelation(ForeignObject):
"""
Provide a reverse to a relation created by a GenericForeignKey.
"""
# Field flags
auto_created = False
many_to_many = False
many_to_one = False
one_to_many = True
one_to_one = False
rel_class = GenericRel
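    # Illustrative sketch only (hypothetical model names): declaring
    #
    #     class Bookmark(models.Model):
    #         tags = GenericRelation(TaggedItem)
    #
    # exposes ``bookmark.tags`` as a reverse manager over the TaggedItem rows whose
    # content_type/object_id columns point at that Bookmark instance.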
def __init__(self, to, object_id_field='object_id', content_type_field='content_type',
for_concrete_model=True, related_query_name=None, limit_choices_to=None, **kwargs):
kwargs['rel'] = self.rel_class(
self, to,
related_query_name=related_query_name,
limit_choices_to=limit_choices_to,
)
kwargs['blank'] = True
kwargs['on_delete'] = models.CASCADE
kwargs['editable'] = False
kwargs['serialize'] = False
# This construct is somewhat of an abuse of ForeignObject. This field
# represents a relation from pk to object_id field. But, this relation
# isn't direct, the join is generated reverse along foreign key. So,
# the from_field is object_id field, to_field is pk because of the
# reverse join.
super(GenericRelation, self).__init__(
to, from_fields=[object_id_field], to_fields=[], **kwargs)
self.object_id_field_name = object_id_field
self.content_type_field_name = content_type_field
self.for_concrete_model = for_concrete_model
def check(self, **kwargs):
errors = super(GenericRelation, self).check(**kwargs)
errors.extend(self._check_generic_foreign_key_existence())
return errors
def _is_matching_generic_foreign_key(self, field):
"""
Return True if field is a GenericForeignKey whose content type and
object id fields correspond to the equivalent attributes on this
GenericRelation.
"""
return (
isinstance(field, GenericForeignKey) and
field.ct_field == self.content_type_field_name and
field.fk_field == self.object_id_field_name
)
def _check_generic_foreign_key_existence(self):
target = self.remote_field.model
if isinstance(target, ModelBase):
fields = target._meta.private_fields
if any(self._is_matching_generic_foreign_key(field) for field in fields):
return []
else:
return [
checks.Error(
"The GenericRelation defines a relation with the model "
"'%s.%s', but that model does not have a GenericForeignKey." % (
target._meta.app_label, target._meta.object_name
),
obj=self,
id='contenttypes.E004',
)
]
else:
return []
def resolve_related_fields(self):
self.to_fields = [self.model._meta.pk.name]
return [(self.remote_field.model._meta.get_field(self.object_id_field_name), self.model._meta.pk)]
def _get_path_info_with_parent(self):
"""
Return the path that joins the current model through any parent models.
The idea is that if you have a GFK defined on a parent model then we
need to join the parent model first, then the child model.
"""
# With an inheritance chain ChildTag -> Tag and Tag defines the
# GenericForeignKey, and a TaggedItem model has a GenericRelation to
# ChildTag, then we need to generate a join from TaggedItem to Tag
# (as Tag.object_id == TaggedItem.pk), and another join from Tag to
# ChildTag (as that is where the relation is to). Do this by first
# generating a join to the parent model, then generating joins to the
# child models.
path = []
opts = self.remote_field.model._meta
parent_opts = opts.get_field(self.object_id_field_name).model._meta
target = parent_opts.pk
path.append(PathInfo(self.model._meta, parent_opts, (target,), self.remote_field, True, False))
# Collect joins needed for the parent -> child chain. This is easiest
# to do if we collect joins for the child -> parent chain and then
# reverse the direction (call to reverse() and use of
# field.remote_field.get_path_info()).
parent_field_chain = []
while parent_opts != opts:
field = opts.get_ancestor_link(parent_opts.model)
parent_field_chain.append(field)
opts = field.remote_field.model._meta
parent_field_chain.reverse()
for field in parent_field_chain:
path.extend(field.remote_field.get_path_info())
return path
def get_path_info(self):
opts = self.remote_field.model._meta
object_id_field = opts.get_field(self.object_id_field_name)
if object_id_field.model != opts.model:
return self._get_path_info_with_parent()
else:
target = opts.pk
return [PathInfo(self.model._meta, opts, (target,), self.remote_field, True, False)]
def get_reverse_path_info(self):
opts = self.model._meta
from_opts = self.remote_field.model._meta
return [PathInfo(from_opts, opts, (opts.pk,), self, not self.unique, False)]
def value_to_string(self, obj):
qs = getattr(obj, self.name).all()
return force_text([instance._get_pk_val() for instance in qs])
def contribute_to_class(self, cls, name, **kwargs):
kwargs['private_only'] = True
super(GenericRelation, self).contribute_to_class(cls, name, **kwargs)
self.model = cls
setattr(cls, self.name, ReverseGenericManyToOneDescriptor(self.remote_field))
# Add get_RELATED_order() and set_RELATED_order() to the model this
# field belongs to, if the model on the other end of this relation
# is ordered with respect to its corresponding GenericForeignKey.
if not cls._meta.abstract:
def make_generic_foreign_order_accessors(related_model, model):
if self._is_matching_generic_foreign_key(model._meta.order_with_respect_to):
make_foreign_order_accessors(model, related_model)
lazy_related_operation(make_generic_foreign_order_accessors, self.model, self.remote_field.model)
def set_attributes_from_rel(self):
pass
def get_internal_type(self):
return "ManyToManyField"
def get_content_type(self):
"""
Return the content type associated with this field's model.
"""
return ContentType.objects.get_for_model(self.model,
for_concrete_model=self.for_concrete_model)
def get_extra_restriction(self, where_class, alias, remote_alias):
field = self.remote_field.model._meta.get_field(self.content_type_field_name)
contenttype_pk = self.get_content_type().pk
cond = where_class()
lookup = field.get_lookup('exact')(field.get_col(remote_alias), contenttype_pk)
cond.add(lookup, 'AND')
return cond
def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
"""
Return all objects related to ``objs`` via this ``GenericRelation``.
"""
return self.remote_field.model._base_manager.db_manager(using).filter(**{
"%s__pk" % self.content_type_field_name: ContentType.objects.db_manager(using).get_for_model(
self.model, for_concrete_model=self.for_concrete_model).pk,
"%s__in" % self.object_id_field_name: [obj.pk for obj in objs]
})
class ReverseGenericManyToOneDescriptor(ReverseManyToOneDescriptor):
"""
Accessor to the related objects manager on the one-to-many relation created
by GenericRelation.
In the example::
class Post(Model):
comments = GenericRelation(Comment)
``post.comments`` is a ReverseGenericManyToOneDescriptor instance.
"""
@cached_property
def related_manager_cls(self):
return create_generic_related_manager(
self.rel.model._default_manager.__class__,
self.rel,
)
def create_generic_related_manager(superclass, rel):
"""
Factory function to create a manager that subclasses another manager
(generally the default manager of a given model) and adds behaviors
specific to generic relations.
"""
class GenericRelatedObjectManager(superclass):
def __init__(self, instance=None):
super(GenericRelatedObjectManager, self).__init__()
self.instance = instance
self.model = rel.model
content_type = ContentType.objects.db_manager(instance._state.db).get_for_model(
instance, for_concrete_model=rel.field.for_concrete_model)
self.content_type = content_type
self.content_type_field_name = rel.field.content_type_field_name
self.object_id_field_name = rel.field.object_id_field_name
self.prefetch_cache_name = rel.field.attname
self.pk_val = instance._get_pk_val()
self.core_filters = {
'%s__pk' % self.content_type_field_name: content_type.id,
self.object_id_field_name: self.pk_val,
}
def __call__(self, **kwargs):
# We use **kwargs rather than a kwarg argument to enforce the
# `manager='manager_name'` syntax.
manager = getattr(self.model, kwargs.pop('manager'))
manager_class = create_generic_related_manager(manager.__class__, rel)
return manager_class(instance=self.instance)
do_not_call_in_templates = True
def __str__(self):
return repr(self)
def _apply_rel_filters(self, queryset):
"""
Filter the queryset for the instance this manager is bound to.
"""
db = self._db or router.db_for_read(self.model, instance=self.instance)
return queryset.using(db).filter(**self.core_filters)
def get_queryset(self):
try:
return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
except (AttributeError, KeyError):
queryset = super(GenericRelatedObjectManager, self).get_queryset()
return self._apply_rel_filters(queryset)
def get_prefetch_queryset(self, instances, queryset=None):
if queryset is None:
queryset = super(GenericRelatedObjectManager, self).get_queryset()
queryset._add_hints(instance=instances[0])
queryset = queryset.using(queryset._db or self._db)
query = {
'%s__pk' % self.content_type_field_name: self.content_type.id,
'%s__in' % self.object_id_field_name: set(obj._get_pk_val() for obj in instances)
}
# We (possibly) need to convert object IDs to the type of the
# instances' PK in order to match up instances:
object_id_converter = instances[0]._meta.pk.to_python
return (queryset.filter(**query),
lambda relobj: object_id_converter(getattr(relobj, self.object_id_field_name)),
lambda obj: obj._get_pk_val(),
False,
self.prefetch_cache_name)
def add(self, *objs, **kwargs):
bulk = kwargs.pop('bulk', True)
db = router.db_for_write(self.model, instance=self.instance)
def check_and_update_obj(obj):
if not isinstance(obj, self.model):
raise TypeError("'%s' instance expected, got %r" % (
self.model._meta.object_name, obj
))
setattr(obj, self.content_type_field_name, self.content_type)
setattr(obj, self.object_id_field_name, self.pk_val)
if bulk:
pks = []
for obj in objs:
if obj._state.adding or obj._state.db != db:
raise ValueError(
"%r instance isn't saved. Use bulk=False or save "
"the object first." % obj
)
check_and_update_obj(obj)
pks.append(obj.pk)
self.model._base_manager.using(db).filter(pk__in=pks).update(**{
self.content_type_field_name: self.content_type,
self.object_id_field_name: self.pk_val,
})
else:
with transaction.atomic(using=db, savepoint=False):
for obj in objs:
check_and_update_obj(obj)
obj.save()
add.alters_data = True
def remove(self, *objs, **kwargs):
if not objs:
return
bulk = kwargs.pop('bulk', True)
self._clear(self.filter(pk__in=[o.pk for o in objs]), bulk)
remove.alters_data = True
def clear(self, **kwargs):
bulk = kwargs.pop('bulk', True)
self._clear(self, bulk)
clear.alters_data = True
def _clear(self, queryset, bulk):
db = router.db_for_write(self.model, instance=self.instance)
queryset = queryset.using(db)
if bulk:
# `QuerySet.delete()` creates its own atomic block which
# contains the `pre_delete` and `post_delete` signal handlers.
queryset.delete()
else:
with transaction.atomic(using=db, savepoint=False):
for obj in queryset:
obj.delete()
_clear.alters_data = True
def set(self, objs, **kwargs):
# Force evaluation of `objs` in case it's a queryset whose value
# could be affected by `manager.clear()`. Refs #19816.
objs = tuple(objs)
bulk = kwargs.pop('bulk', True)
clear = kwargs.pop('clear', False)
db = router.db_for_write(self.model, instance=self.instance)
with transaction.atomic(using=db, savepoint=False):
if clear:
self.clear()
self.add(*objs, bulk=bulk)
else:
old_objs = set(self.using(db).all())
new_objs = []
for obj in objs:
if obj in old_objs:
old_objs.remove(obj)
else:
new_objs.append(obj)
self.remove(*old_objs)
self.add(*new_objs, bulk=bulk)
set.alters_data = True
def create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).using(db).create(**kwargs)
create.alters_data = True
def get_or_create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).using(db).get_or_create(**kwargs)
get_or_create.alters_data = True
def update_or_create(self, **kwargs):
kwargs[self.content_type_field_name] = self.content_type
kwargs[self.object_id_field_name] = self.pk_val
db = router.db_for_write(self.model, instance=self.instance)
return super(GenericRelatedObjectManager, self).using(db).update_or_create(**kwargs)
update_or_create.alters_data = True
return GenericRelatedObjectManager
|
cnsoft/kbengine-cocos2dx
|
refs/heads/cocos2dx-cnsoft
|
kbe/res/scripts/common/Lib/turtledemo/lindenmayer.py
|
164
|
#!/usr/bin/env python3
""" turtle-example-suite:
xtx_lindenmayer_indian.py
Each morning women in Tamil Nadu, in southern
India, place designs, created by using rice
flour and known as kolam on the thresholds of
their homes.
These can be described by Lindenmayer systems,
which can easily be implemented with turtle
graphics and Python.
Two examples are shown here:
(1) the snake kolam
(2) anklets of Krishna
Taken from Marcia Ascher: Mathematics
Elsewhere, An Exploration of Ideas Across
Cultures
"""
################################
# Mini Lindenmayer tool
###############################
from turtle import *
def replace( seq, replacementRules, n ):
for i in range(n):
newseq = ""
for element in seq:
newseq = newseq + replacementRules.get(element,element)
seq = newseq
return seq
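# For illustration: replace("b", {"b": "b+f+b"}, 2) expands
# "b" -> "b+f+b" -> "b+f+b+f+b+f+b"; symbols without a replacement rule
# ("+" and "f" here) are copied through unchanged on each iteration.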
def draw( commands, rules ):
for b in commands:
try:
rules[b]()
except TypeError:
try:
draw(rules[b], rules)
except:
pass
def main():
################################
# Example 1: Snake kolam
################################
def r():
right(45)
def l():
left(45)
def f():
forward(7.5)
snake_rules = {"-":r, "+":l, "f":f, "b":"f+f+f--f--f+f+f"}
snake_replacementRules = {"b": "b+f+b--f--b+f+b"}
snake_start = "b--f--b--f"
drawing = replace(snake_start, snake_replacementRules, 3)
reset()
speed(3)
tracer(1,0)
ht()
up()
backward(195)
down()
draw(drawing, snake_rules)
from time import sleep
sleep(3)
################################
# Example 2: Anklets of Krishna
################################
def A():
color("red")
circle(10,90)
def B():
from math import sqrt
color("black")
l = 5/sqrt(2)
forward(l)
circle(l, 270)
forward(l)
def F():
color("green")
forward(10)
krishna_rules = {"a":A, "b":B, "f":F}
krishna_replacementRules = {"a" : "afbfa", "b" : "afbfbfbfa" }
krishna_start = "fbfbfbfb"
reset()
speed(0)
tracer(3,0)
ht()
left(45)
drawing = replace(krishna_start, krishna_replacementRules, 3)
draw(drawing, krishna_rules)
tracer(1)
return "Done!"
if __name__=='__main__':
msg = main()
print(msg)
mainloop()
|
guewen/OpenUpgrade
|
refs/heads/master
|
addons/hr_timesheet_sheet/wizard/hr_timesheet_current.py
|
381
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hr_timesheet_current_open(osv.osv_memory):
_name = 'hr.timesheet.current.open'
_description = 'hr.timesheet.current.open'
def open_timesheet(self, cr, uid, ids, context=None):
ts = self.pool.get('hr_timesheet_sheet.sheet')
if context is None:
context = {}
view_type = 'form,tree'
user_ids = self.pool.get('hr.employee').search(cr, uid, [('user_id','=',uid)], context=context)
if not len(user_ids):
raise osv.except_osv(_('Error!'), _('Please create an employee and associate it with this user.'))
ids = ts.search(cr, uid, [('user_id','=',uid),('state','in',('draft','new')),('date_from','<=',time.strftime('%Y-%m-%d')), ('date_to','>=',time.strftime('%Y-%m-%d'))], context=context)
if len(ids) > 1:
view_type = 'tree,form'
domain = "[('id','in',["+','.join(map(str, ids))+"]),('user_id', '=', uid)]"
elif len(ids)==1:
domain = "[('user_id', '=', uid)]"
else:
domain = "[('user_id', '=', uid)]"
value = {
'domain': domain,
'name': _('Open Timesheet'),
'view_type': 'form',
'view_mode': view_type,
'res_model': 'hr_timesheet_sheet.sheet',
'view_id': False,
'type': 'ir.actions.act_window'
}
if len(ids) == 1:
value['res_id'] = ids[0]
return value
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
blaggacao/odoo
|
refs/heads/master
|
addons/account_bank_statement_import/account_bank_statement_import.py
|
23
|
# -*- coding: utf-8 -*-
from openerp.osv import fields, osv
from openerp.tools.translate import _
import logging
_logger = logging.getLogger(__name__)
_IMPORT_FILE_TYPE = [('none', _('No Import Format Available'))]
def add_file_type(selection_value):
global _IMPORT_FILE_TYPE
if _IMPORT_FILE_TYPE[0][0] == 'none':
_IMPORT_FILE_TYPE = [selection_value]
else:
_IMPORT_FILE_TYPE.append(selection_value)
class account_bank_statement_import(osv.TransientModel):
_name = 'account.bank.statement.import'
_description = 'Import Bank Statement'
def _get_import_file_type(self, cr, uid, context=None):
return _IMPORT_FILE_TYPE
_columns = {
        'data_file': fields.binary('Bank Statement File', required=True, help='Get your bank statements in electronic format from your bank and select them here.'),
'file_type': fields.selection(_get_import_file_type, 'File Type', required=True),
'journal_id': fields.many2one('account.journal', 'Journal', required=True, help="The journal for which the bank statements will be created"),
}
def _get_first_file_type(self, cr, uid, context=None):
return self._get_import_file_type(cr, uid, context=context)[0][0]
def _get_default_journal(self, cr, uid, context=None):
company_id = self.pool.get('res.company')._company_default_get(cr, uid, 'account.bank.statement', context=context)
journal_ids = self.pool.get('account.journal').search(cr, uid, [('type', '=', 'bank'), ('company_id', '=', company_id)], context=context)
return journal_ids and journal_ids[0] or False
_defaults = {
'file_type': _get_first_file_type,
'journal_id': _get_default_journal,
}
def _detect_partner(self, cr, uid, identifying_string, identifying_field='acc_number', context=None):
"""Try to find a bank account and its related partner for the given 'identifying_string', looking on the field 'identifying_field'.
:param identifying_string: varchar
:param identifying_field: varchar corresponding to the name of a field of res.partner.bank
:returns: tuple(ID of the bank account found or False, ID of the partner for the bank account found or False)
"""
partner_id = False
bank_account_id = False
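        # Illustrative only (the IBAN below is a made-up example): calling
        # _detect_partner(cr, uid, 'NL91ABNA0417164300') returns
        # (bank_account_id, partner_id) when a res.partner.bank record with that
        # acc_number already exists; otherwise a bank account with no partner is
        # created below and partner_id stays False.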
if identifying_string:
ids = self.pool.get('res.partner.bank').search(cr, uid, [(identifying_field, '=', identifying_string)], context=context)
if ids:
bank_account_id = ids[0]
partner_id = self.pool.get('res.partner.bank').browse(cr, uid, bank_account_id, context=context).partner_id.id
else:
#create the bank account, not linked to any partner. The reconciliation will link the partner manually
#chosen at the bank statement final confirmation time.
try:
type_model, type_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'base', 'bank_normal')
type_id = self.pool.get('res.partner.bank.type').browse(cr, uid, type_id, context=context)
bank_code = type_id.code
except ValueError:
bank_code = 'bank'
acc_number = identifying_field == 'acc_number' and identifying_string or _('Undefined')
bank_account_vals = {
'acc_number': acc_number,
'state': bank_code,
}
bank_account_vals[identifying_field] = identifying_string
bank_account_id = self.pool.get('res.partner.bank').create(cr, uid, bank_account_vals, context=context)
return bank_account_id, partner_id
def import_bank_statement(self, cr, uid, bank_statement_vals=False, context=None):
""" Get a list of values to pass to the create() of account.bank.statement object, and returns a list of ID created using those values"""
statement_ids = []
for vals in bank_statement_vals:
statement_ids.append(self.pool.get('account.bank.statement').create(cr, uid, vals, context=context))
return statement_ids
def process_none(self, cr, uid, data_file, journal_id=False, context=None):
        raise osv.except_osv(_('Error'), _('No available format for importing bank statements. You can install one of the file formats available through the module installation.'))
def parse_file(self, cr, uid, ids, context=None):
""" Process the file chosen in the wizard and returns a list view of the imported bank statements"""
data = self.browse(cr, uid, ids[0], context=context)
vals = getattr(self, "process_%s" % data.file_type)(cr, uid, data.data_file, data.journal_id.id, context=context)
statement_ids = self.import_bank_statement(cr, uid, vals, context=context)
model, action_id = self.pool.get('ir.model.data').get_object_reference(cr, uid, 'account', 'action_bank_statement_tree')
action = self.pool[model].read(cr, uid, action_id, context=context)
action['domain'] = "[('id', 'in', [" + ', '.join(map(str, statement_ids)) + "])]"
return action
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
amenonsen/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/cnos/cnos_banner.py
|
52
|
#!/usr/bin/python
#
# Copyright (C) 2017 Lenovo, Inc.
# (c) 2017, Ansible by Red Hat, inc
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to send banner commands to Lenovo Switches
# Two types of banners are supported login and motd
# Lenovo Networking
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: cnos_banner
version_added: "2.8"
author: "Anil Kumar Muraleedharan (@amuraleedhar)"
short_description: Manage multiline banners on Lenovo CNOS devices
description:
- This will configure both login and motd banners on remote devices
    running Lenovo CNOS. It allows playbooks to add or remove
banner text from the active running configuration.
notes:
- Tested against CNOS 10.8.1
options:
banner:
description:
- Specifies which banner should be configured on the remote device.
In Ansible 2.8 and earlier only I(login) and I(motd) were supported.
required: true
choices: ['login', 'motd']
text:
description:
- The banner text that should be
present in the remote device running configuration. This argument
accepts a multiline string, with no empty lines. Requires
I(state=present).
state:
description:
- Specifies whether or not the configuration is
present in the current devices active running configuration.
default: present
choices: ['present', 'absent']
provider:
description:
- B(Deprecated)
- "Starting with Ansible 2.5 we recommend using
C(connection: network_cli)."
- For more information please see the
L(CNOS Platform Options guide, ../network/user_guide/platform_cnos.html).
- HORIZONTALLINE
- A dict object containing connection details.
version_added: "2.8"
suboptions:
host:
description:
- Specifies the DNS host name or address for connecting to the remote
device over the specified transport. The value of host is used as
the destination address for the transport.
required: true
port:
description:
- Specifies the port to use when building the connection to the
remote device.
default: 22
username:
description:
- Configures the username to use to authenticate the connection to
the remote device. This value is used to authenticate
the SSH session. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_USERNAME) will be used
instead.
password:
description:
- Specifies the password to use to authenticate the connection to
the remote device. This value is used to authenticate
the SSH session. If the value is not specified in the task, the
value of environment variable C(ANSIBLE_NET_PASSWORD) will be used
instead.
timeout:
description:
- Specifies the timeout in seconds for communicating with the network
device for either connecting or sending commands. If the timeout
is exceeded before the operation is completed, the module will
error.
default: 10
ssh_keyfile:
description:
- Specifies the SSH key to use to authenticate the connection to
the remote device. This value is the path to the
key used to authenticate the SSH session. If the value is not
specified in the task, the value of environment variable
            C(ANSIBLE_NET_SSH_KEYFILE) will be used instead.
authorize:
description:
- Instructs the module to enter privileged mode on the remote device
before sending any commands. If not specified, the device will
attempt to execute all commands in non-privileged mode. If the
value is not specified in the task, the value of environment
variable C(ANSIBLE_NET_AUTHORIZE) will be used instead.
type: bool
default: 'no'
auth_pass:
description:
- Specifies the password to use if required to enter privileged mode
on the remote device. If I(authorize) is false, then this argument
does nothing. If the value is not specified in the task, the value
of environment variable C(ANSIBLE_NET_AUTH_PASS) will be used
instead.
"""
EXAMPLES = """
- name: configure the login banner
cnos_banner:
banner: login
text: |
this is my login banner
that contains a multiline
string
state: present
- name: remove the motd banner
cnos_banner:
banner: motd
state: absent
- name: Configure banner from file
cnos_banner:
banner: motd
text: "{{ lookup('file', './config_partial/raw_banner.cfg') }}"
state: present
"""
RETURN = """
commands:
description: The list of configuration mode commands to send to the device
returned: always
type: list
sample:
- banner login
- this is my login banner
- that contains a multiline
- string
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import exec_command
from ansible.module_utils.network.cnos.cnos import load_config, run_commands
from ansible.module_utils.network.cnos.cnos import check_args
from ansible.module_utils.network.cnos.cnos import cnos_argument_spec
from ansible.module_utils._text import to_text
import re
def map_obj_to_commands(updates, module):
commands = list()
want, have = updates
state = module.params['state']
if state == 'absent' and 'text' in have.keys() and have['text']:
commands.append('no banner %s' % module.params['banner'])
elif state == 'present':
if want['text'] and (want['text'] != have.get('text')):
banner_cmd = 'banner %s ' % module.params['banner']
for bline in want['text'].strip().splitlines():
final_cmd = banner_cmd + bline.strip()
commands.append(final_cmd)
return commands
def map_config_to_obj(module):
rc, out, err = exec_command(module,
'show banner %s' % module.params['banner'])
if rc == 0:
output = out
else:
rc, out, err = exec_command(module,
'show running-config | include banner %s'
% module.params['banner'])
if out:
output = re.search(r'\^C(.*)\^C', out, re.S).group(1).strip()
else:
output = None
obj = {'banner': module.params['banner'], 'state': 'absent'}
if output:
obj['text'] = output
obj['state'] = 'present'
return obj
def map_params_to_obj(module):
text = module.params['text']
if text:
text = to_text(text).strip()
return {
'banner': module.params['banner'],
'text': text,
'state': module.params['state']
}
def main():
""" main entry point for module execution
"""
argument_spec = dict(
banner=dict(required=True, choices=['login', 'motd']),
text=dict(),
state=dict(default='present', choices=['present', 'absent'])
)
argument_spec.update(cnos_argument_spec)
required_if = [('state', 'present', ('text',))]
module = AnsibleModule(argument_spec=argument_spec,
required_if=required_if,
supports_check_mode=True)
warnings = list()
check_args(module, warnings)
result = {'changed': False}
if warnings:
result['warnings'] = warnings
want = map_params_to_obj(module)
have = map_config_to_obj(module)
commands = map_obj_to_commands((want, have), module)
result['commands'] = commands
if commands:
if not module.check_mode:
response = load_config(module, commands)
result['changed'] = True
module.exit_json(**result)
if __name__ == '__main__':
main()
|
geekboxzone/lollipop_external_chromium_org
|
refs/heads/geekbox
|
tools/perf/metrics/v8_object_stats.py
|
42
|
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
from metrics import Metric
from telemetry.value import scalar
_COUNTER_NAMES = [
'V8.OsMemoryAllocated',
'V8.MemoryNewSpaceBytesAvailable',
'V8.MemoryNewSpaceBytesCommitted',
'V8.MemoryNewSpaceBytesUsed',
'V8.MemoryOldPointerSpaceBytesAvailable',
'V8.MemoryOldPointerSpaceBytesCommitted',
'V8.MemoryOldPointerSpaceBytesUsed',
'V8.MemoryOldDataSpaceBytesAvailable',
'V8.MemoryOldDataSpaceBytesCommitted',
'V8.MemoryOldDataSpaceBytesUsed',
'V8.MemoryCodeSpaceBytesAvailable',
'V8.MemoryCodeSpaceBytesCommitted',
'V8.MemoryCodeSpaceBytesUsed',
'V8.MemoryMapSpaceBytesAvailable',
'V8.MemoryMapSpaceBytesCommitted',
'V8.MemoryMapSpaceBytesUsed',
'V8.MemoryCellSpaceBytesAvailable',
'V8.MemoryCellSpaceBytesCommitted',
'V8.MemoryCellSpaceBytesUsed',
'V8.MemoryPropertyCellSpaceBytesAvailable',
'V8.MemoryPropertyCellSpaceBytesCommitted',
'V8.MemoryPropertyCellSpaceBytesUsed',
'V8.MemoryLoSpaceBytesAvailable',
'V8.MemoryLoSpaceBytesCommitted',
'V8.MemoryLoSpaceBytesUsed',
'V8.SizeOf_ACCESSOR_PAIR_TYPE',
'V8.SizeOf_ACCESS_CHECK_INFO_TYPE',
'V8.SizeOf_ALIASED_ARGUMENTS_ENTRY_TYPE',
'V8.SizeOf_ALLOCATION_MEMENTO_TYPE',
'V8.SizeOf_ALLOCATION_SITE_TYPE',
'V8.SizeOf_ASCII_INTERNALIZED_STRING_TYPE',
'V8.SizeOf_ASCII_STRING_TYPE',
'V8.SizeOf_BOX_TYPE',
'V8.SizeOf_BREAK_POINT_INFO_TYPE',
'V8.SizeOf_BYTE_ARRAY_TYPE',
'V8.SizeOf_CALL_HANDLER_INFO_TYPE',
'V8.SizeOf_CELL_TYPE',
'V8.SizeOf_CODE_AGE-NotExecuted',
'V8.SizeOf_CODE_AGE-ExecutedOnce',
'V8.SizeOf_CODE_AGE-NoAge',
'V8.SizeOf_CODE_AGE-Quadragenarian',
'V8.SizeOf_CODE_AGE-Quinquagenarian',
'V8.SizeOf_CODE_AGE-Sexagenarian',
'V8.SizeOf_CODE_AGE-Septuagenarian',
'V8.SizeOf_CODE_AGE-Octogenarian',
'V8.SizeOf_CODE_CACHE_TYPE',
'V8.SizeOf_CODE_TYPE',
'V8.SizeOf_CODE_TYPE-BINARY_OP_IC',
'V8.SizeOf_CODE_TYPE-BUILTIN',
'V8.SizeOf_CODE_TYPE-CALL_IC',
'V8.SizeOf_CODE_TYPE-COMPARE_IC',
'V8.SizeOf_CODE_TYPE-COMPARE_NIL_IC',
'V8.SizeOf_CODE_TYPE-FUNCTION',
'V8.SizeOf_CODE_TYPE-KEYED_CALL_IC',
'V8.SizeOf_CODE_TYPE-KEYED_LOAD_IC',
'V8.SizeOf_CODE_TYPE-KEYED_STORE_IC',
'V8.SizeOf_CODE_TYPE-LOAD_IC',
'V8.SizeOf_CODE_TYPE-OPTIMIZED_FUNCTION',
'V8.SizeOf_CODE_TYPE-REGEXP',
'V8.SizeOf_CODE_TYPE-STORE_IC',
'V8.SizeOf_CODE_TYPE-STUB',
'V8.SizeOf_CODE_TYPE-TO_BOOLEAN_IC',
'V8.SizeOf_CONS_ASCII_INTERNALIZED_STRING_TYPE',
'V8.SizeOf_CONS_ASCII_STRING_TYPE',
'V8.SizeOf_CONS_INTERNALIZED_STRING_TYPE',
'V8.SizeOf_CONS_STRING_TYPE',
'V8.SizeOf_DEBUG_INFO_TYPE',
'V8.SizeOf_DECLARED_ACCESSOR_DESCRIPTOR_TYPE',
'V8.SizeOf_DECLARED_ACCESSOR_INFO_TYPE',
'V8.SizeOf_EXECUTABLE_ACCESSOR_INFO_TYPE',
'V8.SizeOf_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE',
'V8.SizeOf_EXTERNAL_ASCII_STRING_TYPE',
'V8.SizeOf_EXTERNAL_BYTE_ARRAY_TYPE',
'V8.SizeOf_EXTERNAL_DOUBLE_ARRAY_TYPE',
'V8.SizeOf_EXTERNAL_FLOAT_ARRAY_TYPE',
'V8.SizeOf_EXTERNAL_INTERNALIZED_STRING_TYPE',
'V8.SizeOf_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE',
'V8.SizeOf_EXTERNAL_INT_ARRAY_TYPE',
'V8.SizeOf_EXTERNAL_PIXEL_ARRAY_TYPE',
'V8.SizeOf_EXTERNAL_SHORT_ARRAY_TYPE',
'V8.SizeOf_EXTERNAL_STRING_TYPE',
'V8.SizeOf_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE',
'V8.SizeOf_EXTERNAL_UNSIGNED_BYTE_ARRAY_TYPE',
'V8.SizeOf_EXTERNAL_UNSIGNED_INT_ARRAY_TYPE',
'V8.SizeOf_EXTERNAL_UNSIGNED_SHORT_ARRAY_TYPE',
'V8.SizeOf_FILLER_TYPE',
'V8.SizeOf_FIXED_ARRAY-DESCRIPTOR_ARRAY_SUB_TYPE',
'V8.SizeOf_FIXED_ARRAY-DICTIONARY_ELEMENTS_SUB_TYPE',
'V8.SizeOf_FIXED_ARRAY-DICTIONARY_PROPERTIES_SUB_TYPE',
'V8.SizeOf_FIXED_ARRAY-FAST_ELEMENTS_SUB_TYPE',
'V8.SizeOf_FIXED_ARRAY-FAST_PROPERTIES_SUB_TYPE',
'V8.SizeOf_FIXED_ARRAY-MAP_CODE_CACHE_SUB_TYPE',
'V8.SizeOf_FIXED_ARRAY-SCOPE_INFO_SUB_TYPE',
'V8.SizeOf_FIXED_ARRAY-STRING_TABLE_SUB_TYPE',
'V8.SizeOf_FIXED_ARRAY-TRANSITION_ARRAY_SUB_TYPE',
'V8.SizeOf_FIXED_ARRAY_TYPE',
'V8.SizeOf_FIXED_DOUBLE_ARRAY_TYPE',
'V8.SizeOf_FOREIGN_TYPE',
'V8.SizeOf_FREE_SPACE_TYPE',
'V8.SizeOf_FUNCTION_TEMPLATE_INFO_TYPE',
'V8.SizeOf_HEAP_NUMBER_TYPE',
'V8.SizeOf_INTERCEPTOR_INFO_TYPE',
'V8.SizeOf_INTERNALIZED_STRING_TYPE',
'V8.SizeOf_JS_ARRAY_BUFFER_TYPE',
'V8.SizeOf_JS_ARRAY_TYPE',
'V8.SizeOf_JS_BUILTINS_OBJECT_TYPE',
'V8.SizeOf_JS_CONTEXT_EXTENSION_OBJECT_TYPE',
'V8.SizeOf_JS_DATA_VIEW_TYPE',
'V8.SizeOf_JS_DATE_TYPE',
'V8.SizeOf_JS_FUNCTION_PROXY_TYPE',
'V8.SizeOf_JS_FUNCTION_TYPE',
'V8.SizeOf_JS_GENERATOR_OBJECT_TYPE',
'V8.SizeOf_JS_GLOBAL_OBJECT_TYPE',
'V8.SizeOf_JS_GLOBAL_PROXY_TYPE',
'V8.SizeOf_JS_MAP_TYPE',
'V8.SizeOf_JS_MESSAGE_OBJECT_TYPE',
'V8.SizeOf_JS_MODULE_TYPE',
'V8.SizeOf_JS_OBJECT_TYPE',
'V8.SizeOf_JS_PROXY_TYPE',
'V8.SizeOf_JS_REGEXP_TYPE',
'V8.SizeOf_JS_SET_TYPE',
'V8.SizeOf_JS_TYPED_ARRAY_TYPE',
'V8.SizeOf_JS_VALUE_TYPE',
'V8.SizeOf_JS_WEAK_MAP_TYPE',
'V8.SizeOf_JS_WEAK_SET_TYPE',
'V8.SizeOf_MAP_TYPE',
'V8.SizeOf_OBJECT_TEMPLATE_INFO_TYPE',
'V8.SizeOf_ODDBALL_TYPE',
'V8.SizeOf_POLYMORPHIC_CODE_CACHE_TYPE',
'V8.SizeOf_PROPERTY_CELL_TYPE',
'V8.SizeOf_SCRIPT_TYPE',
'V8.SizeOf_SHARED_FUNCTION_INFO_TYPE',
'V8.SizeOf_SHORT_EXTERNAL_ASCII_INTERNALIZED_STRING_TYPE',
'V8.SizeOf_SHORT_EXTERNAL_ASCII_STRING_TYPE',
'V8.SizeOf_SHORT_EXTERNAL_INTERNALIZED_STRING_TYPE',
'V8.SizeOf_SHORT_EXTERNAL_INTERNALIZED_STRING_WITH_ONE_BYTE_DATA_TYPE',
'V8.SizeOf_SHORT_EXTERNAL_STRING_TYPE',
'V8.SizeOf_SHORT_EXTERNAL_STRING_WITH_ONE_BYTE_DATA_TYPE',
'V8.SizeOf_SIGNATURE_INFO_TYPE',
'V8.SizeOf_SLICED_ASCII_STRING_TYPE',
'V8.SizeOf_SLICED_STRING_TYPE',
'V8.SizeOf_STRING_TYPE',
'V8.SizeOf_SYMBOL_TYPE',
'V8.SizeOf_TYPE_FEEDBACK_INFO_TYPE',
'V8.SizeOf_TYPE_SWITCH_INFO_TYPE',
]
# Descriptions for what different counter names represent.
DESCRIPTIONS = {
'V8.MemoryExternalFragmentationTotal':
'Total external memory fragmentation after each GC in percent.',
'V8.MemoryHeapSampleTotalCommitted':
'The total size of committed memory used by V8 after each GC in KB.',
'V8.MemoryHeapSampleTotalUsed':
'The total size of live memory used by V8 after each GC in KB.',
}
class V8ObjectStatsMetric(Metric):
"""V8ObjectStatsMetric gathers statistics on the size of types in the V8 heap.
It does this by enabling the --track_gc_object_stats flag on V8 and reading
these statistics from the StatsTableMetric.
"""
def __init__(self, counters=None):
super(V8ObjectStatsMetric, self).__init__()
self._results = None
self._counters = counters or _COUNTER_NAMES
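  # Illustrative only: after Stop(), self._results maps counter names to byte
  # counts, e.g. {'V8.MemoryNewSpaceBytesUsed': 524288}; AddResults() reports
  # each entry as a scalar value in KB (value / 1024.0).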
@classmethod
def CustomizeBrowserOptions(cls, options):
options.AppendExtraBrowserArgs([
'--enable-stats-table',
'--enable-benchmarking',
'--js-flags=--track_gc_object_stats --expose_gc',
# TODO(rmcilroy): This is needed for --enable-stats-table. Update once
# https://codereview.chromium.org/22911027/ lands.
'--no-sandbox'
])
@staticmethod
def GetV8StatsTable(tab, counters):
return tab.EvaluateJavaScript("""
(function(counters) {
var results = {};
if (!window.chrome || !window.chrome.benchmarking)
return results;
try {
window.gc(); // Trigger GC to ensure stats are checkpointed.
} catch(e) {
// window.gc() could have been mapped to something else,
// just continue.
}
for (var i = 0; i < counters.length; i++)
results[counters[i]] =
chrome.benchmarking.counterForRenderer(counters[i]);
return results;
})(%s);
""" % json.dumps(counters))
def Start(self, page, tab):
"""Do Nothing."""
pass
def Stop(self, page, tab):
"""Get the values in the stats table after the page is loaded."""
self._results = V8ObjectStatsMetric.GetV8StatsTable(tab, self._counters)
if not self._results:
logging.warning('No V8 object stats from website: ' + page.display_name)
def AddResults(self, tab, results):
"""Add results for this page to the results object."""
    assert self._results is not None, 'Must call Stop() first'
for counter_name in self._results:
description = DESCRIPTIONS.get(counter_name)
display_name = counter_name.replace('.', '_')
results.AddValue(scalar.ScalarValue(
results.current_page, display_name, 'kb',
self._results[counter_name] / 1024.0, description=description))
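# Rough lifecycle sketch (the Telemetry measurement framework normally drives
# these calls; the counter name and variable names below are illustrative only):
#
#   metric = V8ObjectStatsMetric(counters=['V8.SizeOf_JS_OBJECT_TYPE'])
#   metric.Start(page, tab)          # no-op for this metric
#   ...                              # navigate / run the page under test
#   metric.Stop(page, tab)           # reads the V8 stats table from the renderer
#   metric.AddResults(tab, results)  # reports each counter as a scalar in KB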
|
hyperNURb/ggrc-core
|
refs/heads/develop
|
src/ggrc_workflows/migrations/versions/20140612211116_580d2ac5bc2d_add_sort_index_to_.py
|
5
|
# Copyright (C) 2015 Google Inc., authors, and contributors <see AUTHORS file>
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
# Created By: anze@reciprocitylabs.com
# Maintained By: anze@reciprocitylabs.com
"""Add sort_index to CycleTaskGroupObjectTask
Revision ID: 580d2ac5bc2d
Revises: 5ac75b9cbb64
Create Date: 2014-06-12 21:11:16.736323
"""
# revision identifiers, used by Alembic.
revision = '580d2ac5bc2d'
down_revision = '5ac75b9cbb64'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('cycle_task_group_object_tasks', sa.Column('sort_index', sa.String(length=250), nullable=False))
def downgrade():
op.drop_column('cycle_task_group_object_tasks', 'sort_index')
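# For reference, this migration is applied or reverted with the standard Alembic
# commands run against this migrations environment, e.g.:
#
#   alembic upgrade 580d2ac5bc2d
#   alembic downgrade 5ac75b9cbb64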
|
zedr/django
|
refs/heads/master
|
django/core/management/commands/inspectdb.py
|
6
|
from __future__ import unicode_literals
from collections import OrderedDict
import keyword
import re
from optparse import make_option
from django.core.management.base import NoArgsCommand, CommandError
from django.db import connections, DEFAULT_DB_ALIAS
class Command(NoArgsCommand):
help = "Introspects the database tables in the given database and outputs a Django model module."
option_list = NoArgsCommand.option_list + (
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to '
'introspect. Defaults to using the "default" database.'),
)
requires_system_checks = False
db_module = 'django.db'
def handle_noargs(self, **options):
try:
for line in self.handle_inspection(options):
self.stdout.write("%s\n" % line)
except NotImplementedError:
raise CommandError("Database inspection isn't supported for the currently selected database backend.")
def handle_inspection(self, options):
connection = connections[options.get('database')]
# 'table_name_filter' is a stealth option
table_name_filter = options.get('table_name_filter')
table2model = lambda table_name: table_name.title().replace('_', '').replace(' ', '').replace('-', '')
strip_prefix = lambda s: s[1:] if s.startswith("u'") else s
with connection.cursor() as cursor:
yield "# This is an auto-generated Django model module."
yield "# You'll have to do the following manually to clean this up:"
yield "# * Rearrange models' order"
yield "# * Make sure each model has one field with primary_key=True"
yield "# * Remove `managed = False` lines for those models you wish to give write DB access"
yield "# Feel free to rename the models, but don't rename db_table values or field names."
yield "#"
yield "# Also note: You'll have to insert the output of 'django-admin.py sqlcustom [app_label]'"
yield "# into your database."
yield "from __future__ import unicode_literals"
yield ''
yield 'from %s import models' % self.db_module
known_models = []
for table_name in connection.introspection.table_names(cursor):
if table_name_filter is not None and callable(table_name_filter):
if not table_name_filter(table_name):
continue
yield ''
yield ''
yield 'class %s(models.Model):' % table2model(table_name)
known_models.append(table2model(table_name))
try:
relations = connection.introspection.get_relations(cursor, table_name)
except NotImplementedError:
relations = {}
try:
indexes = connection.introspection.get_indexes(cursor, table_name)
except NotImplementedError:
indexes = {}
used_column_names = [] # Holds column names used in the table so far
for i, row in enumerate(connection.introspection.get_table_description(cursor, table_name)):
comment_notes = [] # Holds Field notes, to be displayed in a Python comment.
extra_params = OrderedDict() # Holds Field parameters such as 'db_column'.
column_name = row[0]
is_relation = i in relations
att_name, params, notes = self.normalize_col_name(
column_name, used_column_names, is_relation)
extra_params.update(params)
comment_notes.extend(notes)
used_column_names.append(att_name)
# Add primary_key and unique, if necessary.
if column_name in indexes:
if indexes[column_name]['primary_key']:
extra_params['primary_key'] = True
elif indexes[column_name]['unique']:
extra_params['unique'] = True
if is_relation:
rel_to = "self" if relations[i][1] == table_name else table2model(relations[i][1])
if rel_to in known_models:
field_type = 'ForeignKey(%s' % rel_to
else:
field_type = "ForeignKey('%s'" % rel_to
else:
# Calling `get_field_type` to get the field type string and any
                        # additional parameters and notes.
field_type, field_params, field_notes = self.get_field_type(connection, table_name, row)
extra_params.update(field_params)
comment_notes.extend(field_notes)
field_type += '('
# Don't output 'id = meta.AutoField(primary_key=True)', because
# that's assumed if it doesn't exist.
if att_name == 'id' and extra_params == {'primary_key': True}:
if field_type == 'AutoField(':
continue
elif field_type == 'IntegerField(' and not connection.features.can_introspect_autofield:
comment_notes.append('AutoField?')
# Add 'null' and 'blank', if the 'null_ok' flag was present in the
# table description.
if row[6]: # If it's NULL...
if field_type == 'BooleanField(':
field_type = 'NullBooleanField('
else:
extra_params['blank'] = True
                            if field_type not in ('TextField(', 'CharField('):
extra_params['null'] = True
field_desc = '%s = %s%s' % (
att_name,
# Custom fields will have a dotted path
'' if '.' in field_type else 'models.',
field_type,
)
if extra_params:
if not field_desc.endswith('('):
field_desc += ', '
field_desc += ', '.join([
'%s=%s' % (k, strip_prefix(repr(v)))
for k, v in extra_params.items()])
field_desc += ')'
if comment_notes:
field_desc += ' # ' + ' '.join(comment_notes)
yield ' %s' % field_desc
for meta_line in self.get_meta(table_name):
yield meta_line
def normalize_col_name(self, col_name, used_column_names, is_relation):
"""
Modify the column name to make it Python-compatible as a field name
"""
field_params = {}
field_notes = []
new_name = col_name.lower()
if new_name != col_name:
field_notes.append('Field name made lowercase.')
if is_relation:
if new_name.endswith('_id'):
new_name = new_name[:-3]
else:
field_params['db_column'] = col_name
new_name, num_repl = re.subn(r'\W', '_', new_name)
if num_repl > 0:
field_notes.append('Field renamed to remove unsuitable characters.')
if new_name.find('__') >= 0:
while new_name.find('__') >= 0:
new_name = new_name.replace('__', '_')
if col_name.lower().find('__') >= 0:
# Only add the comment if the double underscore was in the original name
field_notes.append("Field renamed because it contained more than one '_' in a row.")
if new_name.startswith('_'):
new_name = 'field%s' % new_name
field_notes.append("Field renamed because it started with '_'.")
if new_name.endswith('_'):
new_name = '%sfield' % new_name
field_notes.append("Field renamed because it ended with '_'.")
if keyword.iskeyword(new_name):
new_name += '_field'
field_notes.append('Field renamed because it was a Python reserved word.')
if new_name[0].isdigit():
new_name = 'number_%s' % new_name
field_notes.append("Field renamed because it wasn't a valid Python identifier.")
if new_name in used_column_names:
num = 0
while '%s_%d' % (new_name, num) in used_column_names:
num += 1
new_name = '%s_%d' % (new_name, num)
field_notes.append('Field renamed because of name conflict.')
if col_name != new_name and field_notes:
field_params['db_column'] = col_name
return new_name, field_params, field_notes
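    # Illustrative outcomes of the renaming rules above (example column names,
    # not taken from a real schema):
    #
    #   normalize_col_name('User ID', [], False)
    #       -> ('user_id', {'db_column': 'User ID'},
    #           ['Field name made lowercase.',
    #            'Field renamed to remove unsuitable characters.'])
    #   normalize_col_name('class', [], False)
    #       -> ('class_field', {'db_column': 'class'},
    #           ['Field renamed because it was a Python reserved word.'])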
def get_field_type(self, connection, table_name, row):
"""
Given the database connection, the table name, and the cursor row
description, this routine will return the given field type name, as
well as any additional keyword parameters and notes for the field.
"""
field_params = OrderedDict()
field_notes = []
try:
field_type = connection.introspection.get_field_type(row[1], row)
except KeyError:
field_type = 'TextField'
field_notes.append('This field type is a guess.')
# This is a hook for DATA_TYPES_REVERSE to return a tuple of
# (field_type, field_params_dict).
if type(field_type) is tuple:
field_type, new_params = field_type
field_params.update(new_params)
# Add max_length for all CharFields.
if field_type == 'CharField' and row[3]:
field_params['max_length'] = int(row[3])
if field_type == 'DecimalField':
if row[4] is None or row[5] is None:
field_notes.append(
'max_digits and decimal_places have been guessed, as this '
'database handles decimal fields as float')
field_params['max_digits'] = row[4] if row[4] is not None else 10
field_params['decimal_places'] = row[5] if row[5] is not None else 5
else:
field_params['max_digits'] = row[4]
field_params['decimal_places'] = row[5]
return field_type, field_params, field_notes
def get_meta(self, table_name):
"""
Return a sequence comprising the lines of code necessary
to construct the inner Meta class for the model corresponding
to the given database table name.
"""
return ["",
" class Meta:",
" managed = False",
" db_table = '%s'" % table_name]
|
pylbert/upm
|
refs/heads/master
|
examples/python/ppd42ns.py
|
6
|
#!/usr/bin/env python
# Author: Zion Orent <zorent@ics.com>
# Copyright (c) 2015 Intel Corporation.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from __future__ import print_function
import time, sys, signal, atexit
from upm import pyupm_ppd42ns as upmPpd42ns
def main():
# Instantiate a dust sensor on digital pin D8
myDustSensor = upmPpd42ns.PPD42NS(8)
## Exit handlers ##
# This function stops python from printing a stacktrace when you hit control-C
def SIGINTHandler(signum, frame):
raise SystemExit
# This function lets you run code on exit, including functions from myDustSensor
def exitHandler():
print("Exiting")
sys.exit(0)
# Register exit handlers
atexit.register(exitHandler)
signal.signal(signal.SIGINT, SIGINTHandler)
notice = ("This program will give readings "
"every 30 seconds until you stop it")
print(notice)
while(1):
data = myDustSensor.getData()
# we need to sleep for a bit for the data to print out
time.sleep(.1)
print("Low pulse occupancy: " + str(data.lowPulseOccupancy))
print("Ratio: " + str(data.ratio))
print("Concentration: " + str(data.concentration))
if __name__ == '__main__':
main()
|
quentinbodinier/custom_gnuradio_blocks
|
refs/heads/master
|
python/test_interp.py
|
1
|
import numpy as np
from gnuradio import gr
class test_interp(gr.interp_block):
"""
docstring for block test_interp
"""
def __init__(self):
gr.interp_block.__init__(self,
name="test_interp",
in_sig=[np.int8],
out_sig=[np.complex64], interp=2090)
def work(self, input_items, output_items):
in0 = input_items[0]
out = output_items[0]
print len(in0)
        # Repeat each input bit across one full interpolation block of 2090
        # output samples. Slice end indices are exclusive, so no "-1" is
        # needed; the original "-1" left one output sample per block unset.
        for j in range(len(in0)):
            if in0[j] == 1:
                out[j*2090:(j+1)*2090] = 1
            else:
                out[j*2090:(j+1)*2090] = 0
return len(output_items[0])
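# Minimal flowgraph sketch for exercising this block (uses standard
# gnuradio.blocks sources/sinks; not part of this module):
#
#   from gnuradio import gr, blocks
#   tb = gr.top_block()
#   src = blocks.vector_source_b([1, 0, 1], False)
#   snk = blocks.vector_sink_c()
#   tb.connect(src, test_interp(), snk)
#   tb.run()
#   # len(snk.data()) should be 3 * 2090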
|
azoft-dev-team/imagrium
|
refs/heads/win
|
env/Lib/test/test_codecencodings_jp.py
|
150
|
#!/usr/bin/env python
#
# test_codecencodings_jp.py
# Codec encoding tests for Japanese encodings.
#
from test import test_support
from test import test_multibytecodec_support
import unittest
class Test_CP932(test_multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'cp932'
tstring = test_multibytecodec_support.load_teststring('shift_jis')
codectests = (
# invalid bytes
("abc\x81\x00\x81\x00\x82\x84", "strict", None),
("abc\xf8", "strict", None),
("abc\x81\x00\x82\x84", "replace", u"abc\ufffd\uff44"),
("abc\x81\x00\x82\x84\x88", "replace", u"abc\ufffd\uff44\ufffd"),
("abc\x81\x00\x82\x84", "ignore", u"abc\uff44"),
# sjis vs cp932
("\\\x7e", "replace", u"\\\x7e"),
("\x81\x5f\x81\x61\x81\x7c", "replace", u"\uff3c\u2225\uff0d"),
)
class Test_EUC_JISX0213(test_multibytecodec_support.TestBase,
unittest.TestCase):
encoding = 'euc_jisx0213'
tstring = test_multibytecodec_support.load_teststring('euc_jisx0213')
codectests = (
# invalid bytes
("abc\x80\x80\xc1\xc4", "strict", None),
("abc\xc8", "strict", None),
("abc\x80\x80\xc1\xc4", "replace", u"abc\ufffd\u7956"),
("abc\x80\x80\xc1\xc4\xc8", "replace", u"abc\ufffd\u7956\ufffd"),
("abc\x80\x80\xc1\xc4", "ignore", u"abc\u7956"),
("abc\x8f\x83\x83", "replace", u"abc\ufffd"),
("\xc1\x64", "strict", None),
("\xa1\xc0", "strict", u"\uff3c"),
)
xmlcharnametest = (
u"\xab\u211c\xbb = \u2329\u1234\u232a",
"\xa9\xa8ℜ\xa9\xb2 = ⟨ሴ⟩"
)
eucjp_commontests = (
("abc\x80\x80\xc1\xc4", "strict", None),
("abc\xc8", "strict", None),
("abc\x80\x80\xc1\xc4", "replace", u"abc\ufffd\u7956"),
("abc\x80\x80\xc1\xc4\xc8", "replace", u"abc\ufffd\u7956\ufffd"),
("abc\x80\x80\xc1\xc4", "ignore", u"abc\u7956"),
("abc\x8f\x83\x83", "replace", u"abc\ufffd"),
("\xc1\x64", "strict", None),
)
class Test_EUC_JP_COMPAT(test_multibytecodec_support.TestBase,
unittest.TestCase):
encoding = 'euc_jp'
tstring = test_multibytecodec_support.load_teststring('euc_jp')
codectests = eucjp_commontests + (
("\xa1\xc0\\", "strict", u"\uff3c\\"),
(u"\xa5", "strict", "\x5c"),
(u"\u203e", "strict", "\x7e"),
)
shiftjis_commonenctests = (
("abc\x80\x80\x82\x84", "strict", None),
("abc\xf8", "strict", None),
("abc\x80\x80\x82\x84", "replace", u"abc\ufffd\uff44"),
("abc\x80\x80\x82\x84\x88", "replace", u"abc\ufffd\uff44\ufffd"),
("abc\x80\x80\x82\x84def", "ignore", u"abc\uff44def"),
)
class Test_SJIS_COMPAT(test_multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'shift_jis'
tstring = test_multibytecodec_support.load_teststring('shift_jis')
codectests = shiftjis_commonenctests + (
("\\\x7e", "strict", u"\\\x7e"),
("\x81\x5f\x81\x61\x81\x7c", "strict", u"\uff3c\u2016\u2212"),
)
class Test_SJISX0213(test_multibytecodec_support.TestBase, unittest.TestCase):
encoding = 'shift_jisx0213'
tstring = test_multibytecodec_support.load_teststring('shift_jisx0213')
codectests = (
# invalid bytes
("abc\x80\x80\x82\x84", "strict", None),
("abc\xf8", "strict", None),
("abc\x80\x80\x82\x84", "replace", u"abc\ufffd\uff44"),
("abc\x80\x80\x82\x84\x88", "replace", u"abc\ufffd\uff44\ufffd"),
("abc\x80\x80\x82\x84def", "ignore", u"abc\uff44def"),
# sjis vs cp932
("\\\x7e", "replace", u"\xa5\u203e"),
("\x81\x5f\x81\x61\x81\x7c", "replace", u"\x5c\u2016\u2212"),
)
xmlcharnametest = (
u"\xab\u211c\xbb = \u2329\u1234\u232a",
"\x85Gℜ\x85Q = ⟨ሴ⟩"
)
def test_main():
test_support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
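# For orientation, each codectests entry is (data, errors, expected): byte-string
# inputs are decode tests, unicode inputs are encode tests, and an expected value
# of None means the call must raise an error. A rough standalone equivalent of
# one of the cp932 entries above:
#
#   assert "abc\x81\x00\x82\x84".decode('cp932', 'replace') == u"abc\ufffd\uff44"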
|
X-dark/Flexget
|
refs/heads/master
|
flexget/plugins/output/pushbullet.py
|
5
|
from __future__ import unicode_literals, division, absolute_import
import logging
import base64
from flexget import plugin
from flexget.event import event
from flexget.utils import json
from flexget.utils.template import RenderError
log = logging.getLogger("pushbullet")
__version__ = 0.1
client_headers = {"User-Agent": "FlexGet Pushbullet plugin/%s" % str(__version__)}
pushbullet_url = "https://api.pushbullet.com/api/pushes"
class OutputPushbullet(object):
"""
Example::
pushbullet:
apikey: <API_KEY>
device: <DEVICE_IDEN> (can also be a list of device idens, or don't specify any idens to send to all devices)
[title: <MESSAGE_TITLE>] (default: "{{task}} - Download started" -- accepts Jinja2)
[body: <MESSAGE_BODY>] (default: "{{series_name}} {{series_id}}" -- accepts Jinja2)
Configuration parameters are also supported from entries (eg. through set).
"""
def validator(self):
from flexget import validator
config = validator.factory("dict")
config.accept("text", key="apikey", required=True)
config.accept("text", key="device", required=False)
config.accept("list", key="device").accept("text")
config.accept("text", key="title", required=False)
config.accept("text", key="body", required=False)
return config
def prepare_config(self, config):
if isinstance(config, bool):
config = {"enabled": config}
# TODO: don't assume it's a download
config.setdefault("title", "{{task}} - Download started")
# TODO: use template file
config.setdefault("body", "{% if series_name is defined %}{{tvdb_series_name|d(series_name)}} "
"{{series_id}} {{tvdb_ep_name|d('')}}{% elif imdb_name is defined %}{{imdb_name}} "
"{{imdb_year}}{% else %}{{title}}{% endif %}")
config.setdefault("device", None)
return config
# Run last to make sure other outputs are successful before sending notification
@plugin.priority(0)
def on_task_output(self, task, config):
# get the parameters
config = self.prepare_config(config)
# Support for multiple devices
devices = config["device"]
if not isinstance(devices, list):
devices = [devices]
# Set a bunch of local variables from the config
apikey = config["apikey"]
device = config["device"]
client_headers["Authorization"] = "Basic %s" % base64.b64encode(apikey)
if task.options.test:
log.info("Test mode. Pushbullet configuration:")
log.info(" API_KEY: %s" % apikey)
log.info(" Type: Note")
log.info(" Device: %s" % device)
# Loop through the provided entries
for entry in task.accepted:
title = config["title"]
body = config["body"]
# Attempt to render the title field
try:
title = entry.render(title)
except RenderError as e:
log.warning("Problem rendering 'title': %s" % e)
title = "Download started"
# Attempt to render the body field
try:
body = entry.render(body)
except RenderError as e:
log.warning("Problem rendering 'body': %s" % e)
body = entry["title"]
for device in devices:
# Build the request
if not device:
data = {"type": "note", "title": title, "body": body}
else:
data = {"device_iden": device, "type": "note", "title": title, "body": body}
# Check for test mode
if task.options.test:
log.info("Test mode. Pushbullet notification would be:")
log.info(" Title: %s" % title)
log.info(" Body: %s" % body)
# Test mode. Skip remainder.
continue
# Make the request
response = task.requests.post(pushbullet_url, headers=client_headers, data=data, raise_status=False)
# Check if it succeeded
request_status = response.status_code
# error codes and messages from Pushbullet API
if request_status == 200:
log.debug("Pushbullet notification sent")
elif request_status == 500:
log.warning("Pushbullet notification failed, Pushbullet API having issues")
                    # TODO: Implement retrying. The API asks for 5 seconds between retries.
elif request_status >= 400:
error = json.loads(response.content)['error']
log.error("Pushbullet API error: %s" % error['message'])
else:
log.error("Unknown error when sending Pushbullet notification")
@event('plugin.register')
def register_plugin():
plugin.register(OutputPushbullet, "pushbullet", api_ver=2)
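# Example task configuration (illustrative values; the feed URL and device idens
# below are placeholders, not real identifiers):
#
#   tasks:
#     tv-downloads:
#       rss: http://example.com/feed
#       accept_all: yes
#       pushbullet:
#         apikey: MY_PUSHBULLET_API_KEY
#         device: [u1qSJddxeKwO1234, u1qSJddxeKwO5678]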
|
jumpojoy/neutron
|
refs/heads/generic_switch
|
neutron/plugins/ml2/drivers/l2pop/rpc_manager/l2population_rpc.py
|
41
|
# Copyright (c) 2013 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_config import cfg
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
import six
from neutron.common import constants as n_const
from neutron.plugins.ml2.drivers.l2pop import rpc as l2pop_rpc
LOG = logging.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class L2populationRpcCallBackMixin(object):
'''General mixin class of L2-population RPC call back.
The following methods are called through RPC.
add_fdb_entries(), remove_fdb_entries(), update_fdb_entries()
The following methods are used in an agent as internal methods.
fdb_add(), fdb_remove(), fdb_update()
'''
@log_helpers.log_method_call
def add_fdb_entries(self, context, fdb_entries, host=None):
if not host or host == cfg.CONF.host:
self.fdb_add(context, self._unmarshall_fdb_entries(fdb_entries))
@log_helpers.log_method_call
def remove_fdb_entries(self, context, fdb_entries, host=None):
if not host or host == cfg.CONF.host:
self.fdb_remove(context, self._unmarshall_fdb_entries(fdb_entries))
@log_helpers.log_method_call
def update_fdb_entries(self, context, fdb_entries, host=None):
if not host or host == cfg.CONF.host:
self.fdb_update(context, self._unmarshall_fdb_entries(fdb_entries))
@staticmethod
def _unmarshall_fdb_entries(fdb_entries):
"""Prepares fdb_entries from JSON.
All methods in this class that receive messages should call this to
unmarshall fdb_entries from the wire.
:param fdb_entries: Original fdb_entries data-structure. Looks like:
{
<uuid>: {
...,
'ports': {
<ip address>: [ [<mac>, <ip>], ... ],
...
:returns: Deep copy with [<mac>, <ip>] converted to PortInfo
"""
unmarshalled = dict(fdb_entries)
for value in unmarshalled.values():
if 'ports' in value:
value['ports'] = dict(
(address, [l2pop_rpc.PortInfo(*pi) for pi in port_infos])
for address, port_infos in value['ports'].items()
)
return unmarshalled
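    # Rough illustration of the conversion (the UUID, IPs and MAC are made-up
    # values):
    #
    #   entries = {'net-uuid': {'network_type': 'vxlan', 'segment_id': 100,
    #                           'ports': {'10.0.0.1': [['fa:16:3e:01:02:03',
    #                                                   '192.168.0.5']]}}}
    #   out = L2populationRpcCallBackMixin._unmarshall_fdb_entries(entries)
    #   # out['net-uuid']['ports']['10.0.0.1'][0] is now an l2pop_rpc.PortInfo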
@abc.abstractmethod
def fdb_add(self, context, fdb_entries):
pass
@abc.abstractmethod
def fdb_remove(self, context, fdb_entries):
pass
@abc.abstractmethod
def fdb_update(self, context, fdb_entries):
pass
class L2populationRpcCallBackTunnelMixin(L2populationRpcCallBackMixin):
'''Mixin class of L2-population call back for Tunnel.
The following methods are all used in agents as internal methods.
Some of the methods in this class use Local VLAN Mapping, aka lvm.
It's a python object with at least the following attributes:
============ =========================================================
Attribute Description
============ =========================================================
vlan An identifier used by the agent to identify a neutron
network.
network_type A network type found in neutron.plugins.common.constants.
============ =========================================================
NOTE(yamamoto): "Local VLAN" is an OVS-agent term. OVS-agent internally
uses 802.1q VLAN tagging to isolate networks. While this class inherited
the terms from OVS-agent, it does not assume the specific underlying
technologies. E.g. this class is also used by ofagent, where a different
mechanism is used.
'''
@abc.abstractmethod
def add_fdb_flow(self, br, port_info, remote_ip, lvm, ofport):
'''Add flow for fdb
This method is assumed to be used by method fdb_add_tun.
We expect to add a flow entry to send a packet to specified port
on bridge.
And you may edit some information for local arp response.
        :param br: represents the bridge on which add_fdb_flow should be
applied.
:param port_info: PortInfo instance to include mac and ip.
.mac_address
.ip_address
        :param remote_ip: remote IP address.
:param lvm: a local VLAN map of network.
:param ofport: a port to add.
'''
pass
@abc.abstractmethod
def del_fdb_flow(self, br, port_info, remote_ip, lvm, ofport):
'''Delete flow for fdb
This method is assumed to be used by method fdb_remove_tun.
We expect to delete a flow entry to send a packet to specified port
from bridge.
And you may delete some information for local arp response.
        :param br: represents the bridge on which del_fdb_flow should be
applied.
:param port_info: PortInfo instance to include mac and ip.
.mac_address
.ip_address
        :param remote_ip: remote IP address.
:param lvm: local VLAN map of a network. See add_fdb_flow for
more explanation.
:param ofport: a port to delete.
'''
pass
@abc.abstractmethod
def setup_tunnel_port(self, br, remote_ip, network_type):
'''Setup an added tunnel port.
This method is assumed to be used by method fdb_add_tun.
We expect to prepare to call add_fdb_flow. It will be mainly adding
a port to a bridge.
If you need, you may do some preparations for a bridge.
        :param br: represents the bridge on which setup_tunnel_port should be
applied.
:param remote_ip: an ip for a port to setup.
:param network_type: a type of a network.
:returns: an ofport value. value 0 means the port is unavailable.
'''
pass
@abc.abstractmethod
def cleanup_tunnel_port(self, br, tun_ofport, tunnel_type):
'''Clean up a deleted tunnel port.
This method is assumed to be used by method fdb_remove_tun.
We expect to clean up after calling del_fdb_flow. It will be mainly
deleting a port from a bridge.
If you need, you may do some cleanup for a bridge.
        :param br: represents the bridge on which cleanup_tunnel_port should be
applied.
:param tun_ofport: a port value to cleanup.
:param tunnel_type: a type of a tunnel.
'''
pass
@abc.abstractmethod
def setup_entry_for_arp_reply(self, br, action, local_vid, mac_address,
ip_address):
'''Operate the ARP respond information.
Update MAC/IPv4 associations, which is typically used by
the local ARP responder. For example, OVS-agent sets up
flow entries to perform ARP responses.
        :param br: represents the bridge on which setup_entry_for_arp_reply
should be applied.
:param action: add/remove flow for arp response information.
:param local_vid: id in local VLAN map of network's ARP entry.
:param mac_address: MAC string value.
:param ip_address: IP string value.
'''
pass
def get_agent_ports(self, fdb_entries, local_vlan_map):
"""Generator to yield port info.
        For each known (i.e. found in local_vlan_map) network in
fdb_entries, yield (lvm, fdb_entries[network_id]['ports']) pair.
:param fdb_entries: l2pop fdb entries
:param local_vlan_map: A dict to map network_id to
the corresponding lvm entry.
"""
for network_id, values in fdb_entries.items():
lvm = local_vlan_map.get(network_id)
if lvm is None:
continue
agent_ports = values.get('ports')
yield (lvm, agent_ports)
@log_helpers.log_method_call
def fdb_add_tun(self, context, br, lvm, agent_ports, lookup_port):
for remote_ip, ports in agent_ports.items():
# Ensure we have a tunnel port with this remote agent
ofport = lookup_port(lvm.network_type, remote_ip)
if not ofport:
ofport = self.setup_tunnel_port(br, remote_ip,
lvm.network_type)
if ofport == 0:
continue
for port in ports:
self.add_fdb_flow(br, port, remote_ip, lvm, ofport)
@log_helpers.log_method_call
def fdb_remove_tun(self, context, br, lvm, agent_ports, lookup_port):
for remote_ip, ports in agent_ports.items():
ofport = lookup_port(lvm.network_type, remote_ip)
if not ofport:
continue
for port in ports:
self.del_fdb_flow(br, port, remote_ip, lvm, ofport)
if port == n_const.FLOODING_ENTRY:
# Check if this tunnel port is still used
self.cleanup_tunnel_port(br, ofport, lvm.network_type)
@log_helpers.log_method_call
def fdb_update(self, context, fdb_entries):
'''Call methods named '_fdb_<action>'.
        This method assumes that methods '_fdb_<action>' are defined in the class.
Currently the following actions are available.
chg_ip
'''
for action, values in fdb_entries.items():
method = '_fdb_' + action
if not hasattr(self, method):
raise NotImplementedError()
getattr(self, method)(context, values)
@log_helpers.log_method_call
def fdb_chg_ip_tun(self, context, br, fdb_entries, local_ip,
local_vlan_map):
'''fdb update when an IP of a port is updated.
The ML2 l2-pop mechanism driver sends an fdb update rpc message when an
IP of a port is updated.
:param context: RPC context.
        :param br: represents the bridge on which fdb_chg_ip_tun should be
applied.
:param fdb_entries: fdb dicts that contain all mac/IP information per
agent and network.
{'net1':
{'agent_ip':
{'before': PortInfo,
'after': PortInfo
}
}
'net2':
...
}
PortInfo has .mac_address and .ip_address attrs.
:param local_ip: local IP address of this agent.
:param local_vlan_map: A dict to map network_id to
the corresponding lvm entry.
'''
for network_id, agent_ports in fdb_entries.items():
lvm = local_vlan_map.get(network_id)
if not lvm:
continue
for agent_ip, state in agent_ports.items():
if agent_ip == local_ip:
continue
after = state.get('after', [])
for mac_ip in after:
self.setup_entry_for_arp_reply(br, 'add', lvm.vlan,
mac_ip.mac_address,
mac_ip.ip_address)
before = state.get('before', [])
for mac_ip in before:
self.setup_entry_for_arp_reply(br, 'remove', lvm.vlan,
mac_ip.mac_address,
mac_ip.ip_address)
|
haggi/OpenMaya
|
refs/heads/master
|
src/mayaToBase/mt@_devmodule/scripts/path.py
|
5
|
#
# Copyright (c) 2010 Mikhail Gusarov
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
""" path.py - An object representing a path to a file or directory.
Original author:
Jason Orendorff <jason.orendorff\x40gmail\x2ecom>
Contributors:
Mikhail Gusarov <dottedmag@dottedmag.net>
Marc Abramowitz <marc@marc-abramowitz.com>
Example:
from path import path
d = path('/home/guido/bin')
for f in d.files('*.py'):
f.chmod(0755)
This module requires Python 2.3 or later.
"""
from __future__ import generators
import sys
import warnings
import os
import fnmatch
import glob
import shutil
import codecs
import hashlib
import errno
__version__ = '2.4.1'
__all__ = ['path']
# Platform-specific support for path.owner
if os.name == 'nt':
try:
import win32security
except ImportError:
win32security = None
else:
try:
import pwd
except ImportError:
pwd = None
# Pre-2.3 support. Are unicode filenames supported?
_base = str
_getcwd = os.getcwd
try:
if os.path.supports_unicode_filenames:
_base = unicode
_getcwd = os.getcwdu
except AttributeError:
pass
# Pre-2.3 workaround for basestring.
try:
basestring
except NameError:
basestring = (str, unicode)
# Universal newline support
_textmode = 'U'
if hasattr(__builtins__, 'file') and not hasattr(file, 'newlines'):
_textmode = 'r'
class TreeWalkWarning(Warning):
pass
class path(_base):
""" Represents a filesystem path.
For documentation on individual methods, consult their
counterparts in os.path.
"""
# --- Special Python methods.
def __repr__(self):
return 'path(%s)' % _base.__repr__(self)
# Adding a path and a string yields a path.
def __add__(self, more):
try:
resultStr = _base.__add__(self, more)
except TypeError: # Python bug
resultStr = NotImplemented
if resultStr is NotImplemented:
return resultStr
return self.__class__(resultStr)
def __radd__(self, other):
if isinstance(other, basestring):
return self.__class__(other.__add__(self))
else:
return NotImplemented
# The / operator joins paths.
def __div__(self, rel):
""" fp.__div__(rel) == fp / rel == fp.joinpath(rel)
Join two path components, adding a separator character if
needed.
"""
return self.__class__(os.path.join(self, rel))
# Make the / operator work even when true division is enabled.
__truediv__ = __div__
def __enter__(self):
self._old_dir = self.getcwd()
os.chdir(self)
def __exit__(self, *_):
os.chdir(self._old_dir)
def getcwd(cls):
""" Return the current working directory as a path object. """
return cls(_getcwd())
getcwd = classmethod(getcwd)
#
# --- Operations on path strings.
def abspath(self): return self.__class__(os.path.abspath(self))
def normcase(self): return self.__class__(os.path.normcase(self))
def normpath(self): return self.__class__(os.path.normpath(self))
def realpath(self): return self.__class__(os.path.realpath(self))
def expanduser(self): return self.__class__(os.path.expanduser(self))
def expandvars(self): return self.__class__(os.path.expandvars(self))
def dirname(self): return self.__class__(os.path.dirname(self))
def basename(self): return self.__class__(os.path.basename(self))
def expand(self):
""" Clean up a filename by calling expandvars(),
expanduser(), and normpath() on it.
This is commonly everything needed to clean up a filename
read from a configuration file, for example.
"""
return self.expandvars().expanduser().normpath()
def _get_namebase(self):
base, ext = os.path.splitext(self.name)
return base
def _get_ext(self):
f, ext = os.path.splitext(_base(self))
return ext
def _get_drive(self):
drive, r = os.path.splitdrive(self)
return self.__class__(drive)
parent = property(
dirname, None, None,
""" This path's parent directory, as a new path object.
For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
""")
name = property(
basename, None, None,
""" The name of this file or directory without the full path.
For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
""")
namebase = property(
_get_namebase, None, None,
""" The same as path.name, but with one file extension stripped off.
For example, path('/home/guido/python.tar.gz').name == 'python.tar.gz',
but path('/home/guido/python.tar.gz').namebase == 'python.tar'
""")
ext = property(
_get_ext, None, None,
""" The file extension, for example '.py'. """)
drive = property(
_get_drive, None, None,
""" The drive specifier, for example 'C:'.
This is always empty on systems that don't use drive specifiers.
""")
def splitpath(self):
""" p.splitpath() -> Return (p.parent, p.name). """
parent, child = os.path.split(self)
return self.__class__(parent), child
def splitdrive(self):
""" p.splitdrive() -> Return (p.drive, <the rest of p>).
Split the drive specifier from this path. If there is
no drive specifier, p.drive is empty, so the return value
is simply (path(''), p). This is always the case on Unix.
"""
drive, rel = os.path.splitdrive(self)
return self.__class__(drive), rel
def splitext(self):
""" p.splitext() -> Return (p.stripext(), p.ext).
Split the filename extension from this path and return
the two parts. Either part may be empty.
The extension is everything from '.' to the end of the
last path segment. This has the property that if
(a, b) == p.splitext(), then a + b == p.
"""
filename, ext = os.path.splitext(self)
return self.__class__(filename), ext
def stripext(self):
""" p.stripext() -> Remove one file extension from the path.
For example, path('/home/guido/python.tar.gz').stripext()
returns path('/home/guido/python.tar').
"""
return self.splitext()[0]
if hasattr(os.path, 'splitunc'):
def splitunc(self):
unc, rest = os.path.splitunc(self)
return self.__class__(unc), rest
def _get_uncshare(self):
unc, r = os.path.splitunc(self)
return self.__class__(unc)
uncshare = property(
_get_uncshare, None, None,
""" The UNC mount point for this path.
This is empty for paths on local drives. """)
def joinpath(self, *args):
""" Join two or more path components, adding a separator
character (os.sep) if needed. Returns a new path
object.
"""
return self.__class__(os.path.join(self, *args))
def splitall(self):
r""" Return a list of the path components in this path.
The first item in the list will be a path. Its value will be
either os.curdir, os.pardir, empty, or the root directory of
this path (for example, '/' or 'C:\\'). The other items in
the list will be strings.
path.path.joinpath(*result) will yield the original path.
"""
parts = []
loc = self
while loc != os.curdir and loc != os.pardir:
prev = loc
loc, child = prev.splitpath()
if loc == prev:
break
parts.append(child)
parts.append(loc)
parts.reverse()
return parts
def relpath(self):
""" Return this path as a relative path,
based from the current working directory.
"""
cwd = self.__class__(os.getcwd())
return cwd.relpathto(self)
def relpathto(self, dest):
""" Return a relative path from self to dest.
If there is no relative path from self to dest, for example if
they reside on different drives in Windows, then this returns
dest.abspath().
"""
origin = self.abspath()
dest = self.__class__(dest).abspath()
orig_list = origin.normcase().splitall()
# Don't normcase dest! We want to preserve the case.
dest_list = dest.splitall()
if orig_list[0] != os.path.normcase(dest_list[0]):
# Can't get here from there.
return dest
# Find the location where the two paths start to differ.
i = 0
for start_seg, dest_seg in zip(orig_list, dest_list):
if start_seg != os.path.normcase(dest_seg):
break
i += 1
# Now i is the point where the two paths diverge.
# Need a certain number of "os.pardir"s to work up
# from the origin to the point of divergence.
segments = [os.pardir] * (len(orig_list) - i)
# Need to add the diverging part of dest_list.
segments += dest_list[i:]
if len(segments) == 0:
# If they happen to be identical, use os.curdir.
relpath = os.curdir
else:
relpath = os.path.join(*segments)
return self.__class__(relpath)
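    # Quick illustration (assumes a POSIX-style layout):
    #   path('/home/guido/bin').relpathto('/home/guido/lib/python')
    #   -> path('../lib/python')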
# --- Listing, searching, walking, and matching
def listdir(self, pattern=None):
""" D.listdir() -> List of items in this directory.
Use D.files() or D.dirs() instead if you want a listing
of just files or just subdirectories.
The elements of the list are path objects.
With the optional 'pattern' argument, this only lists
items whose names match the given pattern.
"""
names = os.listdir(self)
if pattern is not None:
names = fnmatch.filter(names, pattern)
return [self / child for child in names]
def dirs(self, pattern=None):
""" D.dirs() -> List of this directory's subdirectories.
The elements of the list are path objects.
This does not walk recursively into subdirectories
(but see path.walkdirs).
With the optional 'pattern' argument, this only lists
directories whose names match the given pattern. For
example, d.dirs('build-*').
"""
return [p for p in self.listdir(pattern) if p.isdir()]
def files(self, pattern=None):
""" D.files() -> List of the files in this directory.
The elements of the list are path objects.
This does not walk into subdirectories (see path.walkfiles).
With the optional 'pattern' argument, this only lists files
whose names match the given pattern. For example,
d.files('*.pyc').
"""
return [p for p in self.listdir(pattern) if p.isfile()]
def walk(self, pattern=None, errors='strict'):
""" D.walk() -> iterator over files and subdirs, recursively.
The iterator yields path objects naming each child item of
this directory and its descendants. This requires that
D.isdir().
This performs a depth-first traversal of the directory tree.
Each directory is returned just before all its children.
The errors= keyword argument controls behavior when an
error occurs. The default is 'strict', which causes an
exception. The other allowed values are 'warn', which
reports the error via warnings.warn(), and 'ignore'.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
if pattern is None or child.fnmatch(pattern):
yield child
try:
isdir = child.isdir()
except Exception:
if errors == 'ignore':
isdir = False
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (child, sys.exc_info()[1]),
TreeWalkWarning)
isdir = False
else:
raise
if isdir:
for item in child.walk(pattern, errors):
yield item
def walkdirs(self, pattern=None, errors='strict'):
""" D.walkdirs() -> iterator over subdirs, recursively.
With the optional 'pattern' argument, this yields only
directories whose names match the given pattern. For
example, mydir.walkdirs('*test') yields only directories
with names ending in 'test'.
The errors= keyword argument controls behavior when an
error occurs. The default is 'strict', which causes an
exception. The other allowed values are 'warn', which
reports the error via warnings.warn(), and 'ignore'.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
dirs = self.dirs()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in dirs:
if pattern is None or child.fnmatch(pattern):
yield child
for subsubdir in child.walkdirs(pattern, errors):
yield subsubdir
def walkfiles(self, pattern=None, errors='strict'):
""" D.walkfiles() -> iterator over files in D, recursively.
The optional argument, pattern, limits the results to files
with names that match the pattern. For example,
mydir.walkfiles('*.tmp') yields only files with the .tmp
extension.
"""
if errors not in ('strict', 'warn', 'ignore'):
raise ValueError("invalid errors parameter")
try:
childList = self.listdir()
except Exception:
if errors == 'ignore':
return
elif errors == 'warn':
warnings.warn(
"Unable to list directory '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
return
else:
raise
for child in childList:
try:
isfile = child.isfile()
isdir = not isfile and child.isdir()
            except Exception:
if errors == 'ignore':
continue
elif errors == 'warn':
warnings.warn(
"Unable to access '%s': %s"
% (self, sys.exc_info()[1]),
TreeWalkWarning)
continue
else:
raise
if isfile:
if pattern is None or child.fnmatch(pattern):
yield child
elif isdir:
for f in child.walkfiles(pattern, errors):
yield f
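    # e.g. (illustrative): print every .py file under the current directory
    #
    #   for f in path('.').walkfiles('*.py'):
    #       print f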
def fnmatch(self, pattern):
""" Return True if self.name matches the given pattern.
pattern - A filename pattern with wildcards,
for example '*.py'.
"""
return fnmatch.fnmatch(self.name, pattern)
def glob(self, pattern):
""" Return a list of path objects that match the pattern.
pattern - a path relative to this directory, with wildcards.
For example, path('/users').glob('*/bin/*') returns a list
of all the files users have in their bin directories.
"""
cls = self.__class__
return [cls(s) for s in glob.glob(_base(self / pattern))]
#
# --- Reading or writing an entire file at once.
def open(self, mode='r'):
""" Open this file. Return a file object. """
return open(self, mode)
def bytes(self):
""" Open this file, read all bytes, return them as a string. """
f = self.open('rb')
try:
return f.read()
finally:
f.close()
def write_bytes(self, bytes, append=False):
""" Open this file and write the given bytes to it.
Default behavior is to overwrite any existing file.
Call p.write_bytes(bytes, append=True) to append instead.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
f.write(bytes)
finally:
f.close()
def text(self, encoding=None, errors='strict'):
r""" Open this file, read it in, return the content as a string.
This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
are automatically translated to '\n'.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. If present, the content of the file is
decoded and returned as a unicode object; otherwise
it is returned as an 8-bit str.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'.
"""
if encoding is None:
# 8-bit
f = self.open(_textmode)
try:
return f.read()
finally:
f.close()
else:
# Unicode
f = codecs.open(self, 'r', encoding, errors)
# (Note - Can't use 'U' mode here, since codecs.open
# doesn't support 'U' mode, even in Python 2.3.)
try:
t = f.read()
finally:
f.close()
return (t.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
r""" Write the given text to this file.
The default behavior is to overwrite any existing file;
to append instead, use the 'append=True' keyword argument.
There are two differences between path.write_text() and
path.write_bytes(): newline handling and Unicode handling.
See below.
Parameters:
- text - str/unicode - The text to be written.
- encoding - str - The Unicode encoding that will be used.
This is ignored if 'text' isn't a Unicode string.
- errors - str - How to handle Unicode encoding errors.
Default is 'strict'. See help(unicode.encode) for the
options. This is ignored if 'text' isn't a Unicode
string.
- linesep - keyword argument - str/unicode - The sequence of
characters to be used to mark end-of-line. The default is
os.linesep. You can also specify None; this means to
leave all newlines as they are in 'text'.
- append - keyword argument - bool - Specifies what to do if
the file already exists (True: append to the end of it;
False: overwrite it.) The default is False.
--- Newline handling.
write_text() converts all standard end-of-line sequences
('\n', '\r', and '\r\n') to your platform's default end-of-line
sequence (see os.linesep; on Windows, for example, the
end-of-line marker is '\r\n').
If you don't like your platform's default, you can override it
using the 'linesep=' keyword argument. If you specifically want
write_text() to preserve the newlines as-is, use 'linesep=None'.
This applies to Unicode text the same as to 8-bit text, except
there are three additional standard Unicode end-of-line sequences:
u'\x85', u'\r\x85', and u'\u2028'.
(This is slightly different from when you open a file for
writing with fopen(filename, "w") in C or open(filename, 'w')
in Python.)
--- Unicode
If 'text' isn't Unicode, then apart from newline handling, the
bytes are written verbatim to the file. The 'encoding' and
'errors' arguments are not used and must be omitted.
If 'text' is Unicode, it is first converted to bytes using the
specified 'encoding' (or the default encoding if 'encoding'
isn't specified). The 'errors' argument applies only to this
conversion.
"""
if isinstance(text, unicode):
if linesep is not None:
# Convert all standard end-of-line sequences to
# ordinary newline characters.
text = (text.replace(u'\r\n', u'\n')
.replace(u'\r\x85', u'\n')
.replace(u'\r', u'\n')
.replace(u'\x85', u'\n')
.replace(u'\u2028', u'\n'))
text = text.replace(u'\n', linesep)
if encoding is None:
encoding = sys.getdefaultencoding()
bytes = text.encode(encoding, errors)
else:
# It is an error to specify an encoding if 'text' is
# an 8-bit string.
assert encoding is None
if linesep is not None:
text = (text.replace('\r\n', '\n')
.replace('\r', '\n'))
bytes = text.replace('\n', linesep)
self.write_bytes(bytes, append)
def lines(self, encoding=None, errors='strict', retain=True):
r""" Open this file, read all lines, return them in a list.
Optional arguments:
encoding - The Unicode encoding (or character set) of
the file. The default is None, meaning the content
of the file is read as 8-bit characters and returned
as a list of (non-Unicode) str objects.
errors - How to handle Unicode errors; see help(str.decode)
for the options. Default is 'strict'
retain - If true, retain newline characters; but all newline
character combinations ('\r', '\n', '\r\n') are
translated to '\n'. If false, newline characters are
stripped off. Default is True.
This uses 'U' mode in Python 2.3 and later.
"""
if encoding is None and retain:
f = self.open(_textmode)
try:
return f.readlines()
finally:
f.close()
else:
return self.text(encoding, errors).splitlines(retain)
def write_lines(self, lines, encoding=None, errors='strict',
linesep=os.linesep, append=False):
r""" Write the given lines of text to this file.
By default this overwrites any existing file at this path.
This puts a platform-specific newline sequence on every line.
See 'linesep' below.
lines - A list of strings.
encoding - A Unicode encoding to use. This applies only if
'lines' contains any Unicode strings.
errors - How to handle errors in Unicode encoding. This
also applies only to Unicode strings.
linesep - The desired line-ending. This line-ending is
applied to every line. If a line already has any
standard line ending ('\r', '\n', '\r\n', u'\x85',
u'\r\x85', u'\u2028'), that will be stripped off and
this will be used instead. The default is os.linesep,
which is platform-dependent ('\r\n' on Windows, '\n' on
Unix, etc.) Specify None to write the lines as-is,
like file.writelines().
Use the keyword argument append=True to append lines to the
file. The default is to overwrite the file. Warning:
When you use this with Unicode data, if the encoding of the
existing data in the file is different from the encoding
you specify with the encoding= parameter, the result is
mixed-encoding data, which can really confuse someone trying
to read the file later.
"""
if append:
mode = 'ab'
else:
mode = 'wb'
f = self.open(mode)
try:
for line in lines:
isUnicode = isinstance(line, unicode)
if linesep is not None:
# Strip off any existing line-end and add the
# specified linesep string.
if isUnicode:
if line[-2:] in (u'\r\n', u'\x0d\x85'):
line = line[:-2]
elif line[-1:] in (u'\r', u'\n',
u'\x85', u'\u2028'):
line = line[:-1]
else:
if line[-2:] == '\r\n':
line = line[:-2]
elif line[-1:] in ('\r', '\n'):
line = line[:-1]
line += linesep
if isUnicode:
if encoding is None:
encoding = sys.getdefaultencoding()
line = line.encode(encoding, errors)
f.write(line)
finally:
f.close()
def read_md5(self):
""" Calculate the md5 hash for this file.
This reads through the entire file.
"""
return self.read_hash('md5')
def _hash(self, hash_name):
f = self.open('rb')
try:
m = hashlib.new(hash_name)
while True:
d = f.read(8192)
if not d:
break
m.update(d)
return m
finally:
f.close()
def read_hash(self, hash_name):
""" Calculate given hash for this file.
List of supported hashes can be obtained from hashlib package. This
reads the entire file.
"""
return self._hash(hash_name).digest()
def read_hexhash(self, hash_name):
""" Calculate given hash for this file, returning hexdigest.
List of supported hashes can be obtained from hashlib package. This
reads the entire file.
"""
return self._hash(hash_name).hexdigest()
# --- Methods for querying the filesystem.
# N.B. On some platforms, the os.path functions may be implemented in C
# (e.g. isdir on Windows, Python 3.2.2), and compiled functions don't get
# bound. Playing it safe and wrapping them all in method calls.
def isabs(self): return os.path.isabs(self)
def exists(self): return os.path.exists(self)
def isdir(self): return os.path.isdir(self)
def isfile(self): return os.path.isfile(self)
def islink(self): return os.path.islink(self)
def ismount(self): return os.path.ismount(self)
if hasattr(os.path, 'samefile'):
def samefile(self): return os.path.samefile(self)
def getatime(self): return os.path.getatime(self)
atime = property(
getatime, None, None,
""" Last access time of the file. """)
def getmtime(self): return os.path.getmtime(self)
mtime = property(
getmtime, None, None,
""" Last-modified time of the file. """)
if hasattr(os.path, 'getctime'):
def getctime(self): return os.path.getctime(self)
ctime = property(
getctime, None, None,
""" Creation time of the file. """)
def getsize(self): return os.path.getsize(self)
size = property(
getsize, None, None,
""" Size of the file, in bytes. """)
if hasattr(os, 'access'):
def access(self, mode):
""" Return true if current user has access to this path.
mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
"""
return os.access(self, mode)
def stat(self):
""" Perform a stat() system call on this path. """
return os.stat(self)
def lstat(self):
""" Like path.stat(), but do not follow symbolic links. """
return os.lstat(self)
def get_owner(self):
r""" Return the name of the owner of this file or directory.
This follows symbolic links.
On Windows, this returns a name of the form ur'DOMAIN\User Name'.
On Windows, a group can own a file or directory.
"""
if os.name == 'nt':
if win32security is None:
raise Exception("path.owner requires win32all to be installed")
desc = win32security.GetFileSecurity(
self, win32security.OWNER_SECURITY_INFORMATION)
sid = desc.GetSecurityDescriptorOwner()
account, domain, typecode = win32security.LookupAccountSid(None, sid)
return domain + u'\\' + account
else:
if pwd is None:
raise NotImplementedError("path.owner is not implemented on this platform.")
st = self.stat()
return pwd.getpwuid(st.st_uid).pw_name
owner = property(
get_owner, None, None,
""" Name of the owner of this file or directory. """)
if hasattr(os, 'statvfs'):
def statvfs(self):
""" Perform a statvfs() system call on this path. """
return os.statvfs(self)
if hasattr(os, 'pathconf'):
def pathconf(self, name):
return os.pathconf(self, name)
#
# --- Modifying operations on files and directories
def utime(self, times):
""" Set the access and modified times of this file. """
os.utime(self, times)
def chmod(self, mode):
os.chmod(self, mode)
if hasattr(os, 'chown'):
def chown(self, uid, gid):
os.chown(self, uid, gid)
def rename(self, new):
os.rename(self, new)
def renames(self, new):
os.renames(self, new)
#
# --- Create/delete operations on directories
def mkdir(self, mode=0777):
os.mkdir(self, mode)
def mkdir_p(self, mode=0777):
try:
self.mkdir(mode)
except OSError, e:
if e.errno != errno.EEXIST:
raise
def makedirs(self, mode=0777):
os.makedirs(self, mode)
def makedirs_p(self, mode=0777):
try:
self.makedirs(mode)
except OSError, e:
if e.errno != errno.EEXIST:
raise
def rmdir(self):
os.rmdir(self)
def rmdir_p(self):
try:
self.rmdir()
except OSError, e:
if e.errno != errno.ENOTEMPTY and e.errno != errno.EEXIST:
raise
def removedirs(self):
os.removedirs(self)
def removedirs_p(self):
try:
self.removedirs()
except OSError, e:
if e.errno != errno.ENOTEMPTY and e.errno != errno.EEXIST:
raise
# --- Modifying operations on files
def touch(self):
""" Set the access/modified times of this file to the current time.
Create the file if it does not exist.
"""
fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
os.close(fd)
os.utime(self, None)
def remove(self):
os.remove(self)
def remove_p(self):
try:
self.unlink()
except OSError, e:
if e.errno != errno.ENOENT:
raise
def unlink(self):
os.unlink(self)
def unlink_p(self):
self.remove_p()
# --- Links
if hasattr(os, 'link'):
def link(self, newpath):
""" Create a hard link at 'newpath', pointing to this file. """
os.link(self, newpath)
if hasattr(os, 'symlink'):
def symlink(self, newlink):
""" Create a symbolic link at 'newlink', pointing here. """
os.symlink(self, newlink)
if hasattr(os, 'readlink'):
def readlink(self):
""" Return the path to which this symbolic link points.
The result may be an absolute or a relative path.
"""
return self.__class__(os.readlink(self))
def readlinkabs(self):
""" Return the path to which this symbolic link points.
The result is always an absolute path.
"""
p = self.readlink()
if p.isabs():
return p
else:
return (self.parent / p).abspath()
#
# --- High-level functions from shutil
copyfile = shutil.copyfile
copymode = shutil.copymode
copystat = shutil.copystat
copy = shutil.copy
copy2 = shutil.copy2
copytree = shutil.copytree
if hasattr(shutil, 'move'):
move = shutil.move
rmtree = shutil.rmtree
def rmtree_p(self):
try:
self.rmtree()
except OSError, e:
if e.errno != errno.ENOENT:
raise
#
# --- Special stuff from os
if hasattr(os, 'chroot'):
def chroot(self):
os.chroot(self)
if hasattr(os, 'startfile'):
def startfile(self):
os.startfile(self)
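# Hedged usage sketch for the filesystem helpers above (assuming the enclosing
# class is the path-like string subclass this module defines; the name `path`
# and the directory names are illustrative only):
#
#     p = path('build/output')
#     p.makedirs_p()            # create the tree, ignoring "already exists"
#     (p / 'log.txt').touch()   # create or refresh an empty file
#     p.rmtree_p()              # remove it again, ignoring "does not exist"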
|
kaiserroll14/301finalproject
|
refs/heads/master
|
main/pandas/io/wb.py
|
9
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from pandas.compat import map, reduce, range, lrange
from pandas.io.common import urlopen
from pandas.io import json
import pandas
import numpy as np
import warnings
warnings.warn("\n"
"The pandas.io.wb module is moved to a separate package "
"(pandas-datareader) and will be removed from pandas in a "
"future version.\nAfter installing the pandas-datareader package "
"(https://github.com/pydata/pandas-datareader), you can change "
"the import ``from pandas.io import data, wb`` to "
"``from pandas_datareader import data, wb``.",
FutureWarning)
# This list of country codes was pulled from wikipedia during October 2014.
# While some exceptions do exist, it is the best proxy for countries supported
# by World Bank. It is an aggregation of the 2-digit ISO 3166-1 alpha-2, and
# 3-digit ISO 3166-1 alpha-3, codes, with 'all', 'ALL', and 'All' appended to
# the end.
country_codes = ['AD', 'AE', 'AF', 'AG', 'AI', 'AL', 'AM', 'AO', 'AQ', 'AR', \
'AS', 'AT', 'AU', 'AW', 'AX', 'AZ', 'BA', 'BB', 'BD', 'BE', \
'BF', 'BG', 'BH', 'BI', 'BJ', 'BL', 'BM', 'BN', 'BO', 'BQ', \
'BR', 'BS', 'BT', 'BV', 'BW', 'BY', 'BZ', 'CA', 'CC', 'CD', \
'CF', 'CG', 'CH', 'CI', 'CK', 'CL', 'CM', 'CN', 'CO', 'CR', \
'CU', 'CV', 'CW', 'CX', 'CY', 'CZ', 'DE', 'DJ', 'DK', 'DM', \
'DO', 'DZ', 'EC', 'EE', 'EG', 'EH', 'ER', 'ES', 'ET', 'FI', \
'FJ', 'FK', 'FM', 'FO', 'FR', 'GA', 'GB', 'GD', 'GE', 'GF', \
'GG', 'GH', 'GI', 'GL', 'GM', 'GN', 'GP', 'GQ', 'GR', 'GS', \
'GT', 'GU', 'GW', 'GY', 'HK', 'HM', 'HN', 'HR', 'HT', 'HU', \
'ID', 'IE', 'IL', 'IM', 'IN', 'IO', 'IQ', 'IR', 'IS', 'IT', \
'JE', 'JM', 'JO', 'JP', 'KE', 'KG', 'KH', 'KI', 'KM', 'KN', \
'KP', 'KR', 'KW', 'KY', 'KZ', 'LA', 'LB', 'LC', 'LI', 'LK', \
'LR', 'LS', 'LT', 'LU', 'LV', 'LY', 'MA', 'MC', 'MD', 'ME', \
'MF', 'MG', 'MH', 'MK', 'ML', 'MM', 'MN', 'MO', 'MP', 'MQ', \
'MR', 'MS', 'MT', 'MU', 'MV', 'MW', 'MX', 'MY', 'MZ', 'NA', \
'NC', 'NE', 'NF', 'NG', 'NI', 'NL', 'NO', 'NP', 'NR', 'NU', \
'NZ', 'OM', 'PA', 'PE', 'PF', 'PG', 'PH', 'PK', 'PL', 'PM', \
'PN', 'PR', 'PS', 'PT', 'PW', 'PY', 'QA', 'RE', 'RO', 'RS', \
'RU', 'RW', 'SA', 'SB', 'SC', 'SD', 'SE', 'SG', 'SH', 'SI', \
'SJ', 'SK', 'SL', 'SM', 'SN', 'SO', 'SR', 'SS', 'ST', 'SV', \
'SX', 'SY', 'SZ', 'TC', 'TD', 'TF', 'TG', 'TH', 'TJ', 'TK', \
'TL', 'TM', 'TN', 'TO', 'TR', 'TT', 'TV', 'TW', 'TZ', 'UA', \
'UG', 'UM', 'US', 'UY', 'UZ', 'VA', 'VC', 'VE', 'VG', 'VI', \
'VN', 'VU', 'WF', 'WS', 'YE', 'YT', 'ZA', 'ZM', 'ZW', \
'ABW', 'AFG', 'AGO', 'AIA', 'ALA', 'ALB', 'AND', 'ARE', \
'ARG', 'ARM', 'ASM', 'ATA', 'ATF', 'ATG', 'AUS', 'AUT', \
'AZE', 'BDI', 'BEL', 'BEN', 'BES', 'BFA', 'BGD', 'BGR', \
'BHR', 'BHS', 'BIH', 'BLM', 'BLR', 'BLZ', 'BMU', 'BOL', \
'BRA', 'BRB', 'BRN', 'BTN', 'BVT', 'BWA', 'CAF', 'CAN', \
'CCK', 'CHE', 'CHL', 'CHN', 'CIV', 'CMR', 'COD', 'COG', \
'COK', 'COL', 'COM', 'CPV', 'CRI', 'CUB', 'CUW', 'CXR', \
'CYM', 'CYP', 'CZE', 'DEU', 'DJI', 'DMA', 'DNK', 'DOM', \
'DZA', 'ECU', 'EGY', 'ERI', 'ESH', 'ESP', 'EST', 'ETH', \
'FIN', 'FJI', 'FLK', 'FRA', 'FRO', 'FSM', 'GAB', 'GBR', \
'GEO', 'GGY', 'GHA', 'GIB', 'GIN', 'GLP', 'GMB', 'GNB', \
'GNQ', 'GRC', 'GRD', 'GRL', 'GTM', 'GUF', 'GUM', 'GUY', \
'HKG', 'HMD', 'HND', 'HRV', 'HTI', 'HUN', 'IDN', 'IMN', \
'IND', 'IOT', 'IRL', 'IRN', 'IRQ', 'ISL', 'ISR', 'ITA', \
'JAM', 'JEY', 'JOR', 'JPN', 'KAZ', 'KEN', 'KGZ', 'KHM', \
'KIR', 'KNA', 'KOR', 'KWT', 'LAO', 'LBN', 'LBR', 'LBY', \
'LCA', 'LIE', 'LKA', 'LSO', 'LTU', 'LUX', 'LVA', 'MAC', \
'MAF', 'MAR', 'MCO', 'MDA', 'MDG', 'MDV', 'MEX', 'MHL', \
'MKD', 'MLI', 'MLT', 'MMR', 'MNE', 'MNG', 'MNP', 'MOZ', \
'MRT', 'MSR', 'MTQ', 'MUS', 'MWI', 'MYS', 'MYT', 'NAM', \
'NCL', 'NER', 'NFK', 'NGA', 'NIC', 'NIU', 'NLD', 'NOR', \
'NPL', 'NRU', 'NZL', 'OMN', 'PAK', 'PAN', 'PCN', 'PER', \
'PHL', 'PLW', 'PNG', 'POL', 'PRI', 'PRK', 'PRT', 'PRY', \
'PSE', 'PYF', 'QAT', 'REU', 'ROU', 'RUS', 'RWA', 'SAU', \
'SDN', 'SEN', 'SGP', 'SGS', 'SHN', 'SJM', 'SLB', 'SLE', \
'SLV', 'SMR', 'SOM', 'SPM', 'SRB', 'SSD', 'STP', 'SUR', \
'SVK', 'SVN', 'SWE', 'SWZ', 'SXM', 'SYC', 'SYR', 'TCA', \
'TCD', 'TGO', 'THA', 'TJK', 'TKL', 'TKM', 'TLS', 'TON', \
'TTO', 'TUN', 'TUR', 'TUV', 'TWN', 'TZA', 'UGA', 'UKR', \
'UMI', 'URY', 'USA', 'UZB', 'VAT', 'VCT', 'VEN', 'VGB', \
'VIR', 'VNM', 'VUT', 'WLF', 'WSM', 'YEM', 'ZAF', 'ZMB', \
'ZWE', 'all', 'ALL', 'All']
def download(country=['MX', 'CA', 'US'], indicator=['NY.GDP.MKTP.CD', 'NY.GNS.ICTR.ZS'],
start=2003, end=2005,errors='warn'):
"""
Download data series from the World Bank's World Development Indicators
Parameters
----------
indicator: string or list of strings
taken from the ``id`` field in ``WDIsearch()``
country: string or list of strings.
``all`` downloads data for all countries
2 or 3 character ISO country codes select individual
countries (e.g.``US``,``CA``) or (e.g.``USA``,``CAN``). The codes
can be mixed.
The two ISO lists of countries, provided by wikipedia, are hardcoded
into pandas as of 11/10/2014.
start: int
First year of the data series
end: int
Last year of the data series (inclusive)
errors: str {'ignore', 'warn', 'raise'}, default 'warn'
Country codes are validated against a hardcoded list. This controls
the outcome of that validation, and is also applied to the results
returned by the World Bank.
errors='raise' will raise a ValueError on a bad country code.
Returns
-------
``pandas`` DataFrame with columns: country, iso_code, year,
indicator value.
"""
if type(country) == str:
country = [country]
bad_countries = np.setdiff1d(country, country_codes)
# Validate the input
if len(bad_countries) > 0:
tmp = ", ".join(bad_countries)
if errors == 'raise':
raise ValueError("Invalid Country Code(s): %s" % tmp)
if errors == 'warn':
warnings.warn('Non-standard ISO country codes: %s' % tmp)
# Work with a list of indicators
if type(indicator) == str:
indicator = [indicator]
# Download
data = []
bad_indicators = {}
for ind in indicator:
one_indicator_data,msg = _get_data(ind, country, start, end)
if msg == "Success":
data.append(one_indicator_data)
else:
bad_indicators[ind] = msg
if len(bad_indicators.keys()) > 0:
bad_ind_msgs = [i + " : " + m for i,m in bad_indicators.items()]
bad_ind_msgs = "\n\n".join(bad_ind_msgs)
bad_ind_msgs = "\n\nInvalid Indicators:\n\n%s" % bad_ind_msgs
if errors == 'raise':
raise ValueError(bad_ind_msgs)
if errors == 'warn':
warnings.warn(bad_ind_msgs)
# Confirm we actually got some data, and build Dataframe
if len(data) > 0:
out = reduce(lambda x, y: x.merge(y, how='outer'), data)
out = out.drop('iso_code', axis=1)
out = out.set_index(['country', 'year'])
out = out._convert(datetime=True, numeric=True)
return out
else:
msg = "No indicators returned data."
if errors == 'ignore':
msg += " Set errors='warn' for more information."
raise ValueError(msg)
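# Hedged usage sketch for download() (requires network access to the World
# Bank API; the indicator and country codes below are just illustrative):
#
#     gdp = download(indicator='NY.GDP.MKTP.CD', country=['US', 'CA', 'MX'],
#                    start=2003, end=2005)
#     gdp.head()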
def _get_data(indicator="NY.GNS.ICTR.GN.ZS", country='US',
start=2002, end=2005):
if type(country) == str:
country = [country]
countries = ';'.join(country)
# Build URL for api call
url = ("http://api.worldbank.org/countries/" + countries + "/indicators/" +
indicator + "?date=" + str(start) + ":" + str(end) +
"&per_page=25000&format=json")
# Download
with urlopen(url) as response:
data = response.read()
# Check to see if there is a possible problem
possible_message = json.loads(data)[0]
if 'message' in possible_message.keys():
msg = possible_message['message'][0]
try:
msg = msg['key'].split() + ["\n "] + msg['value'].split()
wb_err = ' '.join(msg)
except:
wb_err = ""
if 'key' in msg.keys():
wb_err = msg['key'] + "\n "
if 'value' in msg.keys():
wb_err += msg['value']
error_msg = "Problem with a World Bank Query \n %s"
return None, error_msg % wb_err
if 'total' in possible_message.keys():
if possible_message['total'] == 0:
return None, "No results from world bank."
# Parse JSON file
data = json.loads(data)[1]
country = [x['country']['value'] for x in data]
iso_code = [x['country']['id'] for x in data]
year = [x['date'] for x in data]
value = [x['value'] for x in data]
# Prepare output
out = pandas.DataFrame([country, iso_code, year, value]).T
out.columns = ['country', 'iso_code', 'year', indicator]
return out,"Success"
def get_countries():
'''Query information about countries
'''
url = 'http://api.worldbank.org/countries/?per_page=1000&format=json'
with urlopen(url) as response:
data = response.read()
data = json.loads(data)[1]
data = pandas.DataFrame(data)
data.adminregion = [x['value'] for x in data.adminregion]
data.incomeLevel = [x['value'] for x in data.incomeLevel]
data.lendingType = [x['value'] for x in data.lendingType]
data.region = [x['value'] for x in data.region]
data = data.rename(columns={'id': 'iso3c', 'iso2Code': 'iso2c'})
return data
def get_indicators():
'''Download information about all World Bank data series
'''
url = 'http://api.worldbank.org/indicators?per_page=50000&format=json'
with urlopen(url) as response:
data = response.read()
data = json.loads(data)[1]
data = pandas.DataFrame(data)
# Clean fields
data.source = [x['value'] for x in data.source]
fun = lambda x: x.encode('ascii', 'ignore')
data.sourceOrganization = data.sourceOrganization.apply(fun)
# Clean topic field
def get_value(x):
try:
return x['value']
except:
return ''
fun = lambda x: [get_value(y) for y in x]
data.topics = data.topics.apply(fun)
data.topics = data.topics.apply(lambda x: ' ; '.join(x))
# Clean output
data = data.sort(columns='id')
data.index = pandas.Index(lrange(data.shape[0]))
return data
_cached_series = None
def search(string='gdp.*capi', field='name', case=False):
"""
Search available data series from the world bank
Parameters
----------
string: string
regular expression
field: string
id, name, source, sourceNote, sourceOrganization, topics
See notes below
case: bool
case sensitive search?
Notes
-----
The first time this function is run it will download and cache the full
list of available series. Depending on the speed of your network
connection, this can take time. Subsequent searches will use the cached
copy, so they should be much faster.
id : Data series indicator (for use with the ``indicator`` argument of
``WDI()``) e.g. NY.GNS.ICTR.GN.ZS"
name: Short description of the data series
source: Data collection project
sourceOrganization: Data collection organization
note:
sourceNote:
topics:
"""
# Create cached list of series if it does not exist
global _cached_series
if type(_cached_series) is not pandas.core.frame.DataFrame:
_cached_series = get_indicators()
data = _cached_series[field]
idx = data.str.contains(string, case=case)
out = _cached_series.ix[idx].dropna()
return out
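# Hedged usage sketch for search(); the first call downloads and caches the
# full indicator list, so it can be slow (the regex is only an example):
#
#     hits = search('gdp.*capita.*constant')
#     hits[['id', 'name']].head()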
|
glebysg/GC_server
|
refs/heads/master
|
gestureclean/score_statistics.py
|
1
|
import sys
from vacs.models import Command, Experiment, Vac, Evaluation,\
Assignment, Participant, Score, ValAssignment, Validation
from django.contrib.auth import get_user_model
import csv
import numpy as np
# Get all the Scores for the experiment
experiment_id = 77
scores = Score.objects.filter(experiment__id=experiment_id)
vacs = Vac.objects.filter(experiment__id=experiment_id)
commands = Command.objects.all()
lexicon_index=8
l7_scores = scores.filter(lexicon_number=lexicon_index)
write_data = [["Command", "Score"]]
with open('gestureclean/analytics/best_lexicon.csv', 'w') as filewriter:
writer = csv.writer(filewriter)
counter = 0.0
over_threshold = 0.0
for command in commands:
command_scores = l7_scores.filter(command=command)
score_mean = round(np.mean([1-s.score if (s.vac.name == "Complexity" or s.vac.name == "Amount of movement") else s.score
for s in command_scores]),2)
if score_mean > 0.5:
over_threshold +=1
write_data.append([command.name,score_mean])
counter += 1
write_data.append(["Commands over threshold", str((over_threshold/counter)*100)+"%"])
writer.writerows(write_data)
# Write the list of file names to a text file. This block was a stray
# MATLAB-style snippet in the original; rewritten here as Python, assuming a
# `fileList` sequence of strings is defined earlier in the script.
with open('myfile.txt', 'w') as fid:
    for name in fileList:
        fid.write('%s\n' % name)
|
mhrivnak/pulp
|
refs/heads/master
|
client_lib/test/data/extensions_loader_tests/valid_set/ext2/pulp_cli.py
|
3
|
# -*- coding: utf-8 -*-
#
# Copyright © 2012 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
from pulp.client.extensions.extensions import PulpCliSection
def initialize(context):
section = PulpCliSection('section-2', 'Section 2')
context.cli.add_section(section)
|
fernandog/Medusa
|
refs/heads/optimized
|
lib/rtorrent/err.py
|
182
|
# Copyright (c) 2013 Chris Lucas, <chris@chrisjlucas.com>
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from rtorrent.common import convert_version_tuple_to_str
class RTorrentVersionError(Exception):
def __init__(self, min_version, cur_version):
self.min_version = min_version
self.cur_version = cur_version
self.msg = "Minimum version required: {0}".format(
convert_version_tuple_to_str(min_version))
def __str__(self):
return(self.msg)
class MethodError(Exception):
def __init__(self, msg):
self.msg = msg
def __str__(self):
return(self.msg)
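# Hedged usage sketch: RTorrentVersionError is meant to be raised when a
# connected client is older than the feature being used (the version tuples
# below are illustrative only):
#
#     if client_version < (0, 9, 2):
#         raise RTorrentVersionError((0, 9, 2), client_version)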
|
FHannes/intellij-community
|
refs/heads/master
|
python/testData/inspections/PyMethodMayBeStaticInspection/staticMethod.py
|
83
|
__author__ = 'ktisha'
class Foo(object):
@staticmethod
def foo(param): # <-method here should not be highlighted
return "foo"
|
nijel/weblate
|
refs/heads/main
|
weblate/trans/migrations/0103_update_source_unit.py
|
2
|
# Generated by Django 3.0.7 on 2020-09-15 10:30
from django.db import migrations
from django.db.models import F
def update_source_unit(apps, schema_editor):
Unit = apps.get_model("trans", "Unit")
Project = apps.get_model("trans", "Project")
db_alias = schema_editor.connection.alias
source_units = Unit.objects.using(db_alias).filter(
translation__language=F("translation__component__source_language")
)
total = source_units.count()
processed = 0
for project in Project.objects.using(db_alias).iterator():
has_labels = project.label_set.exists()
for source in source_units.filter(
translation__component__project=project
).iterator():
processed += 1
if processed % 1000 == 0:
percent = int(100 * processed / total)
print(f"Updating source units {percent}% [{processed}/{total}]...")
# Filter matching translation units
translations = (
Unit.objects.using(db_alias)
.filter(
translation__component=source.translation.component,
id_hash=source.id_hash,
)
.exclude(pk=source.pk)
)
# Update source_unit attribute and wipe extra_flags and explanation
update = {"source_unit": source}
if source.extra_flags:
update["extra_flags"] = ""
if source.explanation:
update["explanation"] = ""
translations.update(**update)
# Wipe labels link to translations
if has_labels and source.labels.exists():
Unit.labels.through.objects.using(db_alias).filter(
unit__in=translations
).delete()
if total:
print(f"Updating source units completed [{processed}/{total}]")
class Migration(migrations.Migration):
dependencies = [
("trans", "0102_unit_source_unit"),
]
operations = [migrations.RunPython(update_source_unit, elidable=True)]
|
zyq001/ryu
|
refs/heads/master
|
ryu/tests/switch/run_mininet.py
|
19
|
#!/usr/bin/env python
import sys
from mininet.cli import CLI
from mininet.link import Link
from mininet.net import Mininet
from mininet.node import RemoteController
from mininet.node import OVSSwitch
from mininet.node import UserSwitch
from mininet.term import makeTerm
from oslo_config import cfg
from ryu import version
if '__main__' == __name__:
opts = [
cfg.StrOpt('switch', default='ovs',
help='test switch (ovs|ovs13|ovs14|cpqd)')
]
conf = cfg.ConfigOpts()
conf.register_cli_opts(opts)
conf(project='ryu', version='run_mininet.py %s' % version)
conf(sys.argv[1:])
switch_type = {'ovs': OVSSwitch, 'ovs13': OVSSwitch,
'ovs14': OVSSwitch, 'cpqd': UserSwitch}
switch = switch_type.get(conf.switch)
if switch is None:
raise ValueError('Invalid switch type. [%s]' % conf.switch)
net = Mininet(switch=switch, controller=RemoteController)
c0 = net.addController('c0')
s1 = net.addSwitch('s1')
s2 = net.addSwitch('s2')
Link(s1, s2)
Link(s1, s2)
Link(s1, s2)
net.build()
c0.start()
s1.start([c0])
s2.start([c0])
if conf.switch in ['ovs', 'ovs13']:
s1.cmd('ovs-vsctl set Bridge s1 protocols=OpenFlow13')
s2.cmd('ovs-vsctl set Bridge s2 protocols=OpenFlow13')
elif conf.switch == 'ovs14':
s1.cmd('ovs-vsctl set Bridge s1 protocols=OpenFlow14')
s2.cmd('ovs-vsctl set Bridge s2 protocols=OpenFlow14')
CLI(net)
net.stop()
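# Hedged usage sketch: the `switch` option registered above is exposed as a
# CLI flag by oslo_config, so the topology can be started against a remote
# Ryu controller with, for example:
#
#     sudo python run_mininet.py --switch ovs13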
|
googleapis/googleapis-gen
|
refs/heads/master
|
google/ads/googleads/v6/googleads-py/google/ads/googleads/v6/enums/types/billing_setup_status.py
|
1
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package='google.ads.googleads.v6.enums',
marshal='google.ads.googleads.v6',
manifest={
'BillingSetupStatusEnum',
},
)
class BillingSetupStatusEnum(proto.Message):
r"""Message describing BillingSetup statuses. """
class BillingSetupStatus(proto.Enum):
r"""The possible statuses of a BillingSetup."""
UNSPECIFIED = 0
UNKNOWN = 1
PENDING = 2
APPROVED_HELD = 3
APPROVED = 4
CANCELLED = 5
__all__ = tuple(sorted(__protobuf__.manifest))
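# Hedged usage sketch: the nested enum behaves like a standard proto-plus
# (IntEnum-based) enum, so values can be referenced and inspected like this:
#
#     status = BillingSetupStatusEnum.BillingSetupStatus.APPROVED
#     status.name    # -> 'APPROVED'
#     status.value   # -> 4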
|
cetic/ansible
|
refs/heads/devel
|
test/units/modules/remote_management/hpe/test_oneview_fc_network.py
|
16
|
# -*- coding: utf-8 -*-
#
# Copyright (2016-2017) Hewlett Packard Enterprise Development LP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from ansible.compat.tests import unittest
from oneview_module_loader import FcNetworkModule
from hpe_test_utils import OneViewBaseTestCase
FAKE_MSG_ERROR = 'Fake message error'
DEFAULT_FC_NETWORK_TEMPLATE = dict(
name='New FC Network 2',
autoLoginRedistribution=True,
fabricType='FabricAttach'
)
PARAMS_FOR_PRESENT = dict(
config='config.json',
state='present',
data=dict(name=DEFAULT_FC_NETWORK_TEMPLATE['name'])
)
PARAMS_WITH_CHANGES = dict(
config='config.json',
state='present',
data=dict(name=DEFAULT_FC_NETWORK_TEMPLATE['name'],
newName="New Name",
fabricType='DirectAttach')
)
PARAMS_FOR_ABSENT = dict(
config='config.json',
state='absent',
data=dict(name=DEFAULT_FC_NETWORK_TEMPLATE['name'])
)
class FcNetworkModuleSpec(unittest.TestCase,
OneViewBaseTestCase):
"""
OneViewBaseTestCase provides the mocks used in this test case
"""
def setUp(self):
self.configure_mocks(self, FcNetworkModule)
self.resource = self.mock_ov_client.fc_networks
def test_should_create_new_fc_network(self):
self.resource.get_by.return_value = []
self.resource.create.return_value = DEFAULT_FC_NETWORK_TEMPLATE
self.mock_ansible_module.params = PARAMS_FOR_PRESENT
FcNetworkModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=FcNetworkModule.MSG_CREATED,
ansible_facts=dict(fc_network=DEFAULT_FC_NETWORK_TEMPLATE)
)
def test_should_not_update_when_data_is_equals(self):
self.resource.get_by.return_value = [DEFAULT_FC_NETWORK_TEMPLATE]
self.mock_ansible_module.params = PARAMS_FOR_PRESENT
FcNetworkModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
msg=FcNetworkModule.MSG_ALREADY_PRESENT,
ansible_facts=dict(fc_network=DEFAULT_FC_NETWORK_TEMPLATE)
)
def test_update_when_data_has_modified_attributes(self):
data_merged = DEFAULT_FC_NETWORK_TEMPLATE.copy()
data_merged['fabricType'] = 'DirectAttach'
self.resource.get_by.return_value = [DEFAULT_FC_NETWORK_TEMPLATE]
self.resource.update.return_value = data_merged
self.mock_ansible_module.params = PARAMS_WITH_CHANGES
FcNetworkModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=FcNetworkModule.MSG_UPDATED,
ansible_facts=dict(fc_network=data_merged)
)
def test_should_remove_fc_network(self):
self.resource.get_by.return_value = [DEFAULT_FC_NETWORK_TEMPLATE]
self.mock_ansible_module.params = PARAMS_FOR_ABSENT
FcNetworkModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
msg=FcNetworkModule.MSG_DELETED
)
def test_should_do_nothing_when_fc_network_not_exist(self):
self.resource.get_by.return_value = []
self.mock_ansible_module.params = PARAMS_FOR_ABSENT
FcNetworkModule().run()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
msg=FcNetworkModule.MSG_ALREADY_ABSENT
)
def test_update_scopes_when_different(self):
params_to_scope = PARAMS_FOR_PRESENT.copy()
params_to_scope['data']['scopeUris'] = ['test']
self.mock_ansible_module.params = params_to_scope
resource_data = DEFAULT_FC_NETWORK_TEMPLATE.copy()
resource_data['scopeUris'] = ['fake']
resource_data['uri'] = 'rest/fc/fake'
self.resource.get_by.return_value = [resource_data]
patch_return = resource_data.copy()
patch_return['scopeUris'] = ['test']
self.resource.patch.return_value = patch_return
FcNetworkModule().run()
self.resource.patch.assert_called_once_with('rest/fc/fake',
operation='replace',
path='/scopeUris',
value=['test'])
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=True,
ansible_facts=dict(fc_network=patch_return),
msg=FcNetworkModule.MSG_UPDATED
)
def test_should_do_nothing_when_scopes_are_the_same(self):
params_to_scope = PARAMS_FOR_PRESENT.copy()
params_to_scope['data']['scopeUris'] = ['test']
self.mock_ansible_module.params = params_to_scope
resource_data = DEFAULT_FC_NETWORK_TEMPLATE.copy()
resource_data['scopeUris'] = ['test']
self.resource.get_by.return_value = [resource_data]
FcNetworkModule().run()
self.resource.patch.assert_not_called()
self.mock_ansible_module.exit_json.assert_called_once_with(
changed=False,
ansible_facts=dict(fc_network=resource_data),
msg=FcNetworkModule.MSG_ALREADY_PRESENT
)
|
temnoregg/django-helpdesk
|
refs/heads/master
|
helpdesk/admin.py
|
3
|
from django.contrib import admin
from django.contrib.auth import get_user_model
from django.contrib.auth.admin import UserAdmin
from helpdesk.models import Queue, Ticket, FollowUp, PreSetReply, KBCategory
from helpdesk.models import EscalationExclusion, EmailTemplate, KBItem
from helpdesk.models import TicketChange, Attachment, IgnoreEmail
from helpdesk.models import CustomField
from helpdesk.models import QueueMembership
from helpdesk import settings as helpdesk_settings
class QueueAdmin(admin.ModelAdmin):
list_display = ('title', 'slug', 'email_address', 'locale')
class TicketAdmin(admin.ModelAdmin):
list_display = ('title', 'status', 'assigned_to', 'submitter_email',)
date_hierarchy = 'created'
list_filter = ('assigned_to', 'status', )
class TicketChangeInline(admin.StackedInline):
model = TicketChange
class AttachmentInline(admin.StackedInline):
model = Attachment
class FollowUpAdmin(admin.ModelAdmin):
inlines = [TicketChangeInline, AttachmentInline]
class KBItemAdmin(admin.ModelAdmin):
list_display = ('category', 'title', 'last_updated',)
list_display_links = ('title',)
class CustomFieldAdmin(admin.ModelAdmin):
list_display = ('name', 'label', 'data_type')
class EmailTemplateAdmin(admin.ModelAdmin):
list_display = ('template_name', 'heading', 'locale')
list_filter = ('locale', )
class QueueMembershipInline(admin.StackedInline):
model = QueueMembership
class UserAdminWithQueueMemberships(UserAdmin):
def change_view(self, request, object_id, form_url='', extra_context=None):
self.inlines = (QueueMembershipInline,)
return super(UserAdminWithQueueMemberships, self).change_view(
request, object_id, form_url=form_url, extra_context=extra_context)
admin.site.register(Ticket, TicketAdmin)
admin.site.register(Queue, QueueAdmin)
admin.site.register(FollowUp, FollowUpAdmin)
admin.site.register(PreSetReply)
admin.site.register(EscalationExclusion)
admin.site.register(EmailTemplate, EmailTemplateAdmin)
admin.site.register(KBCategory)
admin.site.register(KBItem, KBItemAdmin)
admin.site.register(IgnoreEmail)
admin.site.register(CustomField, CustomFieldAdmin)
if helpdesk_settings.HELPDESK_ENABLE_PER_QUEUE_STAFF_MEMBERSHIP:
admin.site.unregister(get_user_model())
admin.site.register(get_user_model(), UserAdminWithQueueMemberships)
|
katiecheng/Bombolone
|
refs/heads/master
|
env/lib/python2.7/site-packages/Crypto/Cipher/XOR.py
|
126
|
# -*- coding: utf-8 -*-
#
# Cipher/XOR.py : XOR
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
"""XOR toy cipher
XOR is one of the simplest stream ciphers. Encryption and decryption are
performed by XOR-ing data with a keystream made by concatenating
the key.
Do not use it for real applications!
:undocumented: __revision__, __package__
"""
__revision__ = "$Id$"
from Crypto.Cipher import _XOR
class XORCipher:
"""XOR cipher object"""
def __init__(self, key, *args, **kwargs):
"""Initialize a XOR cipher object
See also `new()` at the module level."""
self._cipher = _XOR.new(key, *args, **kwargs)
self.block_size = self._cipher.block_size
self.key_size = self._cipher.key_size
def encrypt(self, plaintext):
"""Encrypt a piece of data.
:Parameters:
plaintext : byte string
The piece of data to encrypt. It can be of any size.
:Return: the encrypted data (byte string, as long as the
plaintext).
"""
return self._cipher.encrypt(plaintext)
def decrypt(self, ciphertext):
"""Decrypt a piece of data.
:Parameters:
ciphertext : byte string
The piece of data to decrypt. It can be of any size.
:Return: the decrypted data (byte string, as long as the
ciphertext).
"""
return self._cipher.decrypt(ciphertext)
def new(key, *args, **kwargs):
"""Create a new XOR cipher
:Parameters:
key : byte string
The secret key to use in the symmetric cipher.
Its length may vary from 1 to 32 bytes.
:Return: an `XORCipher` object
"""
return XORCipher(key, *args, **kwargs)
#: Size of a data block (in bytes)
block_size = 1
#: Size of a key (in bytes)
key_size = xrange(1,32+1)
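# Hedged usage sketch (toy cipher only -- never use XOR for real security):
#
#     from Crypto.Cipher import XOR
#     cipher = XOR.new('my secret key')
#     ciphertext = cipher.encrypt('attack at dawn')
#     XOR.new('my secret key').decrypt(ciphertext) == 'attack at dawn'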
|
kinow-io/kinow-python-sdk
|
refs/heads/master
|
kinow_client/apis/o_auth_api.py
|
1
|
# coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 1.4.41
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class OAuthApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
config = Configuration()
if api_client:
self.api_client = api_client
else:
if not config.api_client:
config.api_client = ApiClient()
self.api_client = config.api_client
def get_token(self, client_id, client_secret, **kwargs):
"""
Get authentication token
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_token(client_id, client_secret, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str client_id: Client Id given by your back office (required)
:param str client_secret: Client secret given by your back office (required)
:return: OAuthToken
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_token_with_http_info(client_id, client_secret, **kwargs)
else:
(data) = self.get_token_with_http_info(client_id, client_secret, **kwargs)
return data
def get_token_with_http_info(self, client_id, client_secret, **kwargs):
"""
Get authentication token
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_token_with_http_info(client_id, client_secret, callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param str client_id: Client Id given by your back office (required)
:param str client_secret: Client secret given by your back office (required)
:return: OAuthToken
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['client_id', 'client_secret']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_token" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'client_id' is set
if ('client_id' not in params) or (params['client_id'] is None):
raise ValueError("Missing the required parameter `client_id` when calling `get_token`")
# verify the required parameter 'client_secret' is set
if ('client_secret' not in params) or (params['client_secret'] is None):
raise ValueError("Missing the required parameter `client_secret` when calling `get_token`")
collection_formats = {}
resource_path = '/get-token'.replace('{format}', 'json')
path_params = {}
query_params = {}
header_params = {}
form_params = []
local_var_files = {}
if 'client_id' in params:
form_params.append(('client_id', params['client_id']))
self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
if 'client_secret' in params:
form_params.append(('client_secret', params['client_secret']))
self.api_client.set_default_header('Content-Type', 'application/x-www-form-urlencoded')
body_params = None
# Authentication setting
auth_settings = []
return self.api_client.call_api(resource_path, 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='OAuthToken',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
|
danakj/chromium
|
refs/heads/master
|
chrome/tools/build/win/resedit.py
|
13
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""A utility script that can extract and edit resources in a Windows binary.
For detailed help, see the script's usage by invoking it with --help."""
import ctypes
import ctypes.wintypes
import logging
import optparse
import os
import shutil
import sys
import tempfile
import win32api
import win32con
_LOGGER = logging.getLogger(__name__)
# The win32api-supplied UpdateResource wrapper unfortunately does not allow
# one to remove resources due to overzealous parameter verification.
# For that case we're forced to go straight to the native API implementation.
UpdateResource = ctypes.windll.kernel32.UpdateResourceW
UpdateResource.argtypes = [
ctypes.wintypes.HANDLE, # HANDLE hUpdate
ctypes.c_wchar_p, # LPCTSTR lpType
ctypes.c_wchar_p, # LPCTSTR lpName
ctypes.c_short, # WORD wLanguage
ctypes.c_void_p, # LPVOID lpData
ctypes.c_ulong, # DWORD cbData
]
UpdateResource.restype = ctypes.c_short
def _ResIdToString(res_id):
# Convert integral res types/ids to a string.
if isinstance(res_id, int):
return "#%d" % res_id
return res_id
class ResourceEditor(object):
"""A utility class to make it easy to extract and manipulate resources in a
Windows binary."""
def __init__(self, input_file, output_file):
"""Create a new editor.
Args:
input_file: path to the input file.
output_file: (optional) path to the output file.
"""
self._input_file = input_file
self._output_file = output_file
self._modified = False
self._module = None
self._temp_dir = None
self._temp_file = None
self._update_handle = None
def __del__(self):
if self._module:
win32api.FreeLibrary(self._module)
self._module = None
if self._update_handle:
_LOGGER.info('Canceling edits to "%s".', self.input_file)
win32api.EndUpdateResource(self._update_handle, False)
self._update_handle = None
if self._temp_dir:
_LOGGER.info('Removing temporary directory "%s".', self._temp_dir)
shutil.rmtree(self._temp_dir)
self._temp_dir = None
def _GetModule(self):
if not self._module:
# Specify a full path to LoadLibraryEx to prevent
# it from searching the path.
input_file = os.path.abspath(self.input_file)
_LOGGER.info('Loading input_file from "%s"', input_file)
self._module = win32api.LoadLibraryEx(
input_file, None, win32con.LOAD_LIBRARY_AS_DATAFILE)
return self._module
def _GetTempDir(self):
if not self._temp_dir:
self._temp_dir = tempfile.mkdtemp()
_LOGGER.info('Created temporary directory "%s".', self._temp_dir)
return self._temp_dir
def _GetUpdateHandle(self):
if not self._update_handle:
# Make a copy of the input file in the temp dir.
self._temp_file = os.path.join(self.temp_dir,
os.path.basename(self._input_file))
shutil.copyfile(self._input_file, self._temp_file)
# Open a resource update handle on the copy.
_LOGGER.info('Opening temp file "%s".', self._temp_file)
self._update_handle = win32api.BeginUpdateResource(self._temp_file, False)
return self._update_handle
modified = property(lambda self: self._modified)
input_file = property(lambda self: self._input_file)
module = property(_GetModule)
temp_dir = property(_GetTempDir)
update_handle = property(_GetUpdateHandle)
def ExtractAllToDir(self, extract_to):
"""Extracts all resources from our input file to a directory hierarchy
in the directory named extract_to.
The generated directory hierarchy is three-level, and looks like:
resource-type/
resource-name/
lang-id.
Args:
extract_to: path to the folder to output to. This folder will be erased
and recreated if it already exists.
"""
_LOGGER.info('Extracting all resources from "%s" to directory "%s".',
self.input_file, extract_to)
if os.path.exists(extract_to):
_LOGGER.info('Destination directory "%s" exists, deleting', extract_to)
shutil.rmtree(extract_to)
# Make sure the destination dir exists.
os.makedirs(extract_to)
# Now enumerate the resource types.
for res_type in win32api.EnumResourceTypes(self.module):
res_type_str = _ResIdToString(res_type)
# And the resource names.
for res_name in win32api.EnumResourceNames(self.module, res_type):
res_name_str = _ResIdToString(res_name)
# Then the languages.
for res_lang in win32api.EnumResourceLanguages(self.module,
res_type, res_name):
res_lang_str = _ResIdToString(res_lang)
dest_dir = os.path.join(extract_to, res_type_str, res_lang_str)
dest_file = os.path.join(dest_dir, res_name_str)
_LOGGER.info('Extracting resource "%s", lang "%d" name "%s" '
'to file "%s".',
res_type_str, res_lang, res_name_str, dest_file)
# Extract each resource to a file in the output dir.
if not os.path.exists(dest_dir):
os.makedirs(dest_dir)
self.ExtractResource(res_type, res_lang, res_name, dest_file)
def ExtractResource(self, res_type, res_lang, res_name, dest_file):
"""Extracts a given resource, specified by type, language id and name,
to a given file.
Args:
res_type: the type of the resource, e.g. "B7".
res_lang: the language id of the resource e.g. 1033.
res_name: the name of the resource, e.g. "SETUP.EXE".
dest_file: path to the file where the resource data will be written.
"""
_LOGGER.info('Extracting resource "%s", lang "%d" name "%s" '
'to file "%s".', res_type, res_lang, res_name, dest_file)
data = win32api.LoadResource(self.module, res_type, res_name, res_lang)
with open(dest_file, 'wb') as f:
f.write(data)
def RemoveResource(self, res_type, res_lang, res_name):
"""Removes a given resource, specified by type, language id and name.
Args:
res_type: the type of the resource, e.g. "B7".
res_lang: the language id of the resource, e.g. 1033.
res_name: the name of the resource, e.g. "SETUP.EXE".
"""
_LOGGER.info('Removing resource "%s:%s".', res_type, res_name)
# We have to go native to perform a removal.
ret = UpdateResource(self.update_handle,
res_type,
res_name,
res_lang,
None,
0)
# Raise an error on failure.
if ret == 0:
error = win32api.GetLastError()
print "error", error
raise RuntimeError(error)
self._modified = True
def UpdateResource(self, res_type, res_lang, res_name, file_path):
"""Inserts or updates a given resource with the contents of a file.
This is a legacy version of UpdateResourceData, where the data arg is read
from a file, rather than passed directly.
"""
_LOGGER.info('Writing resource from file %s', file_path)
with open(file_path, 'rb') as f:
self.UpdateResourceData(res_type, res_lang, res_name, f.read())
def UpdateResourceData(self, res_type, res_lang, res_name, data):
"""Inserts or updates a given resource with the given data.
Args:
res_type: the type of the resource, e.g. "B7".
res_lang: the language id of the resource, e.g. 1033.
res_name: the name of the resource, e.g. "SETUP.EXE".
data: the new resource data.
"""
_LOGGER.info('Writing resource "%s:%s"', res_type, res_name)
win32api.UpdateResource(self.update_handle,
res_type,
res_name,
data,
res_lang)
self._modified = True
def Commit(self):
"""Commit any successful resource edits this editor has performed.
This has the effect of writing the output file.
"""
if self._update_handle:
update_handle = self._update_handle
self._update_handle = None
win32api.EndUpdateResource(update_handle, False)
_LOGGER.info('Writing edited file to "%s".', self._output_file)
shutil.copyfile(self._temp_file, self._output_file)
else:
_LOGGER.info('No edits made. Copying input to "%s".', self._output_file)
shutil.copyfile(self._input_file, self._output_file)
_USAGE = """\
usage: %prog [options] input_file
A utility script to extract and edit the resources in a Windows executable.
EXAMPLE USAGE:
# Extract from mini_installer.exe, the resource type "B7", langid 1033 and
# name "CHROME.PACKED.7Z" to a file named chrome.7z.
# Note that 1033 corresponds to English (United States).
%prog mini_installer.exe --extract B7 1033 CHROME.PACKED.7Z chrome.7z
# Update mini_installer.exe by removing the resource type "BL", langid 1033 and
# name "SETUP.EXE". Add the resource type "B7", langid 1033 and name
# "SETUP.EXE.packed.7z" from the file setup.packed.7z.
# Write the edited file to mini_installer_packed.exe.
%prog mini_installer.exe \\
--remove BL 1033 SETUP.EXE \\
--update B7 1033 SETUP.EXE.packed.7z setup.packed.7z \\
--output_file mini_installer_packed.exe
"""
def _ParseArgs():
parser = optparse.OptionParser(_USAGE)
parser.add_option('--verbose', action='store_true',
help='Enable verbose logging.')
parser.add_option('--extract_all',
help='Path to a folder which will be created, in which all resources '
'from the input_file will be stored, each in a file named '
'"res_type/lang_id/res_name".')
parser.add_option('--extract', action='append', default=[], nargs=4,
help='Extract the resource with the given type, language id and name '
'to the given file.',
metavar='type langid name file_path')
parser.add_option('--remove', action='append', default=[], nargs=3,
help='Remove the resource with the given type, langid and name.',
metavar='type langid name')
parser.add_option('--update', action='append', default=[], nargs=4,
help='Insert or update the resource with the given type, langid and '
'name with the contents of the file given.',
metavar='type langid name file_path')
parser.add_option('--output_file',
help='On success, OUTPUT_FILE will be written with a copy of the '
'input file with the edits specified by any remove or update '
'options.')
options, args = parser.parse_args()
if len(args) != 1:
parser.error('You have to specify an input file to work on.')
modify = options.remove or options.update
if modify and not options.output_file:
parser.error('You have to specify an output file with edit options.')
return options, args
def _ConvertInts(*args):
"""Return args with any all-digit strings converted to ints."""
results = []
for arg in args:
if isinstance(arg, basestring) and arg.isdigit():
results.append(int(arg))
else:
results.append(arg)
return results
def main(options, args):
"""Main program for the script."""
if options.verbose:
logging.basicConfig(level=logging.INFO)
# Create the editor for our input file.
editor = ResourceEditor(args[0], options.output_file)
if options.extract_all:
editor.ExtractAllToDir(options.extract_all)
for res_type, res_lang, res_name, dest_file in options.extract:
res_type, res_lang, res_name = _ConvertInts(res_type, res_lang, res_name)
editor.ExtractResource(res_type, res_lang, res_name, dest_file)
for res_type, res_lang, res_name in options.remove:
res_type, res_lang, res_name = _ConvertInts(res_type, res_lang, res_name)
editor.RemoveResource(res_type, res_lang, res_name)
for res_type, res_lang, res_name, src_file in options.update:
res_type, res_lang, res_name = _ConvertInts(res_type, res_lang, res_name)
editor.UpdateResource(res_type, res_lang, res_name, src_file)
if editor.modified:
editor.Commit()
if __name__ == '__main__':
sys.exit(main(*_ParseArgs()))
|
VielSoft/odoo
|
refs/heads/8.0
|
addons/website_mail/models/mail_message.py
|
264
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import SUPERUSER_ID
from openerp.tools import html2plaintext
from openerp.tools.translate import _
from openerp.osv import osv, fields, expression
class MailMessage(osv.Model):
_inherit = 'mail.message'
def _get_description_short(self, cr, uid, ids, name, arg, context=None):
res = dict.fromkeys(ids, False)
for message in self.browse(cr, uid, ids, context=context):
if message.subject:
res[message.id] = message.subject
else:
plaintext_ct = '' if not message.body else html2plaintext(message.body)
res[message.id] = plaintext_ct[:30] + '%s' % (' [...]' if len(plaintext_ct) >= 30 else '')
return res
_columns = {
'description': fields.function(
_get_description_short, type='char',
help='Message description: either the subject, or the beginning of the body'
),
'website_published': fields.boolean(
'Published', help="Visible on the website as a comment", copy=False,
),
}
def default_get(self, cr, uid, fields_list, context=None):
defaults = super(MailMessage, self).default_get(cr, uid, fields_list, context=context)
# Note: explicitly implemented in default_get() instead of _defaults,
# to avoid setting to True for all existing messages during upgrades.
# TODO: this default should probably be dynamic according to the model
# on which the messages are attached, thus moved to create().
if 'website_published' in fields_list:
defaults.setdefault('website_published', True)
return defaults
def _search(self, cr, uid, args, offset=0, limit=None, order=None,
context=None, count=False, access_rights_uid=None):
""" Override that adds specific access rights of mail.message, to restrict
messages to published messages for public users. """
if uid != SUPERUSER_ID:
group_ids = self.pool.get('res.users').browse(cr, uid, uid, context=context).groups_id
group_user_id = self.pool.get("ir.model.data").get_object_reference(cr, uid, 'base', 'group_public')[1]
if group_user_id in [group.id for group in group_ids]:
args = expression.AND([[('website_published', '=', True)], list(args)])
return super(MailMessage, self)._search(cr, uid, args, offset=offset, limit=limit, order=order,
context=context, count=count, access_rights_uid=access_rights_uid)
def check_access_rule(self, cr, uid, ids, operation, context=None):
""" Add Access rules of mail.message for non-employee user:
- read:
- raise if the type is comment and subtype NULL (internal note)
"""
if uid != SUPERUSER_ID:
group_ids = self.pool.get('res.users').browse(cr, uid, uid, context=context).groups_id
group_user_id = self.pool.get("ir.model.data").get_object_reference(cr, uid, 'base', 'group_public')[1]
if group_user_id in [group.id for group in group_ids]:
cr.execute('SELECT id FROM "%s" WHERE website_published IS FALSE AND id = ANY (%%s)' % (self._table), (ids,))
if cr.fetchall():
raise osv.except_osv(
_('Access Denied'),
_('The requested operation cannot be completed due to security restrictions. Please contact your system administrator.\n\n(Document type: %s, Operation: %s)') % (self._description, operation))
return super(MailMessage, self).check_access_rule(cr, uid, ids=ids, operation=operation, context=context)
|
jbassen/edx-platform
|
refs/heads/master
|
lms/djangoapps/bulk_email/tests/test_err_handling.py
|
77
|
# -*- coding: utf-8 -*-
"""
Unit tests for handling email sending errors
"""
from itertools import cycle
from celery.states import SUCCESS, RETRY
from django.conf import settings
from django.core.management import call_command
from django.core.urlresolvers import reverse
from django.db import DatabaseError
import json
from mock import patch, Mock
from nose.plugins.attrib import attr
from smtplib import SMTPDataError, SMTPServerDisconnected, SMTPConnectError
from bulk_email.models import CourseEmail, SEND_TO_ALL
from bulk_email.tasks import perform_delegate_email_batches, send_course_email
from instructor_task.models import InstructorTask
from instructor_task.subtasks import (
initialize_subtask_info,
SubtaskStatus,
check_subtask_is_valid,
update_subtask_status,
DuplicateTaskException,
MAX_DATABASE_LOCK_RETRIES,
)
from opaque_keys.edx.locations import SlashSeparatedCourseKey
from student.tests.factories import UserFactory, AdminFactory, CourseEnrollmentFactory
from xmodule.modulestore.tests.django_utils import ModuleStoreTestCase
from xmodule.modulestore.tests.factories import CourseFactory
class EmailTestException(Exception):
"""Mock exception for email testing."""
pass
@attr('shard_1')
@patch('bulk_email.models.html_to_text', Mock(return_value='Mocking CourseEmail.text_message'))
@patch.dict(settings.FEATURES, {'ENABLE_INSTRUCTOR_EMAIL': True, 'REQUIRE_COURSE_EMAIL_AUTH': False})
class TestEmailErrors(ModuleStoreTestCase):
"""
Test that errors from sending email are handled properly.
"""
def setUp(self):
super(TestEmailErrors, self).setUp()
course_title = u"ẗëṡẗ title イ乇丂イ ᄊ乇丂丂ムg乇 キo尺 ムレレ тэѕт мэѕѕаБэ"
self.course = CourseFactory.create(display_name=course_title)
self.instructor = AdminFactory.create()
self.client.login(username=self.instructor.username, password="test")
# load initial content (since we don't run migrations as part of tests):
call_command("loaddata", "course_email_template.json")
self.url = reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()})
self.send_mail_url = reverse('send_email', kwargs={'course_id': self.course.id.to_deprecated_string()})
self.success_content = {
'course_id': self.course.id.to_deprecated_string(),
'success': True,
}
@patch('bulk_email.tasks.get_connection', autospec=True)
@patch('bulk_email.tasks.send_course_email.retry')
def test_data_err_retry(self, retry, get_conn):
"""
Test that celery handles transient SMTPDataErrors by retrying.
"""
get_conn.return_value.send_messages.side_effect = SMTPDataError(455, "Throttling: Sending rate exceeded")
test_email = {
'action': 'Send email',
'send_to': 'myself',
'subject': 'test subject for myself',
'message': 'test message for myself'
}
response = self.client.post(self.send_mail_url, test_email)
self.assertEquals(json.loads(response.content), self.success_content)
# Test that we retry upon hitting a 4xx error
self.assertTrue(retry.called)
(__, kwargs) = retry.call_args
exc = kwargs['exc']
self.assertIsInstance(exc, SMTPDataError)
@patch('bulk_email.tasks.get_connection', autospec=True)
@patch('bulk_email.tasks.update_subtask_status')
@patch('bulk_email.tasks.send_course_email.retry')
def test_data_err_fail(self, retry, result, get_conn):
"""
Test that celery handles permanent SMTPDataErrors by failing and not retrying.
"""
# have every fourth email fail due to blacklisting:
get_conn.return_value.send_messages.side_effect = cycle([SMTPDataError(554, "Email address is blacklisted"),
None, None, None])
students = [UserFactory() for _ in xrange(settings.BULK_EMAIL_EMAILS_PER_TASK)]
for student in students:
CourseEnrollmentFactory.create(user=student, course_id=self.course.id)
test_email = {
'action': 'Send email',
'send_to': 'all',
'subject': 'test subject for all',
'message': 'test message for all'
}
response = self.client.post(self.send_mail_url, test_email)
self.assertEquals(json.loads(response.content), self.success_content)
# We shouldn't retry when hitting a 5xx error
self.assertFalse(retry.called)
# Test that after the rejected email, the rest still successfully send
((_entry_id, _current_task_id, subtask_status), _kwargs) = result.call_args
self.assertEquals(subtask_status.skipped, 0)
expected_fails = int((settings.BULK_EMAIL_EMAILS_PER_TASK + 3) / 4.0)
self.assertEquals(subtask_status.failed, expected_fails)
self.assertEquals(subtask_status.succeeded, settings.BULK_EMAIL_EMAILS_PER_TASK - expected_fails)
@patch('bulk_email.tasks.get_connection', autospec=True)
@patch('bulk_email.tasks.send_course_email.retry')
def test_disconn_err_retry(self, retry, get_conn):
"""
Test that celery handles SMTPServerDisconnected by retrying.
"""
get_conn.return_value.open.side_effect = SMTPServerDisconnected(425, "Disconnecting")
test_email = {
'action': 'Send email',
'send_to': 'myself',
'subject': 'test subject for myself',
'message': 'test message for myself'
}
response = self.client.post(self.send_mail_url, test_email)
self.assertEquals(json.loads(response.content), self.success_content)
self.assertTrue(retry.called)
(__, kwargs) = retry.call_args
exc = kwargs['exc']
self.assertIsInstance(exc, SMTPServerDisconnected)
@patch('bulk_email.tasks.get_connection', autospec=True)
@patch('bulk_email.tasks.send_course_email.retry')
def test_conn_err_retry(self, retry, get_conn):
"""
Test that celery handles SMTPConnectError by retrying.
"""
get_conn.return_value.open.side_effect = SMTPConnectError(424, "Bad Connection")
test_email = {
'action': 'Send email',
'send_to': 'myself',
'subject': 'test subject for myself',
'message': 'test message for myself'
}
response = self.client.post(self.send_mail_url, test_email)
self.assertEquals(json.loads(response.content), self.success_content)
self.assertTrue(retry.called)
(__, kwargs) = retry.call_args
exc = kwargs['exc']
self.assertIsInstance(exc, SMTPConnectError)
@patch('bulk_email.tasks.SubtaskStatus.increment')
@patch('bulk_email.tasks.log')
def test_nonexistent_email(self, mock_log, result):
"""
Tests retries when the email doesn't exist
"""
# create an InstructorTask object to pass through
course_id = self.course.id
entry = InstructorTask.create(course_id, "task_type", "task_key", "task_input", self.instructor)
task_input = {"email_id": -1}
with self.assertRaises(CourseEmail.DoesNotExist):
perform_delegate_email_batches(entry.id, course_id, task_input, "action_name") # pylint: disable=no-member
((log_str, __, email_id), __) = mock_log.warning.call_args
self.assertTrue(mock_log.warning.called)
self.assertIn('Failed to get CourseEmail with id', log_str)
self.assertEqual(email_id, -1)
self.assertFalse(result.called)
def test_nonexistent_course(self):
"""
Tests exception when the course in the email doesn't exist
"""
course_id = SlashSeparatedCourseKey("I", "DONT", "EXIST")
email = CourseEmail(course_id=course_id)
email.save()
entry = InstructorTask.create(course_id, "task_type", "task_key", "task_input", self.instructor)
task_input = {"email_id": email.id} # pylint: disable=no-member
# (?i) is a regex for ignore case
with self.assertRaisesRegexp(ValueError, r"(?i)course not found"):
perform_delegate_email_batches(entry.id, course_id, task_input, "action_name") # pylint: disable=no-member
def test_nonexistent_to_option(self):
"""
Tests exception when the to_option in the email doesn't exist
"""
email = CourseEmail(course_id=self.course.id, to_option="IDONTEXIST")
email.save()
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
task_input = {"email_id": email.id} # pylint: disable=no-member
with self.assertRaisesRegexp(Exception, 'Unexpected bulk email TO_OPTION found: IDONTEXIST'):
perform_delegate_email_batches(entry.id, self.course.id, task_input, "action_name") # pylint: disable=no-member
def test_wrong_course_id_in_task(self):
"""
Tests exception when the course_id in task is not the same as one explicitly passed in.
"""
email = CourseEmail(course_id=self.course.id, to_option=SEND_TO_ALL)
email.save()
entry = InstructorTask.create("bogus/task/id", "task_type", "task_key", "task_input", self.instructor)
task_input = {"email_id": email.id} # pylint: disable=no-member
with self.assertRaisesRegexp(ValueError, 'does not match task value'):
perform_delegate_email_batches(entry.id, self.course.id, task_input, "action_name") # pylint: disable=no-member
def test_wrong_course_id_in_email(self):
"""
Tests exception when the course_id in CourseEmail is not the same as one explicitly passed in.
"""
email = CourseEmail(course_id=SlashSeparatedCourseKey("bogus", "course", "id"), to_option=SEND_TO_ALL)
email.save()
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
task_input = {"email_id": email.id} # pylint: disable=no-member
with self.assertRaisesRegexp(ValueError, 'does not match email value'):
perform_delegate_email_batches(entry.id, self.course.id, task_input, "action_name") # pylint: disable=no-member
def test_send_email_undefined_subtask(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id # pylint: disable=no-member
to_list = ['test@test.com']
global_email_context = {'course_title': 'dummy course'}
subtask_id = "subtask-id-value"
subtask_status = SubtaskStatus.create(subtask_id)
email_id = 1001
with self.assertRaisesRegexp(DuplicateTaskException, 'unable to find subtasks of instructor task'):
send_course_email(entry_id, email_id, to_list, global_email_context, subtask_status.to_dict())
def test_send_email_missing_subtask(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id # pylint: disable=no-member
to_list = ['test@test.com']
global_email_context = {'course_title': 'dummy course'}
subtask_id = "subtask-id-value"
initialize_subtask_info(entry, "emailed", 100, [subtask_id])
different_subtask_id = "bogus-subtask-id-value"
subtask_status = SubtaskStatus.create(different_subtask_id)
bogus_email_id = 1001
with self.assertRaisesRegexp(DuplicateTaskException, 'unable to find status for subtask of instructor task'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, subtask_status.to_dict())
def test_send_email_completed_subtask(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id # pylint: disable=no-member
subtask_id = "subtask-id-value"
initialize_subtask_info(entry, "emailed", 100, [subtask_id])
subtask_status = SubtaskStatus.create(subtask_id, state=SUCCESS)
update_subtask_status(entry_id, subtask_id, subtask_status)
bogus_email_id = 1001
to_list = ['test@test.com']
global_email_context = {'course_title': 'dummy course'}
new_subtask_status = SubtaskStatus.create(subtask_id)
with self.assertRaisesRegexp(DuplicateTaskException, 'already completed'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, new_subtask_status.to_dict())
def test_send_email_running_subtask(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id # pylint: disable=no-member
subtask_id = "subtask-id-value"
initialize_subtask_info(entry, "emailed", 100, [subtask_id])
subtask_status = SubtaskStatus.create(subtask_id)
update_subtask_status(entry_id, subtask_id, subtask_status)
check_subtask_is_valid(entry_id, subtask_id, subtask_status)
bogus_email_id = 1001
to_list = ['test@test.com']
global_email_context = {'course_title': 'dummy course'}
with self.assertRaisesRegexp(DuplicateTaskException, 'already being executed'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, subtask_status.to_dict())
def test_send_email_retried_subtask(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id # pylint: disable=no-member
subtask_id = "subtask-id-value"
initialize_subtask_info(entry, "emailed", 100, [subtask_id])
subtask_status = SubtaskStatus.create(subtask_id, state=RETRY, retried_nomax=2)
update_subtask_status(entry_id, subtask_id, subtask_status)
bogus_email_id = 1001
to_list = ['test@test.com']
global_email_context = {'course_title': 'dummy course'}
# try running with a clean subtask:
new_subtask_status = SubtaskStatus.create(subtask_id)
with self.assertRaisesRegexp(DuplicateTaskException, 'already retried'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, new_subtask_status.to_dict())
# try again, with a retried subtask with lower count:
new_subtask_status = SubtaskStatus.create(subtask_id, state=RETRY, retried_nomax=1)
with self.assertRaisesRegexp(DuplicateTaskException, 'already retried'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, new_subtask_status.to_dict())
def test_send_email_with_locked_instructor_task(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id # pylint: disable=no-member
subtask_id = "subtask-id-locked-model"
initialize_subtask_info(entry, "emailed", 100, [subtask_id])
subtask_status = SubtaskStatus.create(subtask_id)
bogus_email_id = 1001
to_list = ['test@test.com']
global_email_context = {'course_title': 'dummy course'}
with patch('instructor_task.subtasks.InstructorTask.save') as mock_task_save:
mock_task_save.side_effect = DatabaseError
with self.assertRaises(DatabaseError):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, subtask_status.to_dict())
self.assertEquals(mock_task_save.call_count, MAX_DATABASE_LOCK_RETRIES)
def test_send_email_undefined_email(self):
# test at a lower level, to ensure that the course gets checked down below too.
entry = InstructorTask.create(self.course.id, "task_type", "task_key", "task_input", self.instructor)
entry_id = entry.id # pylint: disable=no-member
to_list = ['test@test.com']
global_email_context = {'course_title': 'dummy course'}
subtask_id = "subtask-id-undefined-email"
initialize_subtask_info(entry, "emailed", 100, [subtask_id])
subtask_status = SubtaskStatus.create(subtask_id)
bogus_email_id = 1001
with self.assertRaises(CourseEmail.DoesNotExist):
# we skip the call that updates subtask status, since we've not set up the InstructorTask
# for the subtask, and it's not important to the test.
with patch('bulk_email.tasks.update_subtask_status'):
send_course_email(entry_id, bogus_email_id, to_list, global_email_context, subtask_status.to_dict())
|
3dfxmadscientist/cbss-server
|
refs/heads/master
|
addons/project_long_term/__init__.py
|
67
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import project_long_term
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
|
kaplun/ops
|
refs/heads/prod
|
modules/bibencode/lib/bibencode_config.py
|
7
|
# -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Bibencode configuration submodule"""
__revision__ = "$Id$"
import invenio.config
import re
#-----------------------#
# General Configuration #
#-----------------------#
## The command for probing with FFMPEG
CFG_BIBENCODE_FFMPEG_PROBE_COMMAND = invenio.config.CFG_PATH_FFPROBE + " %s -loglevel verbose -show_format -show_streams"
## The command for probing with MEDIAINFO
CFG_BIBENCODE_MEDIAINFO_COMMAND = invenio.config.CFG_PATH_MEDIAINFO + " %s -f --Output=XML"
## Image extraction base command
CFG_BIBENCODE_FFMPEG_EXTRACT_COMMAND = invenio.config.CFG_PATH_FFMPEG + " -ss %.2f -i %s -r 1 -vframes 1 -f image2 -s %s %s"
## Commands for multipass encoding
## In the first pass, you can dump the output to /dev/null and ignore audio
## CFG_BIBENCODE_FFMPEG_COMMAND_PASS_1 = "ffmpeg -i %s -y -loglevel verbose -vcodec %s -pass 1 -passlogfile %s -an -f rawvideo -b %s -s %s %s /dev/null"
## CFG_BIBENCODE_FFMPEG_COMMAND_PASS_2 = "ffmpeg -i %s -y -loglevel verbose -vcodec %s -pass 2 -passlogfile %s -acodec %s -b %s -ab %s -s %s %s %s"
CFG_BIBENCODE_FFMPEG_PASSLOGFILE_PREFIX = invenio.config.CFG_LOGDIR + "/bibencode2pass-%s-%s"
## Path to the encoding logfiles
## Filenames will later be substituted with process specific information
CFG_BIBENCODE_FFMPEG_ENCODING_LOG = invenio.config.CFG_LOGDIR + "/bibencode_%s.log"
## Path to probing logfiles
CFG_BIBENCODE_FFMPEG_PROBE_LOG = invenio.config.CFG_LOGDIR + "/bibencode_probe_%s.log"
## The pattern for the encoding status specific string in the FFmpeg output
CFG_BIBENCODE_FFMPEG_ENCODE_TIME = re.compile("^.+time=(\d\d:\d\d:\d\d.\d\d).+$")
## The pattern for the configuration string with information about compiling options
CFD_BIBENCODE_FFMPEG_OUT_RE_CONFIGURATION = re.compile("(--enable-[a-z0-9\-]*)")
## The minimum ffmpeg compile options for BibEncode to work correctly
CFG_BIBENCODE_FFMPEG_CONFIGURATION_REQUIRED = (
'--enable-gpl',
'--enable-version3',
'--enable-nonfree',
'--enable-libfaac',
'--enable-libtheora',
'--enable-libvorbis',
'--enable-libvpx',
'--enable-libx264',
## '--enable-funky'
)
## Path to the directory for transcoded files
CFG_BIBENCODE_TARGET_DIRECTORY = invenio.config.CFG_TMPDIR + "/"
#------------------------#
# Metadata Configuration #
#------------------------#
## Template for key-value pairs that can be used with FFMPEG to set metadata.
## Not all keys are represented in every video container format.
## FFMPEG will try to write any given key-value pairs. If the container
## format does not support some pairs there won't be an error.
## You might like to verify that the attributes were really written
## by using FFPROBE.
## The FFMPEG argument structure is:
## -metadata key1="value1" -metadata key2="value2" ...
CFG_BIBENCODE_FFMPEG_METADATA_TEMPLATE = {
'title': None,
'author': None,
'album_artist': None,
'album': None,
'grouping': None,
'composer': None,
'year': None,
'track': None,
'comment': None,
'genre': None,
'copyright': None,
'description': None,
'synopsis': None,
'show': None,
'episode_id': None,
"network": None,
'lyrics': None
}
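## Illustrative sketch (not part of the original configuration): how the
## "-metadata" arguments described above could be assembled from a filled-in
## copy of the template. The example keys/values are hypothetical; only keys
## that were actually set would be emitted.
##
## >>> meta = dict(CFG_BIBENCODE_FFMPEG_METADATA_TEMPLATE, title='Demo', year='2011')
## >>> sorted('-metadata %s="%s"' % (k, v) for k, v in meta.items() if v is not None)
## ['-metadata title="Demo"', '-metadata year="2011"']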
# Duration: 00:02:28.58, start: 0.000000, bitrate: 9439 kb/s
# timecode start? bitrate
CFG_BIBENCODE_FFMPEG_RE_VIDEOINFO_DURATION = re.compile("^\s*Duration: (.*?), start: (\d+\.\d+), bitrate: (\d+?) kb\/s$")
# Stream #0.0(eng): Video: h264 (Main), yuv420p, 1920x1056, 9338 kb/s, 23.98 fps, 23.98 tbr, 2997 tbn, 5994 tbc
# Stream #0.1(eng): Video: wmv3, yuv420p, 1440x1080, 9500 kb/s, 25 tbr, 1k tbn, 1k tbc
# number language codec color resolution bitrate fps tbr tbn tbc
CFG_BIBENCODE_FFMPEG_RE_VIDEOINFO_VSTREAM = re.compile("^\s*Stream #(\d+.\d+)\(?(\w+)?\)?: Video: ([a-zA-Z0-9\(\) ]*), (\w+), (\d+x\d+), (\d+) kb\/s, (.+) fps, (.+) tbr, (.+) tbn, (.+) tbc$")
# Stream #0.0(eng): Audio: wmav2, 44100 Hz, 2 channels, s16, 320 kb/s
# Stream #0.1(eng): Audio: aac, 44100 Hz, stereo, s16, 97 kb/s
# number language codec samplerate channels bit-depth bitrate
CFG_BIBENCODE_FFMPEG_RE_VIDEOINFO_ASTREAM = re.compile("^\s*Stream #(\d+.\d+)\(?(\w+)?\)?: Audio: ([a-zA-Z0-9\(\) ]*), (\d+) Hz, ([a-zA-Z0-9 ]+), (\w+), (\d+) kb\/s$")
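## Illustrative sketch (not part of the original configuration): the groups the
## video-stream pattern above captures from the first sample line quoted in the
## comments (stream number, language, codec, color space, resolution, bitrate,
## fps, tbr, tbn, tbc).
##
## >>> line = " Stream #0.0(eng): Video: h264 (Main), yuv420p, 1920x1056, 9338 kb/s, 23.98 fps, 23.98 tbr, 2997 tbn, 5994 tbc"
## >>> CFG_BIBENCODE_FFMPEG_RE_VIDEOINFO_VSTREAM.match(line).groups()
## ('0.0', 'eng', 'h264 (Main)', 'yuv420p', '1920x1056', '9338', '23.98', '23.98', '2997', '5994')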
## FFMPEG command for setting metadata
## This will create a copy of the master and write the metadata there
CFG_BIBENCODE_FFMPEG_METADATA_SET_COMMAND = "ffmpeg -y -i %s -acodec copy -vcodec copy %s"
## FFMPEG metadata argument template
## had to remove '-metadata ' in front because of issues with command splitting
CFG_BIBENCODE_FFMPEG_METADATA_ARGUMENT = "%s=\"%s\""
## File containing mappings from ffprobe and mediainfo to pbcore
CFG_BIBENCODE_PBCORE_MAPPINGS = invenio.config.CFG_ETCDIR + "/bibencode/pbcore_mappings.json"
## XSLT Template from PBCORE to MARCXML
CFG_BIBENCODE_PBCORE_MARC_XSLT = invenio.config.CFG_ETCDIR + "/bibencode/pbcore_to_marc_nons.xsl"
CFG_BIBENCODE_ASPECT_RATIO_MARC_FIELD = "951__x"
## Metadata Patterns for parsing
def create_metadata_re_dict():
""" Creates a dictionary with Regex patterns from the metadata template dictionary
"""
metadata_re_dictionary = {}
for key, value in CFG_BIBENCODE_FFMPEG_METADATA_TEMPLATE.iteritems():
metadata_re_dictionary[key] = re.compile("^\s*%s\s*:\s(((\S*)\s*(\S*))*)$" % key)
return metadata_re_dictionary
CFG_BIBENCODE_FFMPEG_METADATA_RE_DICT = create_metadata_re_dict()
#----------------------#
# Parameter Validation #
#----------------------#
CFG_BIBENCODE_VALID_MODES = ['encode', 'extract', 'meta', 'batch', 'daemon', 'cdsmedia']
CFG_BIBENCODE_FFMPEG_VALID_SIZES = [
'sqcif', 'qcif', 'cif', '4cif', '16cif', 'qqvga', 'qvga', 'vga', 'svga',
'xga', 'uxga', 'qxga', 'sxga', 'qsxga', 'hsxga', 'wvga', 'wxga', 'wsxga',
'wuxga', 'woxga', 'wqsxga', 'wquxga', 'whsxga', 'cga', 'ega',
'hd480', 'hd720', 'hd1080'
]
CFG_BIBENCODE_RESOLUTIONS = {
"ntsc": "720x480",
"pal": "720x576",
"qntsc": "352x240",
"qpal": "352x288",
"sntsc": "640x480",
"spal": "768x576",
"film": "352x240",
"ntsc-film": "352x240",
"sqcif": "128x96",
"qcif": "176x144",
"cif": "352x288",
"4cif": "704x576",
"16cif": "1408x1152",
"qqvga": "160x120",
"qvga": "320x240",
"vga": "640x480",
"svga": "800x600",
"xga": "1024x768",
"uxga": "1600x1200",
"qxga": "2048x1536",
"sxga": "1280x1024",
"qsxga": "2560x2048",
"hsxga": "5120x4096",
"wvga": "852x480",
"wxga": "1366x768",
"wsxga": "1600x1024",
"wuxga": "1920x1200",
"woxga": "2560x1600",
"wqsxga": "3200x2048",
"wquxga": "3840x2400",
"whsxga": "6400x4096",
"whuxga": "7680x4800",
"cga": "320x200",
"ega": "640x350",
"hd480": "852x480",
"hd720": "1280x720",
"hd1080": "1920x1080"
}
CFG_BIBENCODE_FFMPEG_RE_VALID_SIZE = re.compile("^\d+x\d+$")
CFG_BIBENCODE_FFMPEG_VALID_VCODECS = [
'libx264', 'libvpx', 'libtheora', 'mpeg4', 'wmv2', 'wmv1', 'flv'
]
CFG_BIBENCODE_FFMPEG_VALID_ACODECS = [
'libmp3lame', 'libvorbis', 'wma1', 'wma2', 'libfaac'
]
#------------------------#
# Profiles Configuration #
#------------------------#
CFG_BIBENCODE_PROFILES_ENCODING = invenio.config.CFG_ETCDIR + "/bibencode/encoding_profiles.json"
CFG_BIBENCODE_PROFILES_ENCODING_LOCAL = invenio.config.CFG_ETCDIR + "/bibencode/encoding_profiles_local.json"
CFG_BIBENCODE_PROFILES_EXTRACT = invenio.config.CFG_ETCDIR + "/bibencode/extract_profiles.json"
CFG_BIBENCODE_PROFILES_EXTRACT_LOCAL = invenio.config.CFG_ETCDIR + "/bibencode/extract_profiles_local.json"
CFG_BIBENCODE_TEMPLATE_BATCH_SUBMISSION = invenio.config.CFG_ETCDIR + "/bibencode/batch_template_submission.json"
#----------------------#
# Daemon Configuration #
#----------------------#
CFG_BIBENCODE_DAEMON_DIR_NEWJOBS = invenio.config.CFG_TMPSHAREDDIR + '/bibencode/jobs'
CFG_BIBENCODE_DAEMON_DIR_OLDJOBS = invenio.config.CFG_TMPSHAREDDIR + '/bibencode/jobs/done'
#-------------------#
# WebSubmit Support #
#-------------------#
CFG_BIBENCODE_WEBSUBMIT_ASPECT_SAMPLE_FNAME = 'aspect_sample_.jpg'
CFG_BIBENCODE_WEBSUBMIT_ASPECT_SAMPLE_DIR = 'aspect_samples'
|
matmutant/sl4a
|
refs/heads/master
|
python-build/python-libs/ase/scripts/take_picture.py
|
87
|
import android
droid = android.Android()
droid.cameraCapturePicture('/sdcard/foo.jpg')
|
usc-isi-i2/mydig-webservice
|
refs/heads/master
|
ws/app_annotation.py
|
1
|
from app_base import *
@api.route('/projects/<project_name>/entities/<kg_id>/fields/<field_name>/annotations')
class FieldAnnotations(Resource):
@requires_auth
def get(self, project_name, kg_id, field_name):
if project_name not in data:
return rest.not_found('Project: {} not found'.format(project_name))
if kg_id not in data[project_name]['field_annotations']:
return rest.not_found('kg_id {} not found'.format(kg_id))
if field_name not in data[project_name]['field_annotations'][kg_id]:
return rest.not_found('Field name {} not found'.format(field_name))
return data[project_name]['field_annotations'][kg_id][field_name]
@requires_auth
def delete(self, project_name, kg_id, field_name):
if project_name not in data:
return rest.not_found('Project: {} not found'.format(project_name))
if kg_id not in data[project_name]['field_annotations']:
return rest.not_found('kg_id {} not found'.format(kg_id))
if field_name not in data[project_name]['field_annotations'][kg_id]:
return rest.not_found('Field name {} not found'.format(field_name))
data[project_name]['field_annotations'][kg_id][field_name] = dict()
# write to file
self.write_to_field_file(project_name, field_name)
# load into ES
self.es_remove_field_annotation('full', project_name, kg_id, field_name)
self.es_remove_field_annotation('sample', project_name, kg_id, field_name)
return rest.deleted()
@requires_auth
def post(self, project_name, kg_id, field_name):
if project_name not in data:
return rest.not_found('Project: {} not found'.format(project_name))
# field should be in master_config
if field_name not in data[project_name]['master_config']['fields']:
return rest.bad_request('Field {} does not exist'.format(field_name))
input = request.get_json(force=True)
key = input.get('key', '')
if key.strip() == '':
return rest.bad_request('invalid key')
human_annotation = input.get('human_annotation', -1)
if not isinstance(human_annotation, int) or human_annotation == -1:
return rest.bad_request('invalid human_annotation')
_add_keys_to_dict(data[project_name]['field_annotations'], [kg_id, field_name, key])
data[project_name]['field_annotations'][kg_id][field_name][key]['human_annotation'] = human_annotation
# write to file
self.write_to_field_file(project_name, field_name)
# load into ES
self.es_update_field_annotation('full', project_name, kg_id, field_name, key, human_annotation)
self.es_update_field_annotation('sample', project_name, kg_id, field_name, key, human_annotation)
return rest.created()
@requires_auth
def put(self, project_name, kg_id, field_name):
return self.post(project_name, kg_id, field_name)
@staticmethod
def es_update_field_annotation(index_version, project_name, kg_id, field_name, key, human_annotation):
try:
es = ES(config['es'][index_version + '_url'])
index = data[project_name]['master_config']['index'][index_version]
type = data[project_name]['master_config']['root_name']
hits = es.retrieve_doc(index, type, kg_id)
if hits:
doc = hits['hits']['hits'][0]['_source']
_add_keys_to_dict(doc, ['knowledge_graph', field_name])
for field_instance in doc['knowledge_graph'][field_name]:
if field_instance['key'] == key:
field_instance['human_annotation'] = human_annotation
break
res = es.load_data(index, type, doc, doc['doc_id'])
if not res:
logger.info('Fail to load data to {}: project {}, kg_id {}, field {}, key {}'.format(
index_version, project_name, kg_id, field_name, key
))
return
logger.info('Fail to retrieve from {}: project {}, kg_id {}, field {}, key {}'.format(
index_version, project_name, kg_id, field_name, key
))
return
except Exception as e:
logger.warning('Fail to update annotation to {}: project {}, kg_id {}, field {}, key {}'.format(
index_version, project_name, kg_id, field_name, key
))
@staticmethod
def es_remove_field_annotation(index_version, project_name, kg_id, field_name, key=None):
try:
es = ES(config['es'][index_version + '_url'])
index = data[project_name]['master_config']['index'][index_version]
type = data[project_name]['master_config']['root_name']
hits = es.retrieve_doc(index, type, kg_id)
if hits:
doc = hits['hits']['hits'][0]['_source']
if 'knowledge_graph' not in doc:
return
if field_name not in doc['knowledge_graph']:
return
for field_instance in doc['knowledge_graph'][field_name]:
if key is None: # delete all annotations
if 'human_annotation' in field_instance:
del field_instance['human_annotation']
else: # delete annotation of a specific key
if field_instance['key'] == key:
del field_instance['human_annotation']
break
res = es.load_data(index, type, doc, doc['doc_id'])
if not res:
return True
return False
except Exception as e:
logger.warning('Fail to remove annotation from {}: project {}, kg_id {}, field {}, key {}'.format(
index_version, project_name, kg_id, field_name, key
))
@staticmethod
def write_to_field_file(project_name, field_name):
file_path = os.path.join(get_project_dir_path(project_name), 'field_annotations/' + field_name + '.csv')
field_obj = data[project_name]['field_annotations']
with open(file_path, 'w') as csvfile:
writer = csv.DictWriter(
csvfile, fieldnames=['field_name', 'kg_id', 'key', 'human_annotation'],
delimiter=',', quotechar='"', quoting=csv.QUOTE_NONNUMERIC)
writer.writeheader()
for kg_id_, kg_obj_ in field_obj.items():
for field_name_, field_obj_ in kg_obj_.items():
if field_name_ == field_name:
for key_, key_obj_ in field_obj_.items():
writer.writerow(
{'field_name': field_name_, 'kg_id': kg_id_,
'key': key_, 'human_annotation': key_obj_['human_annotation']})
@staticmethod
def load_from_field_file(project_name):
dir_path = os.path.join(get_project_dir_path(project_name), 'field_annotations')
for file_name in os.listdir(dir_path):
name, ext = os.path.splitext(file_name)
if ext != '.csv':
continue
file_path = os.path.join(dir_path, file_name)
with open(file_path, 'r') as csvfile:
reader = csv.DictReader(
csvfile, fieldnames=['field_name', 'kg_id', 'key', 'human_annotation'],
delimiter=',', quotechar='"', quoting=csv.QUOTE_NONNUMERIC)
next(reader, None) # skip header
for row in reader:
_add_keys_to_dict(data[project_name]['field_annotations'],
[row['kg_id'], row['field_name'], row['key']])
data[project_name]['field_annotations'][row['kg_id']][row['field_name']][row['key']][
'human_annotation'] = row['human_annotation']
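# Illustrative note (not part of the original module): write_to_field_file() and
# load_from_field_file() above round-trip annotations through one CSV per field,
# e.g. field_annotations/<field_name>.csv. With QUOTE_NONNUMERIC, string columns
# are quoted and the integer human_annotation is not, so it is read back as a
# float. The row values below are hypothetical:
#
#   "field_name","kg_id","key","human_annotation"
#   "phone","doc-123","+15551234567",1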
@api.route('/projects/<project_name>/entities/<kg_id>/fields/<field_name>/annotations/<key>')
class FieldInstanceAnnotations(Resource):
@requires_auth
def get(self, project_name, kg_id, field_name, key):
if project_name not in data:
return rest.not_found('Project {} not found'.format(project_name))
if kg_id not in data[project_name]['field_annotations']:
return rest.not_found(
'Field annotations not found, kg_id: {}'.format(kg_id))
if field_name not in data[project_name]['field_annotations'][kg_id]:
return rest.not_found(
'Field annotations not found, kg_id: {}, field: {}'.format(kg_id, field_name))
if key not in data[project_name]['field_annotations'][kg_id][field_name]:
return rest.not_found(
'Field annotations not found, kg_id: {}, field: {}, key: {}'.format(kg_id, field_name, key))
return data[project_name]['field_annotations'][kg_id][field_name][key]
@requires_auth
def delete(self, project_name, kg_id, field_name, key):
if project_name not in data:
return rest.not_found('Project {} not found'.format(project_name))
if kg_id not in data[project_name]['field_annotations']:
return rest.not_found(
'Field annotations not found, kg_id: {}'.format(kg_id))
if field_name not in data[project_name]['field_annotations'][kg_id]:
return rest.not_found(
'Field annotations not found, kg_id: {}, field: {}'.format(kg_id, field_name))
if key not in data[project_name]['field_annotations'][kg_id][field_name]:
return rest.not_found(
'Field annotations not found, kg_id: {}, field: {}, key: {}'.format(kg_id, field_name, key))
del data[project_name]['field_annotations'][kg_id][field_name][key]
# write to file
FieldAnnotations.write_to_field_file(project_name, field_name)
# load into ES
FieldAnnotations.es_remove_field_annotation('full', project_name, kg_id, field_name, key)
FieldAnnotations.es_remove_field_annotation('sample', project_name, kg_id, field_name, key)
return rest.deleted()
@api.route('/projects/<project_name>/tags/<tag_name>/annotations/<entity_name>/annotations')
class TagAnnotationsForEntityType(Resource):
@requires_auth
def delete(self, project_name, tag_name, entity_name):
if project_name not in data:
return rest.not_found('Project: {} not found'.format(project_name))
if tag_name not in data[project_name]['master_config']['tags']:
return rest.not_found('Tag {} not found'.format(tag_name))
if entity_name not in data[project_name]['entities']:
return rest.not_found('Entity {} not found'.format(entity_name))
for kg_id, kg_item in data[project_name]['entities'][entity_name].items():
# if tag_name in kg_item.keys():
# if 'human_annotation' in kg_item[tag_name]:
# del kg_item[tag_name]['human_annotation']
# hard code
if tag_name in kg_item:
del kg_item[tag_name]
# remove from ES
self.es_remove_tag_annotation('full', project_name, kg_id, tag_name)
self.es_remove_tag_annotation('sample', project_name, kg_id, tag_name)
if len(kg_item) == 0:
del data[project_name]['entities'][entity_name][kg_id]
# write to file
self.write_to_tag_file(project_name, tag_name)
return rest.deleted()
@requires_auth
def get(self, project_name, tag_name, entity_name):
if project_name not in data:
return rest.not_found('Project: {} not found'.format(project_name))
if tag_name not in data[project_name]['master_config']['tags']:
return rest.not_found('Tag {} not found'.format(tag_name))
result = dict()
if entity_name in data[project_name]['entities']:
for kg_id, kg_item in data[project_name]['entities'][entity_name].items():
for tag_name_, annotation in kg_item.items():
if tag_name == tag_name_:
result[kg_id] = annotation
return result
@requires_auth
def post(self, project_name, tag_name, entity_name):
if project_name not in data:
return rest.not_found('Project: {} not found'.format(project_name))
if tag_name not in data[project_name]['master_config']['tags']:
return rest.not_found('Tag {} not found'.format(tag_name))
if entity_name not in data[project_name]['entities']:
return rest.not_found('Entity {} not found'.format(entity_name))
input = request.get_json(force=True)
kg_id = input.get('kg_id', '')
if len(kg_id) == 0:
return rest.bad_request('Invalid kg_id')
human_annotation = input.get('human_annotation', -1)
if not isinstance(human_annotation, int) or human_annotation == -1:
return rest.bad_request('Invalid human annotation')
# if kg_id not in data[project_name]['entities'][entity_name]:
# return rest.not_found('kg_id {} not found'.format(kg_id))
#
# if tag_name not in data[project_name]['entities'][entity_name][kg_id]:
# return rest.not_found('Tag {} not found'.format(tag_name))
_add_keys_to_dict(data[project_name]['entities'][entity_name], [kg_id, tag_name])
data[project_name]['entities'][entity_name][kg_id][tag_name]['human_annotation'] = human_annotation
# write to file
self.write_to_tag_file(project_name, tag_name)
# load to ES
self.es_update_tag_annotation('full', project_name, kg_id, tag_name, human_annotation)
self.es_update_tag_annotation('sample', project_name, kg_id, tag_name, human_annotation)
return rest.created()
@requires_auth
def put(self, project_name, tag_name, entity_name):
return self.post(project_name, tag_name, entity_name)
@staticmethod
def es_update_tag_annotation(index_version, project_name, kg_id, tag_name, human_annotation):
try:
es = ES(config['es'][index_version + '_url'])
index = data[project_name]['master_config']['index'][index_version]
type = data[project_name]['master_config']['root_name']
hits = es.retrieve_doc(index, type, kg_id)
if hits:
doc = hits['hits']['hits'][0]['_source']
_add_keys_to_dict(doc, ['knowledge_graph', '_tags', tag_name])
doc['knowledge_graph']['_tags'][tag_name]['human_annotation'] = human_annotation
res = es.load_data(index, type, doc, doc['doc_id'])
if not res:
logger.info('Fail to retrieve or load data to {}: project {}, kg_id {}, tag{}, index {}, type {}'
.format(index_version, project_name, kg_id, tag_name, index, type))
return
logger.info('Fail to retrieve or load data to {}: project {}, kg_id {}, tag{}, index {}, type {}'
.format(index_version, project_name, kg_id, tag_name, index, type))
return
except Exception as e:
logger.warning('Fail to update annotation to {}: project {}, kg_id {}, tag {}'
.format(index_version, project_name, kg_id, tag_name))
@staticmethod
def es_remove_tag_annotation(index_version, project_name, kg_id, tag_name):
try:
es = ES(config['es'][index_version + '_url'])
index = data[project_name]['master_config']['index'][index_version]
type = data[project_name]['master_config']['root_name']
hits = es.retrieve_doc(index, type, kg_id)
if hits:
doc = hits['hits']['hits'][0]['_source']
if 'knowledge_graph' not in doc:
return
if '_tags' not in doc['knowledge_graph']:
return
if tag_name not in doc['knowledge_graph']['_tags']:
return
if 'human_annotation' not in doc['knowledge_graph']['_tags'][tag_name]:
return
# only remove 'human_annotation' here instead of deleting the whole tag,
# since the tag itself is deleted through a separate API
del doc['knowledge_graph']['_tags'][tag_name]['human_annotation']
res = es.load_data(index, type, doc, doc['doc_id'])
if not res:
logger.info('Fail to retrieve or load data to {}: project {}, kg_id {}, tag{}, index {}, type {}'
.format(index_version, project_name, kg_id, tag_name, index, type))
return
logger.info('Fail to retrieve or load data to {}: project {}, kg_id {}, tag{}, index {}, type {}'
.format(index_version, project_name, kg_id, tag_name, index, type))
return
except Exception as e:
logger.warning('Fail to remove annotation from {}: project {}, kg_id {}, tag {}'.format(
index_version, project_name, kg_id, tag_name
))
@staticmethod
def write_to_tag_file(project_name, tag_name):
file_path = os.path.join(get_project_dir_path(project_name), 'entity_annotations/' + tag_name + '.csv')
tag_obj = data[project_name]['entities']
with open(file_path, 'w') as csvfile:
writer = csv.DictWriter(
csvfile, fieldnames=['tag_name', 'entity_name', 'kg_id', 'human_annotation'],
delimiter=',', quotechar='"', quoting=csv.QUOTE_NONNUMERIC)
writer.writeheader()
for entity_name_, entity_obj_ in tag_obj.items():
for kg_id_, kg_obj_ in entity_obj_.items():
for tag_name_, tag_obj_ in kg_obj_.items():
if tag_name_ == tag_name and 'human_annotation' in tag_obj_:
writer.writerow(
{'tag_name': tag_name_, 'entity_name': entity_name_,
'kg_id': kg_id_, 'human_annotation': tag_obj_['human_annotation']})
@staticmethod
def load_from_tag_file(project_name):
dir_path = os.path.join(get_project_dir_path(project_name), 'entity_annotations')
for file_name in os.listdir(dir_path):
name, ext = os.path.splitext(file_name)
if ext != '.csv':
continue
file_path = os.path.join(dir_path, file_name)
with open(file_path, 'r') as csvfile:
reader = csv.DictReader(
csvfile, fieldnames=['tag_name', 'entity_name', 'kg_id', 'human_annotation'],
delimiter=',', quotechar='"', quoting=csv.QUOTE_NONNUMERIC)
next(reader, None) # skip header
for row in reader:
_add_keys_to_dict(data[project_name]['entities'],
[row['entity_name'], row['kg_id'], row['tag_name']])
data[project_name]['entities'][row['entity_name']][row['kg_id']][row['tag_name']][
'human_annotation'] = row['human_annotation']
@api.route('/projects/<project_name>/tags/<tag_name>/annotations/<entity_name>/annotations/<kg_id>')
class TagAnnotationsForEntity(Resource):
@requires_auth
def delete(self, project_name, tag_name, entity_name, kg_id):
if project_name not in data:
return rest.not_found('Project: {} not found'.format(project_name))
if tag_name not in data[project_name]['master_config']['tags']:
return rest.not_found('Tag {} not found'.format(tag_name))
if entity_name not in data[project_name]['entities']:
return rest.not_found('Entity {} not found'.format(entity_name))
if kg_id not in data[project_name]['entities'][entity_name]:
return rest.not_found('kg_id {} not found'.format(kg_id))
if tag_name not in data[project_name]['entities'][entity_name][kg_id]:
return rest.not_found('Tag {} not found'.format(tag_name))
if 'human_annotation' in data[project_name]['entities'][entity_name][kg_id][tag_name]:
del data[project_name]['entities'][entity_name][kg_id][tag_name]['human_annotation']
# write to file
TagAnnotationsForEntityType.write_to_tag_file(project_name, tag_name)
# remove from ES
TagAnnotationsForEntityType.es_remove_tag_annotation('full', project_name, kg_id, tag_name)
TagAnnotationsForEntityType.es_remove_tag_annotation('sample', project_name, kg_id, tag_name)
return rest.deleted()
@requires_auth
def get(self, project_name, tag_name, entity_name, kg_id):
if project_name not in data:
return rest.not_found('Project: {} not found'.format(project_name))
if tag_name not in data[project_name]['master_config']['tags']:
return rest.not_found('Tag {} not found'.format(tag_name))
if entity_name not in data[project_name]['entities']:
return rest.not_found('Entity {} not found'.format(entity_name))
if kg_id not in data[project_name]['entities'][entity_name]:
return rest.not_found('kg_id {} not found'.format(kg_id))
if tag_name not in data[project_name]['entities'][entity_name][kg_id]:
return rest.not_found('Tag {} not found'.format(tag_name))
# if 'human_annotation' not in data[project_name]['entities'][entity_name][kg_id][tag_name]:
# return rest.not_found('No human_annotation')
ret = data[project_name]['entities'][entity_name][kg_id][tag_name]
# return knowledge graph
parser = reqparse.RequestParser()
parser.add_argument('kg', required=False, type=str, help='knowledge graph')
args = parser.parse_args()
return_kg = True if args['kg'] is not None and \
args['kg'].lower() == 'true' else False
if return_kg:
ret['knowledge_graph'] = self.get_kg(project_name, kg_id, tag_name)
return ret
@staticmethod
def get_kg(project_name, kg_id, tag_name):
index_version = 'full'
try:
es = ES(config['es'][index_version + '_url'])
index = data[project_name]['master_config']['index'][index_version]
type = data[project_name]['master_config']['root_name']
hits = es.retrieve_doc(index, type, kg_id)
if hits:
doc = hits['hits']['hits'][0]['_source']
if 'knowledge_graph' not in doc:
return None
return doc['knowledge_graph']
return None
except Exception as e:
logger.warning('Fail to update annotation to: project {}, kg_id {}, tag {}'.format(
project_name, kg_id, tag_name
))
|
westinedu/similarinterest
|
refs/heads/master
|
django/contrib/flatpages/templatetags/flatpages.py
|
98
|
from django import template
from django.conf import settings
from django.contrib.flatpages.models import FlatPage
register = template.Library()
class FlatpageNode(template.Node):
def __init__(self, context_name, starts_with=None, user=None):
self.context_name = context_name
if starts_with:
self.starts_with = template.Variable(starts_with)
else:
self.starts_with = None
if user:
self.user = template.Variable(user)
else:
self.user = None
def render(self, context):
flatpages = FlatPage.objects.filter(sites__id=settings.SITE_ID)
# If a prefix was specified, add a filter
if self.starts_with:
flatpages = flatpages.filter(
url__startswith=self.starts_with.resolve(context))
# If the provided user is not authenticated, or no user
# was provided, filter the list to only public flatpages.
if self.user:
user = self.user.resolve(context)
if not user.is_authenticated():
flatpages = flatpages.filter(registration_required=False)
else:
flatpages = flatpages.filter(registration_required=False)
context[self.context_name] = flatpages
return ''
@register.tag
def get_flatpages(parser, token):
"""
Retrieves all flatpage objects available for the current site and
visible to the specific user (or visible to all users if no user is
specified). Populates the template context with them in a variable
whose name is defined by the ``as`` clause.
An optional ``for`` clause can be used to control the user whose
permissions are to be used in determining which flatpages are visible.
An optional argument, ``starts_with``, can be applied to limit the
returned flatpages to those beginning with a particular base URL.
This argument can be passed as a variable or a string, as it resolves
from the template context.
Syntax::
{% get_flatpages ['url_starts_with'] [for user] as context_name %}
Example usage::
{% get_flatpages as flatpages %}
{% get_flatpages for someuser as flatpages %}
{% get_flatpages '/about/' as about_pages %}
{% get_flatpages prefix as about_pages %}
{% get_flatpages '/about/' for someuser as about_pages %}
"""
bits = token.split_contents()
syntax_message = ("%(tag_name)s expects a syntax of %(tag_name)s "
"['url_starts_with'] [for user] as context_name" %
dict(tag_name=bits[0]))
# Must have at 3-6 bits in the tag
if len(bits) >= 3 and len(bits) <= 6:
# If there's an even number of bits, there's no prefix
if len(bits) % 2 == 0:
prefix = bits[1]
else:
prefix = None
# The very last bit must be the context name
if bits[-2] != 'as':
raise template.TemplateSyntaxError(syntax_message)
context_name = bits[-1]
# If there are 5 or 6 bits, there is a user defined
if len(bits) >= 5:
if bits[-4] != 'for':
raise template.TemplateSyntaxError(syntax_message)
user = bits[-3]
else:
user = None
return FlatpageNode(context_name, starts_with=prefix, user=user)
else:
raise template.TemplateSyntaxError(syntax_message)
|
benjaoming/kolibri
|
refs/heads/master
|
kolibri/plugins/coach/hooks.py
|
5
|
from __future__ import absolute_import, print_function, unicode_literals
from kolibri.core.webpack import hooks as webpack_hooks
class CoachSyncHook(webpack_hooks.WebpackInclusionHook):
"""
Inherit a hook defining assets to be loaded synchronously in coach/coach.html
"""
class Meta:
abstract = True
class CoachAsyncHook(webpack_hooks.WebpackInclusionHook):
"""
Inherit a hook defining assets to be loaded asynchronously in coach/coach.html
"""
class Meta:
abstract = True
|
caphrim007/ansible
|
refs/heads/devel
|
lib/ansible/modules/network/f5/bigip_monitor_tcp_echo.py
|
6
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_monitor_tcp_echo
short_description: Manages F5 BIG-IP LTM tcp echo monitors
description: Manages F5 BIG-IP LTM tcp echo monitors.
version_added: 2.4
options:
name:
description:
- Monitor name.
required: True
parent:
description:
- The parent template of this monitor template. Once this value has
been set, it cannot be changed. By default, this value is the C(tcp_echo)
parent on the C(Common) partition.
default: /Common/tcp_echo
description:
description:
- The description of the monitor.
version_added: 2.7
ip:
description:
- IP address part of the IP/port definition. If this parameter is not
provided when creating a new monitor, then the default value will be
'*'.
- If this value is an IP address, and the C(type) is C(tcp) (the default),
then a C(port) number must be specified.
interval:
description:
- The interval specifying how frequently the monitor instance of this
template will run. If this parameter is not provided when creating
a new monitor, then the default value will be 5. This value B(must)
be less than the C(timeout) value.
timeout:
description:
- The number of seconds in which the node or service must respond to
the monitor request. If the target responds within the set time
period, it is considered up. If the target does not respond within
the set time period, it is considered down. You can change this
number to any number you want, however, it should be 3 times the
interval number of seconds plus 1 second. If this parameter is not
provided when creating a new monitor, then the default value will be 16.
time_until_up:
description:
- Specifies the amount of time in seconds after the first successful
response before a node will be marked up. A value of 0 will cause a
node to be marked up immediately after a valid response is received
from the node. If this parameter is not provided when creating
a new monitor, then the default value will be 0.
partition:
description:
- Device partition to manage resources on.
default: Common
version_added: 2.5
state:
description:
- When C(present), ensures that the monitor exists.
- When C(absent), ensures the monitor is removed.
default: present
choices:
- present
- absent
version_added: 2.5
notes:
- Requires BIG-IP software version >= 12
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
- Wojciech Wypior (@wojtek0806)
'''
EXAMPLES = r'''
- name: Create TCP Echo Monitor
bigip_monitor_tcp_echo:
state: present
server: lb.mydomain.com
user: admin
ip: 10.10.10.10
password: secret
name: my_tcp_monitor
delegate_to: localhost
- name: Remove TCP Echo Monitor
bigip_monitor_tcp_echo:
state: absent
server: lb.mydomain.com
user: admin
password: secret
name: my_tcp_monitor
delegate_to: localhost
'''
RETURN = r'''
parent:
description: New parent template of the monitor.
returned: changed
type: string
sample: tcp
ip:
description: The new IP of IP/port definition.
returned: changed
type: string
sample: 10.12.13.14
description:
description: The description of the monitor.
returned: changed
type: str
sample: Important Monitor
interval:
description: The new interval in which to run the monitor check.
returned: changed
type: int
sample: 2
timeout:
description: The new timeout in which the remote system must respond to the monitor.
returned: changed
type: int
sample: 10
time_until_up:
description: The new time in which to mark a system as up after first successful response.
returned: changed
type: int
sample: 2
'''
import os
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.basic import env_fallback
try:
from library.module_utils.network.f5.bigip import F5RestClient
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
from library.module_utils.network.f5.common import transform_name
from library.module_utils.network.f5.common import exit_json
from library.module_utils.network.f5.common import fail_json
from library.module_utils.network.f5.ipaddress import is_valid_ip
except ImportError:
from ansible.module_utils.network.f5.bigip import F5RestClient
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
from ansible.module_utils.network.f5.common import transform_name
from ansible.module_utils.network.f5.common import exit_json
from ansible.module_utils.network.f5.common import fail_json
from ansible.module_utils.network.f5.ipaddress import is_valid_ip
class Parameters(AnsibleF5Parameters):
api_map = {
'timeUntilUp': 'time_until_up',
'defaultsFrom': 'parent',
}
api_attributes = [
'timeUntilUp', 'defaultsFrom', 'interval', 'timeout', 'destination',
'description',
]
returnables = [
'parent', 'ip', 'interval', 'timeout', 'time_until_up', 'description',
]
updatables = [
'ip', 'interval', 'timeout', 'time_until_up', 'description',
]
@property
def interval(self):
if self._values['interval'] is None:
return None
if int(self._values['interval']) < 1 or int(self._values['interval']) > 86400:
raise F5ModuleError(
"Interval value must be between 1 and 86400"
)
return int(self._values['interval'])
@property
def timeout(self):
if self._values['timeout'] is None:
return None
return int(self._values['timeout'])
@property
def ip(self):
if self._values['ip'] is None:
return None
if self._values['ip'] in ['*', '0.0.0.0']:
return '*'
elif is_valid_ip(self._values['ip']):
return self._values['ip']
else:
raise F5ModuleError(
"The provided 'ip' parameter is not an IP address."
)
@property
def destination(self):
return self.ip
@destination.setter
def destination(self, value):
self._values['ip'] = value
@property
def time_until_up(self):
if self._values['time_until_up'] is None:
return None
return int(self._values['time_until_up'])
@property
def parent(self):
if self._values['parent'] is None:
return None
if self._values['parent'].startswith('/'):
parent = os.path.basename(self._values['parent'])
result = '/{0}/{1}'.format(self.partition, parent)
else:
result = '/{0}/{1}'.format(self.partition, self._values['parent'])
return result
@property
def type(self):
return 'tcp_echo'
class ApiParameters(Parameters):
pass
class ModuleParameters(Parameters):
pass
class Changes(Parameters):
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class UsableChanges(Changes):
pass
class ReportableChanges(Changes):
pass
class Difference(object):
def __init__(self, want, have=None):
self.want = want
self.have = have
def compare(self, param):
try:
result = getattr(self, param)
return result
except AttributeError:
result = self.__default(param)
return result
@property
def parent(self):
if self.want.parent != self.have.parent:
raise F5ModuleError(
"The parent monitor cannot be changed"
)
@property
def destination(self):
if self.want.ip is None:
return None
if self.want.destination != self.have.destination:
return self.want.destination
@property
def interval(self):
if self.want.timeout is not None and self.want.interval is not None:
if self.want.interval >= self.want.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
elif self.want.timeout is not None:
if self.have.interval >= self.want.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
elif self.want.interval is not None:
if self.want.interval >= self.have.timeout:
raise F5ModuleError(
"Parameter 'interval' must be less than 'timeout'."
)
if self.want.interval != self.have.interval:
return self.want.interval
def __default(self, param):
attr1 = getattr(self.want, param)
try:
attr2 = getattr(self.have, param)
if attr1 != attr2:
return attr1
except AttributeError:
return attr1
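# Illustrative sketch (not part of the module): how Difference.compare() resolves a
# parameter. Keys with a dedicated property (parent, destination, interval) use it;
# any other key raises AttributeError and falls back to __default(), which returns
# the wanted value only when it differs from the current one. The SimpleNamespace
# objects are hypothetical stand-ins for ModuleParameters/ApiParameters.
#
#   >>> from types import SimpleNamespace
#   >>> diff = Difference(want=SimpleNamespace(time_until_up=5),
#   ...                   have=SimpleNamespace(time_until_up=0))
#   >>> diff.compare('time_until_up')
#   5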
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = ModuleParameters(params=self.module.params)
self.have = ApiParameters()
self.changes = UsableChanges()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = UsableChanges(params=changed)
def _update_changed_options(self):
diff = Difference(self.want, self.have)
updatables = Parameters.updatables
changed = dict()
for k in updatables:
change = diff.compare(k)
if change is None:
continue
else:
if isinstance(change, dict):
changed.update(change)
else:
changed[k] = change
if changed:
self.changes = UsableChanges(params=changed)
return True
return False
def should_update(self):
result = self._update_changed_options()
if result:
return True
return False
def exec_module(self):
changed = False
result = dict()
state = self.want.state
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
reportable = ReportableChanges(params=self.changes.to_return())
changes = reportable.to_return()
result.update(**changes)
result.update(dict(changed=changed))
self._announce_deprecations(result)
return result
def _announce_deprecations(self, result):
warnings = result.pop('__warnings', [])
for warning in warnings:
self.client.module.deprecate(
msg=warning['msg'],
version=warning['version']
)
def present(self):
if self.exists():
return self.update()
else:
return self.create()
def exists(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/tcp-echo/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError:
return False
if resp.status == 404 or 'code' in response and response['code'] == 404:
return False
return True
def update(self):
self.have = self.read_current_from_device()
if not self.should_update():
return False
if self.module.check_mode:
return True
self.update_on_device()
return True
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to delete the resource.")
return True
def create(self):
self._set_changed_options()
self._set_default_creation_values()
if self.module.check_mode:
return True
self.create_on_device()
return True
def _set_default_creation_values(self):
if self.want.timeout is None:
self.want.update({'timeout': 16})
if self.want.interval is None:
self.want.update({'interval': 5})
if self.want.time_until_up is None:
self.want.update({'time_until_up': 0})
if self.want.ip is None:
self.want.update({'ip': '*'})
def create_on_device(self):
params = self.changes.api_params()
params['name'] = self.want.name
params['partition'] = self.want.partition
uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/tcp-echo/".format(
self.client.provider['server'],
self.client.provider['server_port']
)
resp = self.client.api.post(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] in [400, 403]:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def update_on_device(self):
params = self.changes.api_params()
uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/tcp-echo/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.patch(uri, json=params)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
def absent(self):
if self.exists():
return self.remove()
return False
def remove_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/tcp-echo/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.delete(uri)
if resp.status == 200:
return True
def read_current_from_device(self):
uri = "https://{0}:{1}/mgmt/tm/ltm/monitor/tcp-echo/{2}".format(
self.client.provider['server'],
self.client.provider['server_port'],
transform_name(self.want.partition, self.want.name)
)
resp = self.client.api.get(uri)
try:
response = resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
if 'code' in response and response['code'] == 400:
if 'message' in response:
raise F5ModuleError(response['message'])
else:
raise F5ModuleError(resp.content)
return ApiParameters(params=response)
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
parent=dict(default='/Common/tcp_echo'),
description=dict(),
ip=dict(),
interval=dict(type='int'),
timeout=dict(type='int'),
time_until_up=dict(type='int'),
state=dict(
default='present',
choices=['present', 'absent']
),
partition=dict(
default='Common',
fallback=(env_fallback, ['F5_PARTITION'])
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode,
)
client = F5RestClient(**module.params)
try:
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
exit_json(module, results, client)
except F5ModuleError as ex:
cleanup_tokens(client)
fail_json(module, ex, client)
if __name__ == '__main__':
main()
|
savoirfairelinux/django
|
refs/heads/master
|
django/contrib/gis/geos/collections.py
|
7
|
"""
This module houses the Geometry Collection objects:
GeometryCollection, MultiPoint, MultiLineString, and MultiPolygon
"""
from ctypes import byref, c_int, c_uint
from django.contrib.gis.geos import prototypes as capi
from django.contrib.gis.geos.error import GEOSException
from django.contrib.gis.geos.geometry import GEOSGeometry, LinearGeometryMixin
from django.contrib.gis.geos.libgeos import geos_version_tuple, get_pointer_arr
from django.contrib.gis.geos.linestring import LinearRing, LineString
from django.contrib.gis.geos.point import Point
from django.contrib.gis.geos.polygon import Polygon
class GeometryCollection(GEOSGeometry):
_typeid = 7
def __init__(self, *args, **kwargs):
"Initialize a Geometry Collection from a sequence of Geometry objects."
# Checking the arguments
if len(args) == 1:
# If only one geometry provided or a list of geometries is provided
# in the first argument.
if isinstance(args[0], (tuple, list)):
init_geoms = args[0]
else:
init_geoms = args
else:
init_geoms = args
# Ensuring that only the permitted geometries are allowed in this collection
# this is moved to list mixin super class
self._check_allowed(init_geoms)
# Creating the geometry pointer array.
collection = self._create_collection(len(init_geoms), iter(init_geoms))
super().__init__(collection, **kwargs)
def __iter__(self):
"Iterate over each Geometry in the Collection."
for i in range(len(self)):
yield self[i]
def __len__(self):
"Return the number of geometries in this Collection."
return self.num_geom
# ### Methods for compatibility with ListMixin ###
def _create_collection(self, length, items):
# Creating the geometry pointer array.
geoms = get_pointer_arr(length)
for i, g in enumerate(items):
# this is a little sloppy, but makes life easier
# allow GEOSGeometry types (python wrappers) or pointer types
geoms[i] = capi.geom_clone(getattr(g, 'ptr', g))
return capi.create_collection(c_int(self._typeid), byref(geoms), c_uint(length))
def _get_single_internal(self, index):
return capi.get_geomn(self.ptr, index)
def _get_single_external(self, index):
"Return the Geometry from this Collection at the given index (0-based)."
# Checking the index and returning the corresponding GEOS geometry.
return GEOSGeometry(capi.geom_clone(self._get_single_internal(index)), srid=self.srid)
def _set_list(self, length, items):
"Create a new collection, and destroy the contents of the previous pointer."
prev_ptr = self.ptr
srid = self.srid
self.ptr = self._create_collection(length, items)
if srid:
self.srid = srid
capi.destroy_geom(prev_ptr)
_set_single = GEOSGeometry._set_single_rebuild
_assign_extended_slice = GEOSGeometry._assign_extended_slice_rebuild
@property
def kml(self):
"Return the KML for this Geometry Collection."
return '<MultiGeometry>%s</MultiGeometry>' % ''.join(g.kml for g in self)
@property
def tuple(self):
"Return a tuple of all the coordinates in this Geometry Collection"
return tuple(g.tuple for g in self)
coords = tuple
# MultiPoint, MultiLineString, and MultiPolygon class definitions.
class MultiPoint(GeometryCollection):
_allowed = Point
_typeid = 4
class MultiLineString(LinearGeometryMixin, GeometryCollection):
_allowed = (LineString, LinearRing)
_typeid = 5
@property
def closed(self):
if geos_version_tuple() < (3, 5):
raise GEOSException("MultiLineString.closed requires GEOS >= 3.5.0.")
return super().closed
class MultiPolygon(GeometryCollection):
_allowed = Polygon
_typeid = 6
# Setting the allowed types here since GeometryCollection is defined before
# its subclasses.
GeometryCollection._allowed = (Point, LineString, LinearRing, Polygon, MultiPoint, MultiLineString, MultiPolygon)
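# Illustrative sketch (not part of this module): constructing and indexing one of
# the collections defined above. Requires a working GEOS installation; the
# coordinates are arbitrary.
#
# >>> from django.contrib.gis.geos import MultiPoint, Point
# >>> mp = MultiPoint(Point(0, 0), Point(1, 1), srid=4326)
# >>> len(mp), mp[1].coords
# (2, (1.0, 1.0))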
|
stevenewey/yotta
|
refs/heads/master
|
yotta/lib/version.py
|
5
|
# Copyright 2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
import re
# Semantic Versioning, BSD, Represent and compare version strings, pip install -e git://github.com/autopulated/python-semanticversion.git#egg=semantic_version
import semantic_version
# Parse and match pure version strings and version specifications
#
# Versions:
# "v1.2.3"
# "1.2.3"
# "v1.2.3b1"
# "" (tip)
#
# Version Specifications:
# "1.2.3"
# ">1.2.3"
# "<1.2.3"
# ">=1.2.3"
# "<=1.2.3"
# "*" (any version)
# "" (any version)
#
# For full details see semantic_version documentation
#
class Version(object):
def __init__(self, version_string, url=None):
''' Wrap the semantic_version Version class so that we can represent
'tip' versions as well as specific versions, and store an optional
URL that can represent the location from which we can retrieve this
version.
Also add some useful methods for manipulating versions.
'''
super(Version, self).__init__()
self.url = url
version_string = str(version_string.strip())
        # strip off leading 'v' or '=' characters; these are permitted in npm's
# semver, and npm tags versions as v1.2.3
if version_string.startswith('v') or version_string.startswith('='):
self.version = semantic_version.Version(version_string[1:], partial=False)
elif not version_string:
self.version = 'tip'
else:
self.version = semantic_version.Version(version_string, partial=False)
self.url = url
def isTip(self):
return self.version == 'tip'
def major(self):
return self.version.major
def minor(self):
return self.version.minor
def patch(self):
return self.version.patch
def bump(self, bumptype):
if isinstance(self.version, str):
raise ValueError('cannot bump generic version "%s"' % self.version)
if bumptype == 'major':
self.version.major = self.version.major + 1
self.version.minor = 0
self.version.patch = 0
self.version.prerelease = ''
self.version.build = ''
elif bumptype == 'minor':
self.version.minor = self.version.minor + 1
self.version.patch = 0
self.version.prerelease = ''
self.version.build = ''
elif bumptype == 'patch':
self.version.patch = self.version.patch + 1
self.version.prerelease = ''
self.version.build = ''
else:
raise ValueError('bumptype must be "major", "minor" or "patch"')
self.version.prerelease = None
self.version.build = None
def __str__(self):
return str(self.version)
def __repr__(self):
return 'Version(%s %s)' % (self.version, self.url)
def __cmp__(self, other):
# if the other is an unwrapped version (used within the Spec class)
if isinstance(other, semantic_version.Version):
other_is_specific_ver = True
other_is_unwrapped = True
elif not hasattr(other, 'version'):
return NotImplemented
else:
other_is_specific_ver = isinstance(other.version, semantic_version.Version)
other_is_unwrapped = False
self_is_specific_ver = isinstance(self.version, semantic_version.Version)
if self.version == 'tip' and other_is_specific_ver:
return 1
elif (not other_is_unwrapped) and other.version == 'tip' and self_is_specific_ver:
return -1
elif self_is_specific_ver and other_is_specific_ver:
if other_is_unwrapped:
return semantic_version.Version.__cmp__(self.version, other)
else:
return semantic_version.Version.__cmp__(self.version, other.version)
elif self.version == 'tip' and other.version == 'tip':
raise Exception('Comparing two "tip" versions is undefined')
else:
raise Exception('Unsupported version comparison: "%s" vs. "%s"' % (self.version, other.version))
def __eq__(self, other):
return self.__cmp__(other) == 0
def __hash__(self):
return hash(self.version)
def __ne__(self, other):
return self.__cmp__(other) != 0
def __lt__(self, other):
return self.__cmp__(other) < 0
def __le__(self, other):
return self.__cmp__(other) <= 0
def __gt__(self, other):
return self.__cmp__(other) > 0
def __ge__(self, other):
return self.__cmp__(other) >= 0
# subclass to allow empty specification strings (equivalent to '*')
class Spec(semantic_version.Spec):
def __init__(self, version_spec):
if not version_spec:
version_spec = '*'
# add support for version specs that are unadorned versions, or a
# single equals
if re.match('^[0-9]', version_spec):
version_spec = '==' + version_spec
elif re.match('^=[0-9]', version_spec):
version_spec = '=' + version_spec
# add support for the ~ and ^ version specifiers:
# ~1.2.3 := >=1.2.3-0 <1.3.0-0
# ^1.2.3 := >=1.2.3-0 <2.0.0-0
# ^0.1.2 := 0.1.2 exactly (for 0.x.x versions)
        elif re.match(r'^\^', version_spec):
v = semantic_version.Version(version_spec[1:])
if v.major == 0:
# for 0. releases, ^ means exact version only
version_spec = '==' + str(v)
else:
v2 = Version(version_spec[1:])
v2.bump('major')
version_spec = '>=' + str(v) + ',<' +str(v2)
elif re.match('^~', version_spec):
v = semantic_version.Version(version_spec[1:])
v2 = Version(version_spec[1:])
v2.bump('minor')
version_spec = '>=' + str(v) + ',<' +str(v2)
super(Spec, self).__init__(version_spec)
# base type contains function checks the type, so must replace it
def __contains__(self, version):
return self.match(version)
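# Illustrative usage sketch (not part of the original module). It exercises the
# version and specification formats documented at the top of this file and
# assumes the pinned semantic_version fork referenced above (mutable Version
# fields); treat it as a sketch rather than a test.
if __name__ == '__main__':
    v = Version('v1.2.3')                 # leading 'v' (or '=') is stripped
    print('%d.%d.%d' % (v.major(), v.minor(), v.patch()))   # 1.2.3
    v.bump('minor')                       # -> 1.3.0, prerelease/build cleared
    print(str(v))
    print(Version('').isTip())            # True: empty string represents 'tip'
    spec = Spec('^1.2.0')                 # rewritten internally to '>=1.2.0,<2.0.0'
    print(str(spec))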
|
mikewiebe-ansible/ansible
|
refs/heads/devel
|
test/units/modules/storage/netapp/test_na_ontap_vserver_cifs_security.py
|
21
|
# (c) 2019, NetApp, Inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
''' unit test template for ONTAP Ansible module '''
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
import pytest
from units.compat import unittest
from units.compat.mock import patch, Mock
from ansible.module_utils import basic
from ansible.module_utils._text import to_bytes
import ansible.module_utils.netapp as netapp_utils
from ansible.modules.storage.netapp.na_ontap_vserver_cifs_security \
import NetAppONTAPCifsSecurity as cifs_security_module # module under test
if not netapp_utils.has_netapp_lib():
pytestmark = pytest.mark.skip('skipping as missing required netapp_lib')
def set_module_args(args):
"""prepare arguments so that they will be picked up during module creation"""
args = json.dumps({'ANSIBLE_MODULE_ARGS': args})
basic._ANSIBLE_ARGS = to_bytes(args) # pylint: disable=protected-access
class AnsibleExitJson(Exception):
"""Exception class to be raised by module.exit_json and caught by the test case"""
pass
class AnsibleFailJson(Exception):
"""Exception class to be raised by module.fail_json and caught by the test case"""
pass
def exit_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over exit_json; package return data into an exception"""
if 'changed' not in kwargs:
kwargs['changed'] = False
raise AnsibleExitJson(kwargs)
def fail_json(*args, **kwargs): # pylint: disable=unused-argument
"""function to patch over fail_json; package return data into an exception"""
kwargs['failed'] = True
raise AnsibleFailJson(kwargs)
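# Illustrative note (not part of the original test): setUp() patches
# AnsibleModule.exit_json/fail_json with the helpers above, so a module "exit"
# surfaces as an exception the tests can assert on, for example:
#
#     with pytest.raises(AnsibleExitJson) as exc:
#         self.get_cifs_security_mock_object('cifs_security').apply()
#     assert exc.value.args[0]['changed']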
class MockONTAPConnection(object):
''' mock server connection to ONTAP host '''
def __init__(self, kind=None, data=None):
''' save arguments '''
self.type = kind
self.data = data
self.xml_in = None
self.xml_out = None
def invoke_successfully(self, xml, enable_tunneling): # pylint: disable=unused-argument
''' mock invoke_successfully returning xml data '''
self.xml_in = xml
if self.type == 'cifs_security':
xml = self.build_port_info(self.data)
if self.type == 'error':
error = netapp_utils.zapi.NaApiError('test', 'error')
raise error
self.xml_out = xml
return xml
@staticmethod
def build_port_info(cifs_security_details):
''' build xml data for cifs-security '''
xml = netapp_utils.zapi.NaElement('xml')
attributes = {
'num-records': 1,
'attributes-list': {
'cifs-security': {
'is_aes_encryption_enabled': cifs_security_details['is_aes_encryption_enabled'],
'lm_compatibility_level': cifs_security_details['lm_compatibility_level']
}
}
}
xml.translate_struct(attributes)
return xml
class TestMyModule(unittest.TestCase):
''' a group of related Unit Tests '''
def setUp(self):
self.mock_module_helper = patch.multiple(basic.AnsibleModule,
exit_json=exit_json,
fail_json=fail_json)
self.mock_module_helper.start()
self.addCleanup(self.mock_module_helper.stop)
self.mock_cifs_security = {
'is_aes_encryption_enabled': 'true',
'lm_compatibility_level': 'krb'
}
def mock_args(self):
return {
'is_aes_encryption_enabled': self.mock_cifs_security['is_aes_encryption_enabled'],
'lm_compatibility_level': self.mock_cifs_security['lm_compatibility_level'],
'vserver': 'ansible',
'hostname': 'test',
'username': 'test_user',
'password': 'test_pass!',
'https': 'False'
}
def get_cifs_security_mock_object(self, kind=None):
"""
Helper method to return an na_ontap_vserver_cifs_security object
:param kind: passes this param to MockONTAPConnection()
:return: na_ontap_vserver_cifs_security object
"""
obj = cifs_security_module()
obj.asup_log_for_cserver = Mock(return_value=None)
obj.server = Mock()
obj.server.invoke_successfully = Mock()
if kind is None:
obj.server = MockONTAPConnection()
else:
obj.server = MockONTAPConnection(kind=kind, data=self.mock_cifs_security)
return obj
@patch('ansible.modules.storage.netapp.na_ontap_vserver_cifs_security.NetAppONTAPCifsSecurity.cifs_security_get_iter')
def test_successful_modify(self, get_cifs_security):
        ''' Test successful modify of cifs security settings '''
data = self.mock_args()
set_module_args(data)
current = {
'is_aes_encryption_enabled': False,
'lm_compatibility_level': 'lm_ntlm_ntlmv2_krb'
}
get_cifs_security.side_effect = [
current
]
with pytest.raises(AnsibleExitJson) as exc:
self.get_cifs_security_mock_object('cifs_security').apply()
assert exc.value.args[0]['changed']
@patch('ansible.modules.storage.netapp.na_ontap_vserver_cifs_security.NetAppONTAPCifsSecurity.cifs_security_get_iter')
def test_modify_error(self, get_cifs_security):
        ''' Test error handling when modifying cifs security '''
data = self.mock_args()
set_module_args(data)
current = {
'is_aes_encryption_enabled': False
}
get_cifs_security.side_effect = [
current
]
with pytest.raises(AnsibleFailJson) as exc:
self.get_cifs_security_mock_object('error').apply()
assert exc.value.args[0]['msg'] == 'Error modifying cifs security on ansible: NetApp API failed. Reason - test:error'
|
rwakulszowa/servo
|
refs/heads/master
|
tests/wpt/css-tests/tools/manifest/tests/test_manifest.py
|
59
|
import platform
import os
import mock
import hypothesis as h
import hypothesis.strategies as hs
import pytest
from .. import manifest, item, sourcefile, utils
def SourceFileWithTest(path, hash, cls, *args):
s = mock.Mock(rel_path=path, hash=hash)
test = cls(s, utils.rel_path_to_url(path), *args)
s.manifest_items = mock.Mock(return_value=(cls.item_type, [test]))
return s
@hs.composite
def rel_dir_file_path(draw):
length = draw(hs.integers(min_value=1, max_value=20))
if length == 1:
return "a"
else:
remaining = length - 2
if os.path.sep == "/":
alphabet = "a/"
elif os.path.sep == "\\":
alphabet = "a/\\"
else:
assert False, "uhhhh, this platform is weird"
mid = draw(hs.text(alphabet=alphabet, min_size=remaining, max_size=remaining))
return os.path.normcase("a" + mid + "a")
@hs.composite
def sourcefile_strategy(draw):
item_classes = [item.TestharnessTest, item.RefTest, item.RefTestNode,
item.ManualTest, item.Stub, item.WebdriverSpecTest,
item.ConformanceCheckerTest, item.SupportFile]
cls = draw(hs.sampled_from(item_classes))
path = draw(rel_dir_file_path())
hash = draw(hs.text(alphabet="0123456789abcdef", min_size=40, max_size=40))
s = mock.Mock(rel_path=path, hash=hash)
if cls in (item.RefTest, item.RefTestNode):
ref_path = draw(rel_dir_file_path())
h.assume(path != ref_path)
ref_eq = draw(hs.sampled_from(["==", "!="]))
test = cls(s, utils.rel_path_to_url(path), [(utils.rel_path_to_url(ref_path), ref_eq)])
elif cls is item.SupportFile:
test = cls(s)
else:
test = cls(s, utils.rel_path_to_url(path))
s.manifest_items = mock.Mock(return_value=(cls.item_type, [test]))
return s
@h.given(hs.lists(sourcefile_strategy(),
min_size=1, average_size=10, max_size=1000,
unique_by=lambda x: x.rel_path))
@h.example([SourceFileWithTest("a", "0"*40, item.ConformanceCheckerTest)])
def test_manifest_to_json(s):
m = manifest.Manifest()
assert m.update(s) is True
json_str = m.to_json()
loaded = manifest.Manifest.from_json("/", json_str)
assert list(loaded) == list(m)
assert loaded.to_json() == json_str
@h.given(hs.lists(sourcefile_strategy(),
min_size=1, average_size=10,
unique_by=lambda x: x.rel_path))
@h.example([SourceFileWithTest("a", "0"*40, item.TestharnessTest)])
@h.example([SourceFileWithTest("a", "0"*40, item.RefTest, [("/aa", "==")])])
def test_manifest_idempotent(s):
m = manifest.Manifest()
assert m.update(s) is True
m1 = list(m)
assert m.update(s) is False
assert list(m) == m1
def test_manifest_to_json_forwardslash():
m = manifest.Manifest()
s = SourceFileWithTest("a/b", "0"*40, item.TestharnessTest)
assert m.update([s]) is True
assert m.to_json() == {
'paths': {
'a/b': ('0000000000000000000000000000000000000000', 'testharness')
},
'version': 4,
'url_base': '/',
'items': {
'reftest': {},
'reftest_node': {},
'testharness': {
'a/b': [['/a/b', {}]]
}
}
}
def test_manifest_to_json_backslash():
m = manifest.Manifest()
s = SourceFileWithTest("a\\b", "0"*40, item.TestharnessTest)
if os.path.sep == "\\":
assert m.update([s]) is True
assert m.to_json() == {
'paths': {
'a/b': ('0000000000000000000000000000000000000000', 'testharness')
},
'version': 4,
'url_base': '/',
'items': {
'reftest': {},
'reftest_node': {},
'testharness': {
'a/b': [['/a/b', {}]]
}
}
}
else:
with pytest.raises(ValueError):
# one of these must raise ValueError
# the first must return True if it doesn't raise
assert m.update([s]) is True
m.to_json()
def test_manifest_from_json_backslash():
json_obj = {
'paths': {
'a\\b': ('0000000000000000000000000000000000000000', 'testharness')
},
'version': 4,
'url_base': '/',
'items': {
'reftest': {},
'reftest_node': {},
'testharness': {
'a\\b': [['/a/b', {}]]
}
}
}
with pytest.raises(ValueError):
manifest.Manifest.from_json("/", json_obj)
def test_reftest_computation_chain():
m = manifest.Manifest()
s1 = SourceFileWithTest("test1", "0"*40, item.RefTest, [("/test2", "==")])
s2 = SourceFileWithTest("test2", "0"*40, item.RefTest, [("/test3", "==")])
m.update([s1, s2])
test1 = s1.manifest_items()[1][0]
test2 = s2.manifest_items()[1][0]
test2_node = test2.to_RefTestNode()
assert list(m) == [("reftest", test1.path, {test1}),
("reftest_node", test2.path, {test2_node})]
def test_reftest_computation_chain_update_add():
m = manifest.Manifest()
s2 = SourceFileWithTest("test2", "0"*40, item.RefTest, [("/test3", "==")])
test2 = s2.manifest_items()[1][0]
assert m.update([s2]) is True
assert list(m) == [("reftest", test2.path, {test2})]
s1 = SourceFileWithTest("test1", "0"*40, item.RefTest, [("/test2", "==")])
test1 = s1.manifest_items()[1][0]
# s2's hash is unchanged, but it has gone from a test to a node
assert m.update([s1, s2]) is True
test2_node = test2.to_RefTestNode()
assert list(m) == [("reftest", test1.path, {test1}),
("reftest_node", test2.path, {test2_node})]
def test_reftest_computation_chain_update_remove():
m = manifest.Manifest()
s1 = SourceFileWithTest("test1", "0"*40, item.RefTest, [("/test2", "==")])
s2 = SourceFileWithTest("test2", "0"*40, item.RefTest, [("/test3", "==")])
assert m.update([s1, s2]) is True
test1 = s1.manifest_items()[1][0]
test2 = s2.manifest_items()[1][0]
test2_node = test2.to_RefTestNode()
assert list(m) == [("reftest", test1.path, {test1}),
("reftest_node", test2.path, {test2_node})]
# s2's hash is unchanged, but it has gone from a node to a test
assert m.update([s2]) is True
assert list(m) == [("reftest", test2.path, {test2})]
|
simone/django-gb
|
refs/heads/master
|
tests/test_runner/test_discover_runner.py
|
10
|
from contextlib import contextmanager
import os
from unittest import TestSuite, TextTestRunner, defaultTestLoader
from django.test import TestCase
from django.test.runner import DiscoverRunner
@contextmanager
def change_cwd(directory):
current_dir = os.path.abspath(os.path.dirname(__file__))
new_dir = os.path.join(current_dir, directory)
old_cwd = os.getcwd()
os.chdir(new_dir)
try:
yield
finally:
os.chdir(old_cwd)
class DiscoverRunnerTest(TestCase):
def test_dotted_test_module(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample"],
).countTestCases()
self.assertEqual(count, 2)
def test_dotted_test_class_vanilla_unittest(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.TestVanillaUnittest"],
).countTestCases()
self.assertEqual(count, 1)
def test_dotted_test_class_django_testcase(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.TestDjangoTestCase"],
).countTestCases()
self.assertEqual(count, 1)
def test_dotted_test_method_django_testcase(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.TestDjangoTestCase.test_sample"],
).countTestCases()
self.assertEqual(count, 1)
def test_pattern(self):
count = DiscoverRunner(
pattern="*_tests.py",
).build_suite(["test_discovery_sample"]).countTestCases()
self.assertEqual(count, 1)
def test_file_path(self):
with change_cwd(".."):
count = DiscoverRunner().build_suite(
["test_discovery_sample/"],
).countTestCases()
self.assertEqual(count, 3)
def test_empty_label(self):
"""
If the test label is empty, discovery should happen on the current
working directory.
"""
with change_cwd("."):
suite = DiscoverRunner().build_suite([])
self.assertEqual(
suite._tests[0].id().split(".")[0],
os.path.basename(os.getcwd()),
)
def test_empty_test_case(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests_sample.EmptyTestCase"],
).countTestCases()
self.assertEqual(count, 0)
def test_discovery_on_package(self):
count = DiscoverRunner().build_suite(
["test_discovery_sample.tests"],
).countTestCases()
self.assertEqual(count, 1)
def test_ignore_adjacent(self):
"""
When given a dotted path to a module, unittest discovery searches
not just the module, but also the directory containing the module.
This results in tests from adjacent modules being run when they
should not. The discover runner avoids this behavior.
"""
count = DiscoverRunner().build_suite(
["test_discovery_sample.empty"],
).countTestCases()
self.assertEqual(count, 0)
def test_overrideable_test_suite(self):
self.assertEqual(DiscoverRunner().test_suite, TestSuite)
def test_overrideable_test_runner(self):
self.assertEqual(DiscoverRunner().test_runner, TextTestRunner)
def test_overrideable_test_loader(self):
self.assertEqual(DiscoverRunner().test_loader, defaultTestLoader)
|
philpot/tocayo
|
refs/heads/master
|
tocayoproj/tocayoapp/tests.py
|
24123
|
from django.test import TestCase
# Create your tests here.
|
osvalr/odoo
|
refs/heads/8.0
|
addons/sale_stock/res_config.py
|
331
|
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import openerp
from openerp import SUPERUSER_ID
from openerp.osv import fields, osv
from openerp.tools.translate import _
class sale_configuration(osv.osv_memory):
_inherit = 'sale.config.settings'
_columns = {
'group_invoice_deli_orders': fields.boolean('Generate invoices after and based on delivery orders',
implied_group='sale_stock.group_invoice_deli_orders',
help="To allow your salesman to make invoices for Delivery Orders using the menu 'Deliveries to Invoice'."),
'task_work': fields.boolean("Prepare invoices based on task's activities",
help='Lets you transfer the entries under tasks defined for Project Management to '
'the Timesheet line entries for particular date and particular user with the effect of creating, editing and deleting either ways '
'and to automatically creates project tasks from procurement lines.\n'
'-This installs the modules project_timesheet and sale_service.'),
'default_order_policy': fields.selection(
[('manual', 'Invoice based on sales orders'), ('picking', 'Invoice based on deliveries')],
'The default invoicing method is', default_model='sale.order',
help="You can generate invoices based on sales orders or based on shippings."),
'module_delivery': fields.boolean('Allow adding shipping costs',
help='Allows you to add delivery methods in sales orders and delivery orders.\n'
'You can define your own carrier and delivery grids for prices.\n'
'-This installs the module delivery.'),
'default_picking_policy' : fields.boolean("Deliver all at once when all products are available.",
help = "Sales order by default will be configured to deliver all products at once instead of delivering each product when it is available. This may have an impact on the shipping price."),
'group_mrp_properties': fields.boolean('Product properties on order lines',
implied_group='sale.group_mrp_properties',
help="Allows you to tag sales order lines with properties."),
'module_project_timesheet': fields.boolean("Project Timesheet"),
'module_sale_service': fields.boolean("Sale Service"),
'group_route_so_lines': fields.boolean('Choose MTO, drop shipping,... on sales order lines',
implied_group='sale_stock.group_route_so_lines',
help="Allows you to choose a delivery route on sales order lines"),
}
_defaults = {
'default_order_policy': 'manual',
}
def default_get(self, cr, uid, fields, context=None):
res = super(sale_configuration, self).default_get(cr, uid, fields, context)
# task_work, time_unit depend on other fields
res['task_work'] = res.get('module_sale_service') and res.get('module_project_timesheet')
return res
def get_default_sale_config(self, cr, uid, ids, context=None):
ir_values = self.pool.get('ir.values')
default_picking_policy = ir_values.get_default(cr, uid, 'sale.order', 'picking_policy')
return {
'default_picking_policy': default_picking_policy == 'one',
}
def set_sale_defaults(self, cr, uid, ids, context=None):
if uid != SUPERUSER_ID and not self.pool['res.users'].has_group(cr, uid, 'base.group_erp_manager'):
raise openerp.exceptions.AccessError(_("Only administrators can change the settings"))
ir_values = self.pool.get('ir.values')
wizard = self.browse(cr, uid, ids)[0]
default_picking_policy = 'one' if wizard.default_picking_policy else 'direct'
ir_values.set_default(cr, SUPERUSER_ID, 'sale.order', 'picking_policy', default_picking_policy)
res = super(sale_configuration, self).set_sale_defaults(cr, uid, ids, context)
return res
def onchange_invoice_methods(self, cr, uid, ids, group_invoice_so_lines, group_invoice_deli_orders, context=None):
if not group_invoice_deli_orders:
return {'value': {'default_order_policy': 'manual'}}
if not group_invoice_so_lines:
return {'value': {'default_order_policy': 'picking'}}
return {}
|
chromium/chromium
|
refs/heads/master
|
third_party/google-closure-library/closure/bin/build/depswriter_test.py
|
11
|
#!/usr/bin/env python
#
# Copyright 2010 The Closure Library Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit test for depswriter."""
__author__ = 'johnlenz@google.com (John Lenz)'
import unittest
import depswriter
class MockSource(object):
"""Mock Source file."""
def __init__(self, provides, requires, is_goog_module=False):
self.provides = set(provides)
self.requires = set(requires)
self.is_goog_module = is_goog_module
def __repr__(self):
return 'MockSource %s' % self.provides
class DepsWriterTestCase(unittest.TestCase):
"""Unit test for depswriter."""
def testMakeDepsFile(self):
sources = {}
sources['test.js'] = MockSource(['A'], ['B', 'C'])
deps = depswriter.MakeDepsFile(sources)
self.assertEqual(
'goog.addDependency(\'test.js\', [\'A\'], [\'B\', \'C\'], {});\n',
deps)
def testMakeDepsFileUnicode(self):
sources = {}
sources['test.js'] = MockSource([u'A'], [u'B', u'C'])
deps = depswriter.MakeDepsFile(sources)
self.assertEqual(
'goog.addDependency(\'test.js\', [\'A\'], [\'B\', \'C\'], {});\n',
deps)
def testMakeDepsFileModule(self):
sources = {}
sources['test.js'] = MockSource(['A'], ['B', 'C'], True)
deps = depswriter.MakeDepsFile(sources)
self.assertEqual(
"goog.addDependency('test.js', "
"['A'], ['B', 'C'], {'module': 'goog'});\n",
deps)
if __name__ == '__main__':
unittest.main()
|
stianjensen/django-rest-framework
|
refs/heads/master
|
tests/test_urlpatterns.py
|
76
|
from __future__ import unicode_literals
from collections import namedtuple
from django.conf.urls import include, url
from django.core import urlresolvers
from django.test import TestCase
from rest_framework.test import APIRequestFactory
from rest_framework.urlpatterns import format_suffix_patterns
# A container class for test paths for the test case
URLTestPath = namedtuple('URLTestPath', ['path', 'args', 'kwargs'])
def dummy_view(request, *args, **kwargs):
pass
class FormatSuffixTests(TestCase):
"""
Tests `format_suffix_patterns` against different URLPatterns to ensure the
URLs still resolve properly, including any captured parameters.
"""
def _resolve_urlpatterns(self, urlpatterns, test_paths):
factory = APIRequestFactory()
try:
urlpatterns = format_suffix_patterns(urlpatterns)
except Exception:
self.fail("Failed to apply `format_suffix_patterns` on the supplied urlpatterns")
resolver = urlresolvers.RegexURLResolver(r'^/', urlpatterns)
for test_path in test_paths:
request = factory.get(test_path.path)
try:
callback, callback_args, callback_kwargs = resolver.resolve(request.path_info)
except Exception:
self.fail("Failed to resolve URL: %s" % request.path_info)
self.assertEqual(callback_args, test_path.args)
self.assertEqual(callback_kwargs, test_path.kwargs)
def test_trailing_slash(self):
factory = APIRequestFactory()
urlpatterns = format_suffix_patterns([
url(r'^test/$', dummy_view),
])
resolver = urlresolvers.RegexURLResolver(r'^/', urlpatterns)
test_paths = [
(URLTestPath('/test.api', (), {'format': 'api'}), True),
(URLTestPath('/test/.api', (), {'format': 'api'}), False),
(URLTestPath('/test.api/', (), {'format': 'api'}), True),
]
for test_path, expected_resolved in test_paths:
request = factory.get(test_path.path)
try:
callback, callback_args, callback_kwargs = resolver.resolve(request.path_info)
except urlresolvers.Resolver404:
callback, callback_args, callback_kwargs = (None, None, None)
if not expected_resolved:
assert callback is None
continue
assert callback_args == test_path.args
assert callback_kwargs == test_path.kwargs
def test_format_suffix(self):
urlpatterns = [
url(r'^test$', dummy_view),
]
test_paths = [
URLTestPath('/test', (), {}),
URLTestPath('/test.api', (), {'format': 'api'}),
URLTestPath('/test.asdf', (), {'format': 'asdf'}),
]
self._resolve_urlpatterns(urlpatterns, test_paths)
def test_default_args(self):
urlpatterns = [
url(r'^test$', dummy_view, {'foo': 'bar'}),
]
test_paths = [
URLTestPath('/test', (), {'foo': 'bar', }),
URLTestPath('/test.api', (), {'foo': 'bar', 'format': 'api'}),
URLTestPath('/test.asdf', (), {'foo': 'bar', 'format': 'asdf'}),
]
self._resolve_urlpatterns(urlpatterns, test_paths)
def test_included_urls(self):
nested_patterns = [
url(r'^path$', dummy_view)
]
urlpatterns = [
url(r'^test/', include(nested_patterns), {'foo': 'bar'}),
]
test_paths = [
URLTestPath('/test/path', (), {'foo': 'bar', }),
URLTestPath('/test/path.api', (), {'foo': 'bar', 'format': 'api'}),
URLTestPath('/test/path.asdf', (), {'foo': 'bar', 'format': 'asdf'}),
]
self._resolve_urlpatterns(urlpatterns, test_paths)
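# Illustrative note (not part of the original tests): format_suffix_patterns
# adds, for each pattern, a variant that captures an optional ".<suffix>" as
# the "format" kwarg, so r'^test$' also resolves '/test.api' with
# {'format': 'api'}; that is what the assertions above exercise.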
|
great-expectations/great_expectations
|
refs/heads/develop
|
great_expectations/expectations/metrics/column_aggregate_metrics/column_values_between_count.py
|
1
|
from typing import Any, Dict, Tuple
import numpy as np
from great_expectations.core.util import get_sql_dialect_floating_point_infinity_value
from great_expectations.execution_engine import (
PandasExecutionEngine,
SparkDFExecutionEngine,
)
from great_expectations.execution_engine.execution_engine import MetricDomainTypes
from great_expectations.execution_engine.sqlalchemy_execution_engine import (
SqlAlchemyExecutionEngine,
)
from great_expectations.expectations.metrics.import_manager import sa
from great_expectations.expectations.metrics.metric_provider import (
MetricProvider,
metric_value,
)
class ColumnValuesBetweenCount(MetricProvider):
"""This metric is an aggregate helper for rare cases."""
metric_name = "column_values.between.count"
value_keys = (
"min_value",
"max_value",
"strict_min",
"strict_max",
)
@metric_value(engine=PandasExecutionEngine)
def _pandas(
cls,
execution_engine: PandasExecutionEngine,
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[Tuple, Any],
runtime_configuration: Dict,
):
min_value = metric_value_kwargs.get("min_value")
max_value = metric_value_kwargs.get("max_value")
strict_min = metric_value_kwargs.get("strict_min")
strict_max = metric_value_kwargs.get("strict_max")
if min_value is None and max_value is None:
raise ValueError("min_value and max_value cannot both be None")
if min_value is not None and max_value is not None and min_value > max_value:
raise ValueError("min_value cannot be greater than max_value")
(
df,
compute_domain_kwargs,
accessor_domain_kwargs,
) = execution_engine.get_compute_domain(
domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN
)
val = df[accessor_domain_kwargs["column"]]
        if min_value is not None and max_value is not None:
            # Use element-wise '&' (not 'and') so the comparison works on a pandas Series.
            if strict_min and strict_max:
                series = (min_value < val) & (val < max_value)
            elif strict_min:
                series = (min_value < val) & (val <= max_value)
            elif strict_max:
                series = (min_value <= val) & (val < max_value)
            else:
                series = (min_value <= val) & (val <= max_value)
elif min_value is None and max_value is not None:
if strict_max:
series = val < max_value
else:
series = val <= max_value
elif min_value is not None and max_value is None:
if strict_min:
series = min_value < val
else:
series = min_value <= val
else:
raise ValueError("unable to parse domain and value kwargs")
return np.count_nonzero(series)
@metric_value(engine=SqlAlchemyExecutionEngine)
def _sqlalchemy(
cls,
execution_engine: SqlAlchemyExecutionEngine,
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[Tuple, Any],
runtime_configuration: Dict,
):
min_value = metric_value_kwargs.get("min_value")
max_value = metric_value_kwargs.get("max_value")
strict_min = metric_value_kwargs.get("strict_min")
strict_max = metric_value_kwargs.get("strict_max")
if min_value is not None and max_value is not None and min_value > max_value:
raise ValueError("min_value cannot be greater than max_value")
if min_value is None and max_value is None:
raise ValueError("min_value and max_value cannot both be None")
dialect_name = execution_engine.engine.dialect.name.lower()
if (
min_value
== get_sql_dialect_floating_point_infinity_value(
schema="api_np", negative=True
)
) or (
min_value
== get_sql_dialect_floating_point_infinity_value(
schema="api_cast", negative=True
)
):
min_value = get_sql_dialect_floating_point_infinity_value(
schema=dialect_name, negative=True
)
if (
min_value
== get_sql_dialect_floating_point_infinity_value(
schema="api_np", negative=False
)
) or (
min_value
== get_sql_dialect_floating_point_infinity_value(
schema="api_cast", negative=False
)
):
min_value = get_sql_dialect_floating_point_infinity_value(
schema=dialect_name, negative=False
)
if (
max_value
== get_sql_dialect_floating_point_infinity_value(
schema="api_np", negative=True
)
) or (
max_value
== get_sql_dialect_floating_point_infinity_value(
schema="api_cast", negative=True
)
):
max_value = get_sql_dialect_floating_point_infinity_value(
schema=dialect_name, negative=True
)
if (
max_value
== get_sql_dialect_floating_point_infinity_value(
schema="api_np", negative=False
)
) or (
max_value
== get_sql_dialect_floating_point_infinity_value(
schema="api_cast", negative=False
)
):
max_value = get_sql_dialect_floating_point_infinity_value(
schema=dialect_name, negative=False
)
(
selectable,
compute_domain_kwargs,
accessor_domain_kwargs,
) = execution_engine.get_compute_domain(
domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN
)
column = sa.column(accessor_domain_kwargs["column"])
if min_value is None:
if strict_max:
condition = column < max_value
else:
condition = column <= max_value
elif max_value is None:
if strict_min:
condition = column > min_value
else:
condition = column >= min_value
else:
if strict_min and strict_max:
condition = sa.and_(column > min_value, column < max_value)
elif strict_min:
condition = sa.and_(column > min_value, column <= max_value)
elif strict_max:
condition = sa.and_(column >= min_value, column < max_value)
else:
condition = sa.and_(column >= min_value, column <= max_value)
return execution_engine.engine.execute(
sa.select([sa.func.count()]).select_from(selectable).where(condition)
).scalar()
@metric_value(engine=SparkDFExecutionEngine)
def _spark(
cls,
execution_engine: SparkDFExecutionEngine,
metric_domain_kwargs: Dict,
metric_value_kwargs: Dict,
metrics: Dict[Tuple, Any],
runtime_configuration: Dict,
):
min_value = metric_value_kwargs.get("min_value")
max_value = metric_value_kwargs.get("max_value")
strict_min = metric_value_kwargs.get("strict_min")
strict_max = metric_value_kwargs.get("strict_max")
if min_value is not None and max_value is not None and min_value > max_value:
raise ValueError("min_value cannot be greater than max_value")
if min_value is None and max_value is None:
raise ValueError("min_value and max_value cannot both be None")
(
df,
compute_domain_kwargs,
accessor_domain_kwargs,
) = execution_engine.get_compute_domain(
domain_kwargs=metric_domain_kwargs, domain_type=MetricDomainTypes.COLUMN
)
column = df[accessor_domain_kwargs["column"]]
if min_value is not None and max_value is not None and min_value > max_value:
raise ValueError("min_value cannot be greater than max_value")
if min_value is None and max_value is None:
raise ValueError("min_value and max_value cannot both be None")
if min_value is None:
if strict_max:
condition = column < max_value
else:
condition = column <= max_value
elif max_value is None:
if strict_min:
condition = column > min_value
else:
condition = column >= min_value
else:
if strict_min and strict_max:
condition = (column > min_value) & (column < max_value)
elif strict_min:
condition = (column > min_value) & (column <= max_value)
elif strict_max:
condition = (column >= min_value) & (column < max_value)
else:
condition = (column >= min_value) & (column <= max_value)
return df.filter(condition).count()
|
dohoangkhiem/uwsgi
|
refs/heads/master
|
examples/uwsgirouter5.py
|
21
|
import uwsgi
fd = uwsgi.connect("127.0.0.1:3033")
def application(e, s):
for part in uwsgi.send_message(fd, 0, 4, e, 30, e['wsgi.input'].fileno(), uwsgi.cl()):
yield part
|
AutorestCI/azure-sdk-for-python
|
refs/heads/master
|
azure-mgmt-iothubprovisioningservices/azure/mgmt/iothubprovisioningservices/operations/__init__.py
|
2
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .operations import Operations
from .dps_certificate_operations import DpsCertificateOperations
from .iot_dps_resource_operations import IotDpsResourceOperations
from .dps_certificates_operations import DpsCertificatesOperations
__all__ = [
'Operations',
'DpsCertificateOperations',
'IotDpsResourceOperations',
'DpsCertificatesOperations',
]
|
initcron/ansible
|
refs/heads/devel
|
plugins/inventory/nova.py
|
40
|
#!/usr/bin/env python
# (c) 2012, Marco Vito Moscaritolo <marco@agavee.com>
#
# This file is part of Ansible,
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
import sys
import re
import os
import ConfigParser
from novaclient import client as nova_client
try:
import json
except:
import simplejson as json
from ansible.module_utils.openstack import *
###################################################
# executed with no parameters, return the list of
# all groups and hosts
NOVA_CONFIG_FILES = [os.getcwd() + "/nova.ini",
os.path.expanduser(os.environ.get('ANSIBLE_CONFIG', "~/nova.ini")),
"/etc/ansible/nova.ini"]
NOVA_DEFAULTS = {
'auth_system': None,
'region_name': None,
}
def nova_load_config_file():
p = ConfigParser.SafeConfigParser(NOVA_DEFAULTS)
for path in NOVA_CONFIG_FILES:
if os.path.exists(path):
p.read(path)
return p
return None
config = nova_load_config_file()
if not config:
sys.exit('Unable to find configfile in %s' % ', '.join(NOVA_CONFIG_FILES))
client = nova_client.Client(
config.get('openstack', 'version'),
config.get('openstack', 'username'),
config.get('openstack', 'api_key'),
config.get('openstack', 'project_id'),
config.get('openstack', 'auth_url'),
region_name = config.get('openstack', 'region_name'),
auth_system = config.get('openstack', 'auth_system')
)
if len(sys.argv) == 2 and (sys.argv[1] == '--list'):
groups = {}
# Cycle on servers
for server in client.servers.list():
private = openstack_find_nova_addresses(getattr(server, 'addresses'), 'fixed', 'private')
public = openstack_find_nova_addresses(getattr(server, 'addresses'), 'floating', 'public')
        # Define group (or set to 'undefined')
        group = server.metadata['group'] if 'group' in server.metadata else 'undefined'
# Create group if not exist
if group not in groups:
groups[group] = []
# Append group to list
if server.accessIPv4:
groups[group].append(server.accessIPv4)
continue
if public:
groups[group].append(''.join(public))
continue
if private:
groups[group].append(''.join(private))
continue
# Return server list
print(json.dumps(groups, sort_keys=True, indent=2))
sys.exit(0)
#####################################################
# executed with a hostname as a parameter, return the
# variables for that host
elif len(sys.argv) == 3 and (sys.argv[1] == '--host'):
results = {}
ips = []
for instance in client.servers.list():
private = openstack_find_nova_addresses(getattr(instance, 'addresses'), 'fixed', 'private')
public = openstack_find_nova_addresses(getattr(instance, 'addresses'), 'floating', 'public')
ips.append( instance.accessIPv4)
ips.append(''.join(private))
ips.append(''.join(public))
if sys.argv[2] in ips:
for key in vars(instance):
# Extract value
value = getattr(instance, key)
# Generate sanitized key
                key = 'os_' + re.sub(r"[^A-Za-z0-9\-]", "_", key).lower()
                # Add value to instance result (exclude manager class)
#TODO: maybe use value.__class__ or similar inside of key_name
if key != 'os_manager':
results[key] = value
print(json.dumps(results, sort_keys=True, indent=2))
sys.exit(0)
else:
    print("usage: --list ..OR.. --host <hostname>")
sys.exit(1)
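# Illustrative note (not part of the original script): as an Ansible dynamic
# inventory source this script is invoked in one of two ways:
#
#     ./nova.py --list        -> {"<group>": ["<address>", ...], ...}
#     ./nova.py --host <addr> -> {"os_id": "...", "os_name": "...", ...}
#
# Group membership comes from each server's 'group' metadata key and defaults
# to 'undefined'.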
|
mishbahr/djangocms-forms
|
refs/heads/master
|
djangocms_forms/migrations/0004_redirect_delay.py
|
1
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('djangocms_forms', '0003_add_referrer_field'),
]
operations = [
migrations.AddField(
model_name='formdefinition',
name='redirect_delay',
field=models.PositiveIntegerField(verbose_name='Redirect Delay', blank=True, null=True, help_text="Wait this number of milliseconds before redirecting. 1000 milliseconds = 1 second."),
),
]
|
lamarmeigs/django-clean-fields
|
refs/heads/master
|
tests/__init__.py
|
17
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
|
arguman/arguman.org
|
refs/heads/master
|
web/newsfeed/management/commands/create_initial_newsfeed.py
|
7
|
from django.core.management import BaseCommand
from newsfeed.models import Entry
from premises.models import Contention
class Command(BaseCommand):
def handle(self, *args, **options):
for contention in Contention.objects.all():
Entry.objects.create(
object_id=contention.id,
news_type=contention.get_newsfeed_type(),
sender=contention.get_actor(),
related_object=contention.get_newsfeed_bundle(),
date_creation=contention.date_creation
)
|
bjlittle/python-csp
|
refs/heads/master
|
csp/lint/channels.py
|
3
|
#!/usr/bin/env python
"""
Check that every process in a file has correct readsets and writesets.
Copyright (C) Sarah Mount, 2010.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import compiler
import compiler.ast as ast
import compiler.visitor as visitor
import exstatic.cspwarnings
__author__ = 'Sarah Mount <s.mount@wlv.ac.uk>'
__date__ = 'April 2010'
__all__ = ['ChannelChecker']
class ChannelChecker(visitor.ASTVisitor):
"""Check that documented readsets and writesets are correct
w.r.t. code.
"""
def __init__(self, filename):
visitor.ASTVisitor.__init__(self)
self.filename = filename
self.current_process = ''
self.current_process_lineno = 0
self.writeset = {}
self.readset = {}
self.readset_lineno = 0
self.writeset_lineno = 0
return
def extract_sets(self, doc):
"""Extract the readset and writeset from function
documentation.
"""
readset = []
writeset = []
has_readset = False
has_writeset = False
lineno = 0
if doc is not None:
for line in doc.split('\n'):
lineno += 1
words = line.strip().split('=')
if words is not None:
if words[0].strip() == 'readset':
has_readset = True
self.readset_lineno += lineno
chans = words[1].strip().split(',')
                        readset = [y for y in [x.strip() for x in chans] if y != '']
elif words[0].strip() == 'writeset':
has_writeset = True
self.writeset_lineno += lineno
chans = words[1].strip().split(',')
                        writeset = [y for y in [x.strip() for x in chans] if y != '']
# 'W002':'No readset given in documentation.'
if not has_readset:
exstatic.cspwarnings.create_error(self.filename,
self.readset_lineno,
self.current_process,
'W002')
# 'W003':'No writeset given in documentation.'
if not has_writeset:
exstatic.cspwarnings.create_error(self.filename,
self.writeset_lineno,
self.current_process,
'W003')
return set(readset), set(writeset)
def is_process(self, decorators):
"""Determine whether or not the current function is a CSP
process.
"""
for decorator in decorators:
if (decorator.name == 'process' or decorator.name == 'forever'):
return True
return False
def check_sets(self, readset, writeset):
"""Check that the documented readset and writeset of the
current function match the code inside the function
definition.
@param readset the documented readset of the current process
@param writeset the documented writeset of the current process
"""
# 'W001':'Channel in both readset and writeset.'
if len(readset.intersection(writeset)) > 0:
exstatic.cspwarnings.create_error(self.filename,
self.readset_lineno,
self.current_process,
'W001')
# 'E004':'Channel appears in documented readset but not read
# from in function body.'
diff = set(self.readset.values()).difference(readset)
for channel in diff:
exstatic.cspwarnings.create_error(self.filename,
self.readset_lineno,
self.current_process,
'E004')
# 'E005':'Channel is read from in function body but does not
# appear in documented readset'
diff = set(readset).difference(list(self.readset.values()))
for channel in diff:
for key in self.readset:
exstatic.cspwarnings.create_error(self.filename,
key,
self.current_process,
'E005')
# 'E006':'Channel appears in documented writeset but not
# written to in function body.'
diff = set(self.writeset.values()).difference(writeset)
for channel in diff:
exstatic.cspwarnings.create_error(self.filename,
self.writeset_lineno,
self.current_process,
'E006')
# 'E007':'Channel is written to in function body but does not
# appear in documented writeset'
diff = set(writeset).difference(list(self.writeset.values()))
for channel in diff:
for key in self.writeset:
exstatic.cspwarnings.create_error(self.filename,
key,
self.current_process,
'E007')
return
def visitFunction(self, node):
"""Visit function definition.
"""
# If this function definition is not a CSP process, ignore it.
if (node.decorators is None or
                not self.is_process(node.decorators)):
return
# Store useful information about this process.
self.current_process = node.name
self.current_process_lineno = node.lineno
self.readset_lineno, self.writeset_lineno = node.lineno, node.lineno
readset, writeset = self.extract_sets(node.doc)
# 'E002':'Channel in readset is not a formal parameter to this
# process.',
for channel in readset:
if not channel in node.argnames:
exstatic.cspwarnings.create_error(self.filename,
self.readset_lineno,
node.name,
'E002')
# 'E003':'Channel in writeset is not a formal parameter to
# this process.',
for channel in writeset:
if not channel in node.argnames:
exstatic.cspwarnings.create_error(self.filename,
self.writeset_lineno,
node.name,
'E003')
        # Ensure that we visit every statement inside this function.
for stmt in node.code:
self.visit(stmt)
# Check the documented readset and writeset against actual
# method calls within the function.
self.check_sets(readset, writeset)
# Remove information held about this function.
self.current_process = ''
self.current_process_lineno = 0
self.writeset = {}
self.readset = {}
return
def visitCallFunc(self, node):
"""Visit function call.
TODO: Deal with Alt and Barrier types.
"""
callee = node.node
if isinstance(callee, ast.Getattr):
if not isinstance(callee.expr, ast.Getattr):
# Catch all calls to channel read().
if callee.attrname == 'read':
self.readset[callee.lineno] = callee.expr.name
# Catch all calls to channel write()
elif callee.attrname == 'write':
self.writeset[callee.lineno] = callee.expr.name
return
if __name__ == '__main__':
import sys
lint = ChannelChecker(sys.argv[1])
compiler.walk(compiler.parseFile(sys.argv[1]),
lint,
walker=lint,
verbose=5)
exstatic.cspwarnings.print_errors(excluded=[])
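# Illustrative note (not part of the original module): extract_sets() expects
# each CSP process to declare its channels in its docstring, one list per
# line, roughly like:
#
#     @process
#     def copier(cin, cout):
#         """Copy values from cin to cout.
#
#         readset = cin
#         writeset = cout
#         """
#         while True:
#             cout.write(cin.read())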
|
harlowja/urwid
|
refs/heads/master
|
docs/tutorial/qa.py
|
22
|
import urwid
def exit_on_q(key):
if key in ('q', 'Q'):
raise urwid.ExitMainLoop()
class QuestionBox(urwid.Filler):
def keypress(self, size, key):
if key != 'enter':
return super(QuestionBox, self).keypress(size, key)
self.original_widget = urwid.Text(
u"Nice to meet you,\n%s.\n\nPress Q to exit." %
edit.edit_text)
edit = urwid.Edit(u"What is your name?\n")
fill = QuestionBox(edit)
loop = urwid.MainLoop(fill, unhandled_input=exit_on_q)
loop.run()
|
xiangel/hue
|
refs/heads/master
|
desktop/core/ext-py/Django-1.6.10/tests/view_tests/app3/__init__.py
|
9480
|
#
|
jgmanzanas/CMNT_004_15
|
refs/heads/master
|
project-addons/product_pack/sale.py
|
1
|
# -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Comunitea All Rights Reserved
# $Jesús Ventosinos Mayor <jesus@comunitea.com>$
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields
from openerp import api, models
class sale_order_line(models.Model):
_inherit = 'sale.order.line'
_columns = {
'pack_depth': fields.integer(
'Depth', required=True,
help='Depth of the product if it is part of a pack.'
),
'pack_parent_line_id': fields.many2one(
'sale.order.line', 'Pack',
help='The pack that contains this product.', ondelete="cascade"
),
'pack_child_line_ids': fields.one2many(
'sale.order.line', 'pack_parent_line_id', 'Lines in pack'),
}
_defaults = {
'pack_depth': 0,
}
def invoice_line_create(self, cr, uid, ids, context=None):
no_pack_ids = []
for line in self.browse(cr, uid, ids, context):
if not line.pack_depth > 0:
no_pack_ids.append(line.id)
return super(sale_order_line, self).invoice_line_create(cr, uid, no_pack_ids, context)
@api.multi
def write(self, vals):
res = super(sale_order_line, self).write(vals)
for line in self:
line.refresh()
            if line.pack_child_line_ids and (
                    not line.product_id or not line.product_id.pack_line_ids):
for cline in line.pack_child_line_ids:
cline.pack_depth = 0
cline.pack_parent_line_id = False
return res
@api.multi
def pack_in_moves(self, product_ids):
is_in_list = True
for child in self.pack_child_line_ids:
if child.pack_child_line_ids:
if not child.pack_in_moves(product_ids):
is_in_list = False
else:
if child.product_id.id not in product_ids:
is_in_list = False
return is_in_list
class sale_order(models.Model):
_inherit = 'sale.order'
def create(self, cr, uid, vals, context=None):
result = super(sale_order, self).create(cr, uid, vals, context)
self.expand_packs(cr, uid, [result], context)
return result
def write(self, cr, uid, ids, vals, context=None):
result = super(sale_order, self).write(cr, uid, ids, vals, context)
self.expand_packs(cr, uid, ids, context)
return result
def copy(self, cr, uid, id, default={}, context=None):
line_obj = self.pool.get('sale.order.line')
result = super(sale_order, self).copy(cr, uid, id, default, context)
sale = self.browse(cr, uid, result, context)
self.unlink_pack_components(cr, uid, sale.id, context)
self.expand_packs(cr, uid, sale.id, context)
return result
def unlink_pack_components(self, cr, uid, sale_id, context=None):
search_vals = [('order_id', '=', sale_id), ('pack_parent_line_id', '!=', None),
('pack_child_line_ids', '=', None)]
unlink_lines = self.pool.get('sale.order.line').search(cr, uid, search_vals,
context=context)
if unlink_lines:
self.pool.get('sale.order.line').unlink(cr, uid, unlink_lines, context)
self.unlink_pack_components(cr, uid, sale_id, context)
else:
return
def expand_packs(self, cr, uid, ids, context={}, depth=1):
if type(ids) in [int, long]:
ids = [ids]
if depth == 10:
return
updated_orders = []
for order in self.browse(cr, uid, ids, context):
fiscal_position = (
order.fiscal_position
and self.pool.get('account.fiscal.position').browse(
cr, uid, order.fiscal_position.id, context
)
or False
)
"""
The reorder variable is used to ensure lines of the same pack go
right after their parent. What the algorithm does is check if the
previous item had children. As children items must go right after
the parent if the line we're evaluating doesn't have a parent it
means it's a new item (and probably has the default 10 sequence
            number - unless the appropriate c2c_sale_sequence module is
installed). In this case we mark the item for reordering and
evaluate the next one. Note that as the item is not evaluated and
it might have to be expanded it's put on the queue for another
iteration (it's simple and works well). Once the next item has been
evaluated the sequence of the item marked for reordering is updated
with the next value.
"""
sequence = -1
reorder = []
last_had_children = False
for line in order.order_line:
if last_had_children and not line.pack_parent_line_id:
reorder.append(line.id)
if (
line.product_id.pack_line_ids
and order.id not in updated_orders
):
updated_orders.append(order.id)
continue
sequence += 1
if sequence > line.sequence:
self.pool.get('sale.order.line').write(
cr, uid, [line.id], {'sequence': sequence, }, context)
else:
sequence = line.sequence
if line.state != 'draft':
continue
if not line.product_id:
continue
""" If pack was already expanded (in another create/write
operation or in a previous iteration) don't do it again. """
if line.pack_child_line_ids:
last_had_children = True
continue
last_had_children = False
for subline in line.product_id.pack_line_ids:
sequence += 1
subproduct = subline.product_id
quantity = subline.quantity * line.product_uom_qty
if line.product_id.pack_fixed_price:
price = 0.0
discount = 0.0
else:
pricelist = order.pricelist_id.id
price = self.pool.get('product.pricelist').price_get(
cr, uid, [pricelist], subproduct.id, quantity,
order.partner_id.id, {
'uom': subproduct.uom_id.id,
'date': order.date_order,
}
)[pricelist]
discount = line.discount
# Obtain product name in partner's language
ctx = {'lang': order.partner_id.lang}
subproduct_name = self.pool.get('product.product').browse(
cr, uid, subproduct.id, ctx).name
tax_ids = self.pool.get('account.fiscal.position').map_tax(
cr, uid, fiscal_position, subproduct.taxes_id)
if subproduct.uos_id:
uos_id = subproduct.uos_id.id
uos_qty = quantity * subproduct.uos_coeff
else:
uos_id = False
uos_qty = quantity
vals = {
'order_id': order.id,
'name': '%s%s' % (
'> ' * (line.pack_depth+1), subproduct_name
),
'sequence': sequence,
'delay': subproduct.sale_delay or 0.0,
'product_id': subproduct.id,
'procurement_ids': (
[(4, x.id) for x in line.procurement_ids]
),
'price_unit': price,
'tax_id': [(6, 0, tax_ids)],
'address_allotment_id': False,
'product_uom_qty': quantity,
'product_uom': subproduct.uom_id.id,
'product_uos_qty': uos_qty,
'product_uos': uos_id,
'product_packaging': False,
'discount': discount,
'number_packages': False,
'th_weight': False,
'state': 'draft',
'pack_parent_line_id': line.id,
'pack_depth': line.pack_depth + 1,
}
""" It's a control for the case that the
nan_external_prices was installed with the product pack """
if 'prices_used' in line:
vals['prices_used'] = line.prices_used
if line.deposit:
vals['deposit'] = True
self.pool.get('sale.order.line').create(
cr, uid, vals, context)
if order.id not in updated_orders:
updated_orders.append(order.id)
for id in reorder:
sequence += 1
self.pool.get('sale.order.line').write(
cr, uid, [id], {'sequence': sequence, }, context)
if updated_orders:
""" Try to expand again all those orders that had a pack in this
iteration. This way we support packs inside other packs. """
self.expand_packs(cr, uid, ids, context, depth + 1)
return
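# Framework-free sketch (not part of the original module) of the reordering
# idea described in the expand_packs() docstring: a line that follows a pack
# parent but is not itself a pack component is deferred, and its sequence is
# reassigned after all other lines so that pack components stay grouped
# directly under their parent. The dict keys below are simplified stand-ins
# for the ORM fields used above.
def _reorder_sketch(lines):
    """lines: list of dicts with 'sequence', 'is_child' and 'has_children'."""
    sequence = -1
    deferred = []
    last_had_children = False
    for line in lines:
        if last_had_children and not line['is_child']:
            deferred.append(line)
            continue
        sequence += 1
        if sequence > line['sequence']:
            line['sequence'] = sequence
        else:
            sequence = line['sequence']
        last_had_children = line['has_children']
    for line in deferred:
        sequence += 1
        line['sequence'] = sequence
    return lines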
|
DESHRAJ/fjord
|
refs/heads/master
|
vendor/packages/translate-toolkit/translate/storage/projstore.py
|
4
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2010 Zuza Software Foundation
#
# This file is part of the Translate Toolkit.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>.
import os
from StringIO import StringIO
from lxml import etree
__all__ = ['FileExistsInProjectError', 'FileNotInProjectError', 'ProjectStore']
class FileExistsInProjectError(Exception):
pass
class FileNotInProjectError(Exception):
pass
class ProjectStore(object):
"""Basic project file container."""
# INITIALIZERS #
def __init__(self):
self._files = {}
self._sourcefiles = []
self._targetfiles = []
self._transfiles = []
self.settings = {}
self.convert_map = {}
        # The map above maps each input file (key) to a 2-tuple of the output
        # file it is converted to and the template used. All values are project file names.
# eg. convert_map = {
# 'sources/doc.odt': ('trans/doc.odt.xlf', None),
# 'trans/doc.odt.xlf': ('targets/doc.odt', 'sources/doc.odt')
#}
# The following dict groups together sets of mappings from a file
# "type" string ("src", "tgt" or "trans") to various other values
# or objects.
self.TYPE_INFO = {
# type => prefix for new files
'f_prefix': {
'src': 'sources/',
'tgt': 'targets/',
'trans': 'trans/',
},
# type => list containing filenames for that type
'lists': {
'src': self._sourcefiles,
'tgt': self._targetfiles,
'trans': self._transfiles,
},
# type => next type in process: src => trans => tgt
'next_type': {
'src': 'trans',
'trans': 'tgt',
'tgt': None,
},
# type => name of the sub-section in the settings file/dict
'settings': {
'src': 'sources',
'tgt': 'targets',
'trans': 'transfiles',
}
}
def __del__(self):
try:
self.close()
except Exception:
pass
# ACCESSORS #
def _get_sourcefiles(self):
"""Read-only access to ``self._sourcefiles``."""
return tuple(self._sourcefiles)
sourcefiles = property(_get_sourcefiles)
def _get_targetfiles(self):
"""Read-only access to ``self._targetfiles``."""
return tuple(self._targetfiles)
targetfiles = property(_get_targetfiles)
def _get_transfiles(self):
"""Read-only access to ``self._transfiles``."""
return tuple(self._transfiles)
transfiles = property(_get_transfiles)
# SPECIAL METHODS #
    def __contains__(self, lhs):
"""@returns ``True`` if ``lhs`` is a file name or file object in the project store."""
return lhs in self._sourcefiles or \
lhs in self._targetfiles or \
lhs in self._transfiles or \
lhs in self._files or \
lhs in self._files.values()
# METHODS #
def append_file(self, afile, fname, ftype='trans', delete_orig=False):
"""Append the given file to the project with the given filename, marked
to be of type ``ftype`` ('src', 'trans', 'tgt').
:type delete_orig: bool
:param delete_orig: Whether or not the original (given) file should
be deleted after being appended. This is set to
``True`` by
:meth:`~translate.storage.project.convert_forward`
. Not used in this class."""
if not ftype in self.TYPE_INFO['f_prefix']:
raise ValueError('Invalid file type: %s' % (ftype))
if isinstance(afile, basestring) and os.path.isfile(afile) and not fname:
# Try and use afile as the file name
fname, afile = afile, open(afile)
        # Check if we can get a real file name
realfname = fname
if realfname is None or not os.path.isfile(realfname):
realfname = getattr(afile, 'name', None)
if realfname is None or not os.path.isfile(realfname):
realfname = getattr(afile, 'filename', None)
if not realfname or not os.path.isfile(realfname):
realfname = None
# Try to get the file name from the file object, if it was not given:
if not fname:
fname = getattr(afile, 'name', None)
if not fname:
fname = getattr(afile, 'filename', None)
fname = self._fix_type_filename(ftype, fname)
if not fname:
raise ValueError('Could not deduce file name and none given')
if fname in self._files:
raise FileExistsInProjectError(fname)
if realfname is not None and os.path.isfile(realfname):
self._files[fname] = realfname
else:
self._files[fname] = afile
self.TYPE_INFO['lists'][ftype].append(fname)
return afile, fname
def append_sourcefile(self, afile, fname=None):
return self.append_file(afile, fname, ftype='src')
def append_targetfile(self, afile, fname=None):
return self.append_file(afile, fname, ftype='tgt')
def append_transfile(self, afile, fname=None):
return self.append_file(afile, fname, ftype='trans')
def remove_file(self, fname, ftype=None):
"""Remove the file with the given project name from the project.
If the file type ('src', 'trans' or 'tgt') is not given, it is
guessed."""
if fname not in self._files:
raise FileNotInProjectError(fname)
if not ftype:
# Guess file type (source/trans/target)
for ft, prefix in self.TYPE_INFO['f_prefix'].items():
if fname.startswith(prefix):
ftype = ft
break
self.TYPE_INFO['lists'][ftype].remove(fname)
if self._files[fname] and hasattr(self._files[fname], 'close'):
self._files[fname].close()
del self._files[fname]
def remove_sourcefile(self, fname):
self.remove_file(fname, ftype='src')
def remove_targetfile(self, fname):
self.remove_file(fname, ftype='tgt')
def remove_transfile(self, fname):
self.remove_file(fname, ftype='trans')
def close(self):
self.save()
def get_file(self, fname, mode='rb'):
"""Retrieve the file with the given name from the project store.
The file is looked up in the ``self._files`` dictionary. The values
in this dictionary may be ``None``, to indicate that the file is not
cacheable and needs to be retrieved in a special way. This special
way must be defined in this method of sub-classes. The value may
also be a string, which indicates that it is a real file accessible
via ``open``.
:type mode: str
:param mode: The mode in which to re-open the file (if it is closed).
"""
if fname not in self._files:
raise FileNotInProjectError(fname)
rfile = self._files[fname]
if isinstance(rfile, basestring):
rfile = open(rfile, 'rb')
# Check that the file is actually open
if getattr(rfile, 'closed', False):
rfname = fname
if not os.path.isfile(rfname):
rfname = getattr(rfile, 'name', None)
            if not rfname or not os.path.isfile(rfname):
                rfname = getattr(rfile, 'filename', None)
            if not rfname or not os.path.isfile(rfname):
raise IOError('Could not locate file: %s (%s)' % (rfile, fname))
rfile = open(rfname, mode)
self._files[fname] = rfile
return rfile
def get_filename_type(self, fname):
"""Get the type of file ('src', 'trans', 'tgt') with the given name."""
for ftype in self.TYPE_INFO['lists']:
if fname in self.TYPE_INFO['lists'][ftype]:
return ftype
raise FileNotInProjectError(fname)
def get_proj_filename(self, realfname):
"""Try and find a project file name for the given real file name."""
for fname in self._files:
if fname == realfname or self._files[fname] == realfname:
return fname
raise ValueError('Real file not in project store: %s' % (realfname))
def load(self, *args, **kwargs):
"""Load the project in some way. Undefined for this (base) class."""
pass
def save(self, filename=None, *args, **kwargs):
"""Save the project in some way. Undefined for this (base) class."""
pass
def update_file(self, pfname, infile):
"""Remove the project file with name ``pfname`` and add the contents
from ``infile`` to the project under the same file name.
:returns: the results from :meth:`ProjectStore.append_file`."""
ftype = self.get_filename_type(pfname)
self.remove_file(pfname)
        return self.append_file(infile, pfname, ftype)
def _fix_type_filename(self, ftype, fname):
"""Strip the path from the filename and prepend the correct prefix."""
path, fname = os.path.split(fname)
return self.TYPE_INFO['f_prefix'][ftype] + fname
def _generate_settings(self):
"""@returns A XML string that represents the current settings."""
xml = etree.Element('translationproject')
# Add file names to settings XML
if self._sourcefiles:
sources_el = etree.Element('sources')
for fname in self._sourcefiles:
src_el = etree.Element('filename')
src_el.text = fname
sources_el.append(src_el)
xml.append(sources_el)
if self._transfiles:
transfiles_el = etree.Element('transfiles')
for fname in self._transfiles:
trans_el = etree.Element('filename')
trans_el.text = fname
transfiles_el.append(trans_el)
xml.append(transfiles_el)
if self._targetfiles:
target_el = etree.Element('targets')
for fname in self._targetfiles:
tgt_el = etree.Element('filename')
tgt_el.text = fname
target_el.append(tgt_el)
xml.append(target_el)
# Add conversion mappings
if self.convert_map:
conversions_el = etree.Element('conversions')
for in_fname, (out_fname, templ_fname) in self.convert_map.iteritems():
if in_fname not in self._files or out_fname not in self._files:
continue
conv_el = etree.Element('conv')
input_el = etree.Element('input')
input_el.text = in_fname
conv_el.append(input_el)
output_el = etree.Element('output')
output_el.text = out_fname
conv_el.append(output_el)
if templ_fname:
templ_el = etree.Element('template')
templ_el.text = templ_fname
conv_el.append(templ_el)
conversions_el.append(conv_el)
xml.append(conversions_el)
# Add options to settings
if 'options' in self.settings:
options_el = etree.Element('options')
for option, value in self.settings['options'].items():
opt_el = etree.Element('option')
opt_el.attrib['name'] = option
opt_el.text = value
options_el.append(opt_el)
xml.append(options_el)
return etree.tostring(xml, pretty_print=True)
def _load_settings(self, settingsxml):
"""Load project settings from the given XML string.
``settingsxml`` is parsed into a DOM tree (``lxml.etree.fromstring``)
which is then inspected."""
settings = {}
xml = etree.fromstring(settingsxml)
# Load files in project
for section in ('sources', 'targets', 'transfiles'):
groupnode = xml.find(section)
if groupnode is None:
continue
settings[section] = []
for fnode in groupnode.getchildren():
settings[section].append(fnode.text)
conversions_el = xml.find('conversions')
if conversions_el is not None:
self.convert_map = {}
for conv_el in conversions_el.iterchildren():
in_fname, out_fname, templ_fname = None, None, None
for child_el in conv_el.iterchildren():
if child_el.tag == 'input':
in_fname = child_el.text
elif child_el.tag == 'output':
out_fname = child_el.text
elif child_el.tag == 'template':
templ_fname = child_el.text
# Make sure that in_fname and out_fname exist in
# settings['sources'], settings['targets'] or
# settings['transfiles']
in_found, out_found, templ_found = False, False, False
for section in ('sources', 'transfiles', 'targets'):
if section not in settings:
continue
if in_fname in settings[section]:
in_found = True
if out_fname in settings[section]:
out_found = True
if templ_fname and templ_fname in settings[section]:
templ_found = True
if in_found and out_found and (not templ_fname or templ_found):
self.convert_map[in_fname] = (out_fname, templ_fname)
# Load options
groupnode = xml.find('options')
if groupnode is not None:
settings['options'] = {}
for opt in groupnode.iterchildren():
settings['options'][opt.attrib['name']] = opt.text
self.settings = settings
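
# --- Editor's note: the demo below is NOT part of the Translate Toolkit. It is
# a minimal, self-contained sketch of the ProjectStore workflow above; it writes
# a throw-away source file so it can run as-is, and the file name is arbitrary.
if __name__ == '__main__':
    demo_path = 'projstore_demo.txt'
    f = open(demo_path, 'w')
    f.write('hello')
    f.close()
    store = ProjectStore()
    # The file is stored under the 'sources/' prefix for the 'src' type
    afile, pname = store.append_sourcefile(demo_path)
    assert pname == 'sources/projstore_demo.txt'
    assert store.get_filename_type(pname) == 'src'
    print(store.get_file(pname).read())   # -> 'hello'
    store.remove_file(pname)              # file type is guessed from the prefix
    os.remove(demo_path)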
|
Matty-Downing2169/opencamlib
|
refs/heads/master
|
scripts/waterline/waterline_2_tux_adapt.py
|
7
|
import ocl
import camvtk
import time
import vtk
import datetime
import math
def drawLoops(myscreen, loops, loopcolor):
nloop = 0
for lop in loops:
n = 0
N = len(lop)
first_point=ocl.Point(-1,-1,5)
previous=ocl.Point(-1,-1,5)
for p in lop:
if n==0: # don't draw anything on the first iteration
previous=p
first_point = p
elif n== (N-1): # the last point
myscreen.addActor( camvtk.Line(p1=(previous.x,previous.y,previous.z),p2=(p.x,p.y,p.z),color=loopcolor) ) # the normal line
# and a line from p to the first point
myscreen.addActor( camvtk.Line(p1=(p.x,p.y,p.z),p2=(first_point.x,first_point.y,first_point.z),color=loopcolor) )
else:
myscreen.addActor( camvtk.Line(p1=(previous.x,previous.y,previous.z),p2=(p.x,p.y,p.z),color=loopcolor) )
previous=p
n=n+1
print "rendered loop ",nloop, " with ", len(lop), " points"
nloop = nloop+1
if __name__ == "__main__":
print ocl.revision()
myscreen = camvtk.VTKScreen()
#stl = camvtk.STLSurf("../../stl/demo.stl")
stl = camvtk.STLSurf("../../stl/gnu_tux_mod.stl")
#stl = camvtk.STLSurf("../../stl/waterline1.stl")
myscreen.addActor(stl)
stl.SetWireframe() # render tux as wireframe
#stl.SetSurface() # render tux as surface
stl.SetColor(camvtk.cyan)
polydata = stl.src.GetOutput()
s = ocl.STLSurf()
camvtk.vtkPolyData2OCLSTL(polydata, s)
print "STL surface read,", s.size(), "triangles"
zh=1.75145
diam = 1.4
length = 500
loops = []
#cutter = ocl.CylCutter( diam , length )
cutter = ocl.BallCutter( diam , length )
#cutter = ocl.BullCutter( diam , diam/5, length )
wl = ocl.Waterline()
wl.setSTL(s)
wl.setCutter(cutter)
wl.setZ(zh)
wl.setSampling(0.5)
#wl.setThreads(5)
t_before = time.time()
wl.run()
t_after = time.time()
calctime = t_after-t_before
print " Waterline done in ", calctime," s"
cutter_loops = wl.getLoops()
for l in cutter_loops:
loops.append(l)
aloops = []
awl = ocl.AdaptiveWaterline()
awl.setSTL(s)
awl.setCutter(cutter)
awl.setZ(zh)
awl.setSampling(0.1)
awl.setMinSampling(0.01)
#wl.setThreads(5)
t_before = time.time()
awl.run()
t_after = time.time()
calctime = t_after-t_before
print " AdaptiveWaterline done in ", calctime," s"
acutter_loops = awl.getLoops()
for l in acutter_loops:
aloops.append(l)
print "All waterlines done. Got", len(loops)," loops in total."
# draw the loops
drawLoops(myscreen, loops, camvtk.yellow)
drawLoops(myscreen, aloops, camvtk.red)
print "done."
myscreen.camera.SetPosition(15, 13, 7)
myscreen.camera.SetFocalPoint(5, 5, 0)
camvtk.drawArrows(myscreen,center=(-0.5,-0.5,-0.5))
camvtk.drawOCLtext(myscreen)
myscreen.render()
myscreen.iren.Start()
#raw_input("Press Enter to terminate")
|
bfirsh/django-old
|
refs/heads/master
|
django/conf/locale/sk/__init__.py
|
12133432
| |
openplans/shareabouts-phlush
|
refs/heads/master
|
src/sa_web/templatetags/__init__.py
|
12133432
| |
knossos-project/PythonQt
|
refs/heads/master
|
examples/NicePyConsole/pygments/styles/default.py
|
135
|
# -*- coding: utf-8 -*-
"""
pygments.styles.default
~~~~~~~~~~~~~~~~~~~~~~~
The default highlighting style.
:copyright: Copyright 2006-2014 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace
class DefaultStyle(Style):
"""
The default style (inspired by Emacs 22).
"""
background_color = "#f8f8f8"
default_style = ""
styles = {
Whitespace: "#bbbbbb",
Comment: "italic #408080",
Comment.Preproc: "noitalic #BC7A00",
#Keyword: "bold #AA22FF",
Keyword: "bold #008000",
Keyword.Pseudo: "nobold",
Keyword.Type: "nobold #B00040",
Operator: "#666666",
Operator.Word: "bold #AA22FF",
Name.Builtin: "#008000",
Name.Function: "#0000FF",
Name.Class: "bold #0000FF",
Name.Namespace: "bold #0000FF",
Name.Exception: "bold #D2413A",
Name.Variable: "#19177C",
Name.Constant: "#880000",
Name.Label: "#A0A000",
Name.Entity: "bold #999999",
Name.Attribute: "#7D9029",
Name.Tag: "bold #008000",
Name.Decorator: "#AA22FF",
String: "#BA2121",
String.Doc: "italic",
String.Interpol: "bold #BB6688",
String.Escape: "bold #BB6622",
String.Regex: "#BB6688",
#String.Symbol: "#B8860B",
String.Symbol: "#19177C",
String.Other: "#008000",
Number: "#666666",
Generic.Heading: "bold #000080",
Generic.Subheading: "bold #800080",
Generic.Deleted: "#A00000",
Generic.Inserted: "#00A000",
Generic.Error: "#FF0000",
Generic.Emph: "italic",
Generic.Strong: "bold",
Generic.Prompt: "bold #000080",
Generic.Output: "#888",
Generic.Traceback: "#04D",
Error: "border:#FF0000"
}
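
# --- Editor's note: the lines below are NOT part of Pygments; they are a small
# sketch of how a style class such as the one above is normally consumed, via a
# formatter. The sample source string is arbitrary.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.lexers import PythonLexer
    from pygments.formatters import HtmlFormatter
    # HtmlFormatter accepts either a style name or a Style subclass
    print(highlight('print "hi"', PythonLexer(), HtmlFormatter(style=DefaultStyle)))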
|
Event38/MissionPlanner
|
refs/heads/master
|
Lib/site-packages/numpy/numarray/ufuncs.py
|
102
|
__all__ = ['abs', 'absolute', 'add', 'arccos', 'arccosh', 'arcsin', 'arcsinh',
'arctan', 'arctan2', 'arctanh', 'bitwise_and', 'bitwise_not',
'bitwise_or', 'bitwise_xor', 'ceil', 'cos', 'cosh', 'divide',
'equal', 'exp', 'fabs', 'floor', 'floor_divide',
'fmod', 'greater', 'greater_equal', 'hypot', 'isnan',
'less', 'less_equal', 'log', 'log10', 'logical_and', 'logical_not',
'logical_or', 'logical_xor', 'lshift', 'maximum', 'minimum',
'minus', 'multiply', 'negative', 'not_equal',
'power', 'product', 'remainder', 'rshift', 'sin', 'sinh', 'sqrt',
'subtract', 'sum', 'tan', 'tanh', 'true_divide',
'conjugate', 'sign']
from numpy import absolute as abs, absolute, add, arccos, arccosh, arcsin, \
arcsinh, arctan, arctan2, arctanh, bitwise_and, invert as bitwise_not, \
bitwise_or, bitwise_xor, ceil, cos, cosh, divide, \
equal, exp, fabs, floor, floor_divide, fmod, greater, greater_equal, \
hypot, isnan, less, less_equal, log, log10, logical_and, \
logical_not, logical_or, logical_xor, left_shift as lshift, \
maximum, minimum, negative as minus, multiply, negative, \
not_equal, power, product, remainder, right_shift as rshift, sin, \
sinh, sqrt, subtract, sum, tan, tanh, true_divide, conjugate, sign
|
jamesgk/ufo2fdk
|
refs/heads/master
|
Lib/ufo2ft/maxContextCalc.py
|
2
|
from __future__ import print_function, division, absolute_import, unicode_literals
__all__ = ['maxCtxFont']
def maxCtxFont(font):
"""Calculate the usMaxContext value for an entire font."""
maxCtx = 0
for tag in ('GSUB', 'GPOS'):
if tag not in font:
continue
table = font[tag].table
if table.LookupList is None:
continue
for lookup in table.LookupList.Lookup:
for st in lookup.SubTable:
maxCtx = maxCtxSubtable(maxCtx, tag, lookup.LookupType, st)
return maxCtx
def maxCtxSubtable(maxCtx, tag, lookupType, st):
"""Calculate usMaxContext based on a single lookup table (and an existing
max value).
"""
# single positioning, single / multiple substitution
if (tag == 'GPOS' and lookupType == 1) or (
tag == 'GSUB' and lookupType in (1, 2, 3)):
maxCtx = max(maxCtx, 1)
# pair positioning
elif tag == 'GPOS' and lookupType == 2:
maxCtx = max(maxCtx, 2)
# ligatures
elif tag == 'GSUB' and lookupType == 4:
for ligatures in st.ligatures.values():
for ligature in ligatures:
maxCtx = max(maxCtx, ligature.CompCount)
# context
elif (tag == 'GPOS' and lookupType == 7) or (
tag == 'GSUB' and lookupType == 5):
maxCtx = maxCtxContextualSubtable(
maxCtx, st, 'Pos' if tag == 'GPOS' else 'Sub')
# chained context
elif (tag == 'GPOS' and lookupType == 8) or (
tag == 'GSUB' and lookupType == 6):
maxCtx = maxCtxContextualSubtable(
maxCtx, st, 'Pos' if tag == 'GPOS' else 'Sub', 'Chain')
# extensions
elif (tag == 'GPOS' and lookupType == 9) or (
tag == 'GSUB' and lookupType == 7):
maxCtx = maxCtxSubtable(
maxCtx, tag, st.ExtensionLookupType, st.ExtSubTable)
# reverse-chained context
elif tag == 'GSUB' and lookupType == 8:
maxCtx = maxCtxContextualRule(maxCtx, st, 'Reverse')
return maxCtx
def maxCtxContextualSubtable(maxCtx, st, ruleType, chain=''):
"""Calculate usMaxContext based on a contextual feature subtable."""
if st.Format == 1:
for ruleset in getattr(st, '%s%sRuleSet' % (chain, ruleType)):
if ruleset is None:
continue
for rule in getattr(ruleset, '%s%sRule' % (chain, ruleType)):
if rule is None:
continue
maxCtx = maxCtxContextualRule(maxCtx, rule, chain)
elif st.Format == 2:
for ruleset in getattr(st, '%s%sClassSet' % (chain, ruleType)):
if ruleset is None:
continue
for rule in getattr(ruleset, '%s%sClassRule' % (chain, ruleType)):
if rule is None:
continue
maxCtx = maxCtxContextualRule(maxCtx, rule, chain)
elif st.Format == 3:
maxCtx = maxCtxContextualRule(maxCtx, st, chain)
return maxCtx
def maxCtxContextualRule(maxCtx, st, chain):
"""Calculate usMaxContext based on a contextual feature rule."""
if not chain:
return max(maxCtx, st.GlyphCount)
elif chain == 'Reverse':
return max(maxCtx, st.GlyphCount + st.LookAheadGlyphCount)
return max(maxCtx, st.InputGlyphCount + st.LookAheadGlyphCount)
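
# --- Editor's note: the demo below is NOT part of ufo2ft. It is a minimal
# sketch of calling maxCtxFont on a compiled font; the font path is hypothetical
# and fontTools is assumed to be importable alongside this module.
if __name__ == '__main__':
    from fontTools.ttLib import TTFont
    font = TTFont('MyFont.otf')  # hypothetical binary font containing GSUB/GPOS
    print('usMaxContext:', maxCtxFont(font))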
|
omnirom/android_external_chromium-org
|
refs/heads/android-5.1
|
tools/find_runtime_symbols/find_runtime_symbols.py
|
102
|
#!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Find symbols in a binary corresponding to given runtime virtual addresses.
Note that source file names are treated as symbols in this script while they
are actually not.
"""
import json
import logging
import os
import sys
from static_symbols import StaticSymbolsInFile
_BASE_PATH = os.path.dirname(os.path.abspath(__file__))
_TOOLS_LINUX_PATH = os.path.join(_BASE_PATH, os.pardir, 'linux')
sys.path.insert(0, _TOOLS_LINUX_PATH)
from procfs import ProcMaps # pylint: disable=F0401
try:
from collections import OrderedDict # pylint: disable=E0611
except ImportError:
_SIMPLEJSON_PATH = os.path.join(_BASE_PATH, os.pardir, os.pardir,
'third_party')
sys.path.insert(0, _SIMPLEJSON_PATH)
from simplejson import OrderedDict
FUNCTION_SYMBOLS = 0
SOURCEFILE_SYMBOLS = 1
TYPEINFO_SYMBOLS = 2
_MAPS_FILENAME = 'maps'
_FILES_FILENAME = 'files.json'
class RuntimeSymbolsInProcess(object):
def __init__(self):
self._maps = None
self._static_symbols_in_filse = {}
def find_procedure(self, runtime_address):
for vma in self._maps.iter(ProcMaps.executable):
if vma.begin <= runtime_address < vma.end:
static_symbols = self._static_symbols_in_filse.get(vma.name)
if static_symbols:
return static_symbols.find_procedure_by_runtime_address(
runtime_address, vma)
else:
return None
return None
def find_sourcefile(self, runtime_address):
for vma in self._maps.iter(ProcMaps.executable):
if vma.begin <= runtime_address < vma.end:
static_symbols = self._static_symbols_in_filse.get(vma.name)
if static_symbols:
return static_symbols.find_sourcefile_by_runtime_address(
runtime_address, vma)
else:
return None
return None
def find_typeinfo(self, runtime_address):
for vma in self._maps.iter(ProcMaps.constants):
if vma.begin <= runtime_address < vma.end:
static_symbols = self._static_symbols_in_filse.get(vma.name)
if static_symbols:
return static_symbols.find_typeinfo_by_runtime_address(
runtime_address, vma)
else:
return None
return None
@staticmethod
def load(prepared_data_dir):
symbols_in_process = RuntimeSymbolsInProcess()
with open(os.path.join(prepared_data_dir, _MAPS_FILENAME), mode='r') as f:
symbols_in_process._maps = ProcMaps.load_file(f)
with open(os.path.join(prepared_data_dir, _FILES_FILENAME), mode='r') as f:
files = json.load(f)
# pylint: disable=W0212
for vma in symbols_in_process._maps.iter(ProcMaps.executable_and_constants):
file_entry = files.get(vma.name)
if not file_entry:
continue
static_symbols = StaticSymbolsInFile(vma.name)
nm_entry = file_entry.get('nm')
if nm_entry and nm_entry['format'] == 'bsd':
with open(os.path.join(prepared_data_dir, nm_entry['file']), 'r') as f:
static_symbols.load_nm_bsd(f, nm_entry['mangled'])
readelf_entry = file_entry.get('readelf-e')
if readelf_entry:
with open(os.path.join(prepared_data_dir, readelf_entry['file']),
'r') as f:
static_symbols.load_readelf_ew(f)
decodedline_file_entry = file_entry.get('readelf-debug-decodedline-file')
if decodedline_file_entry:
with open(os.path.join(prepared_data_dir,
decodedline_file_entry['file']), 'r') as f:
static_symbols.load_readelf_debug_decodedline_file(f)
symbols_in_process._static_symbols_in_filse[vma.name] = static_symbols
return symbols_in_process
def _find_runtime_function_symbols(symbols_in_process, addresses):
result = OrderedDict()
for address in addresses:
if isinstance(address, basestring):
address = int(address, 16)
found = symbols_in_process.find_procedure(address)
if found:
result[address] = found.name
else:
result[address] = '0x%016x' % address
return result
def _find_runtime_sourcefile_symbols(symbols_in_process, addresses):
result = OrderedDict()
for address in addresses:
if isinstance(address, basestring):
address = int(address, 16)
found = symbols_in_process.find_sourcefile(address)
if found:
result[address] = found
else:
result[address] = ''
return result
def _find_runtime_typeinfo_symbols(symbols_in_process, addresses):
result = OrderedDict()
for address in addresses:
if isinstance(address, basestring):
address = int(address, 16)
if address == 0:
result[address] = 'no typeinfo'
else:
found = symbols_in_process.find_typeinfo(address)
if found:
if found.startswith('typeinfo for '):
result[address] = found[13:]
else:
result[address] = found
else:
result[address] = '0x%016x' % address
return result
_INTERNAL_FINDERS = {
FUNCTION_SYMBOLS: _find_runtime_function_symbols,
SOURCEFILE_SYMBOLS: _find_runtime_sourcefile_symbols,
TYPEINFO_SYMBOLS: _find_runtime_typeinfo_symbols,
}
def find_runtime_symbols(symbol_type, symbols_in_process, addresses):
return _INTERNAL_FINDERS[symbol_type](symbols_in_process, addresses)
def main():
# FIX: Accept only .pre data
if len(sys.argv) < 2:
sys.stderr.write("""Usage:
%s /path/to/prepared_data_dir/ < addresses.txt
""" % sys.argv[0])
return 1
log = logging.getLogger('find_runtime_symbols')
log.setLevel(logging.WARN)
handler = logging.StreamHandler()
handler.setLevel(logging.WARN)
formatter = logging.Formatter('%(message)s')
handler.setFormatter(formatter)
log.addHandler(handler)
prepared_data_dir = sys.argv[1]
if not os.path.exists(prepared_data_dir):
log.warn("Nothing found: %s" % prepared_data_dir)
return 1
if not os.path.isdir(prepared_data_dir):
log.warn("Not a directory: %s" % prepared_data_dir)
return 1
symbols_in_process = RuntimeSymbolsInProcess.load(prepared_data_dir)
symbols_dict = find_runtime_symbols(FUNCTION_SYMBOLS,
symbols_in_process,
sys.stdin)
for address, symbol in symbols_dict.iteritems():
if symbol:
print '%016x %s' % (address, symbol)
else:
print '%016x' % address
return 0
if __name__ == '__main__':
sys.exit(main())
|
ptroja/spark2014
|
refs/heads/master
|
include/generate_session.py
|
1
|
import os
import shutil
def exec_gnatprove(file_to_prove, option=""):
cmd = "gnatprove -P spark_lemmas.gpr -U --prover=coq"
if ":" not in file_to_prove:
print (cmd + " " + option + file_to_prove)
os.system(cmd + " " + option + file_to_prove)
else:
print (cmd + " " + option + "--limit-line=" + file_to_prove)
os.system(cmd + " " + option + "--limit-line=" + file_to_prove)
def check_all(f):
for i in f:
exec_gnatprove(i)
os.system("gnatprove -P spark_lemmas.gpr --prover=cvc4 --level=2\
--no-counterexample -j0")
# b = True => change SPARK_Mode to Off
# b = False => change SPARK_Mode to On
def replace_spark_mode(fname, b):
temp = fname + "___tmp.tmp"
with open(temp, 'w') as new:
with open(fname) as f:
for line in f:
if b:
new.write(line.replace("SPARK_Mode => On -- TEST_ON", "SPARK\
_Mode => Off -- TEST_ON"))
else:
new.write(line.replace("SPARK_Mode => Off -- TEST_ON", "SPARK\
_Mode => On -- TEST_ON"))
os.remove(fname)
shutil.move(temp, fname)
# This changes SPARK_Mode in lines tagged with the -- TEST_ON comment marker
# True => change to Off
# False => change to On
def change_all_spark_mode(b):
for files in os.listdir('.'):
if files.endswith(".adb"):
replace_spark_mode(files, b)
def kill_and_regenerate_all():
change_all_spark_mode(False)
os.system("make clean")
os.system("gnatprove -P spark_lemmas.gpr --prover=cvc4 --level=4 \
-f --no-counterexample -j0")
# Force regeneration of coq files where necessary.
    # This step generates the fake Coq files and records their names in the
    # session. It cannot be done in a single step because, if Coq files are
    # already present, it would create new ones instead of checking the
    # existing ones.
with open("manual_proof.in") as f:
for i in f:
exec_gnatprove(i)
# cleaning and regeneration of *.v
os.system("make clean")
os.system("make generate")
with open("manual_proof.in") as v:
for i in v:
exec_gnatprove(i)
# discharge the remaining proofs with z3 and alt-ergo
os.system("gnatprove -P spark_lemmas.gpr --prover=z3 \
--level=2 --no-counterexample -j0")
os.system("gnatprove -P spark_lemmas.gpr --report=all \
--prover=alt-ergo --level=4 --no-counterexample -j0")
change_all_spark_mode(True)
kill_and_regenerate_all()
|
ridfrustum/lettuce
|
refs/heads/master
|
tests/integration/lib/Django-1.2.5/tests/regressiontests/forms/localflavor/ie.py
|
89
|
from django.contrib.localflavor.ie.forms import IECountySelect
from utils import LocalFlavorTestCase
class IELocalFlavorTests(LocalFlavorTestCase):
def test_IECountySelect(self):
f = IECountySelect()
out = u'''<select name="counties">
<option value="antrim">Antrim</option>
<option value="armagh">Armagh</option>
<option value="carlow">Carlow</option>
<option value="cavan">Cavan</option>
<option value="clare">Clare</option>
<option value="cork">Cork</option>
<option value="derry">Derry</option>
<option value="donegal">Donegal</option>
<option value="down">Down</option>
<option value="dublin" selected="selected">Dublin</option>
<option value="fermanagh">Fermanagh</option>
<option value="galway">Galway</option>
<option value="kerry">Kerry</option>
<option value="kildare">Kildare</option>
<option value="kilkenny">Kilkenny</option>
<option value="laois">Laois</option>
<option value="leitrim">Leitrim</option>
<option value="limerick">Limerick</option>
<option value="longford">Longford</option>
<option value="louth">Louth</option>
<option value="mayo">Mayo</option>
<option value="meath">Meath</option>
<option value="monaghan">Monaghan</option>
<option value="offaly">Offaly</option>
<option value="roscommon">Roscommon</option>
<option value="sligo">Sligo</option>
<option value="tipperary">Tipperary</option>
<option value="tyrone">Tyrone</option>
<option value="waterford">Waterford</option>
<option value="westmeath">Westmeath</option>
<option value="wexford">Wexford</option>
<option value="wicklow">Wicklow</option>
</select>'''
self.assertEqual(f.render('counties', 'dublin'), out)
|
varunagrawal/azure-services
|
refs/heads/master
|
varunagrawal/site-packages/django/utils/http.py
|
26
|
import calendar
import datetime
import re
import sys
import urllib
import urlparse
from email.utils import formatdate
from django.utils.datastructures import MultiValueDict
from django.utils.encoding import smart_str, force_unicode
from django.utils.functional import allow_lazy
ETAG_MATCH = re.compile(r'(?:W/)?"((?:\\.|[^"])*)"')
MONTHS = 'jan feb mar apr may jun jul aug sep oct nov dec'.split()
__D = r'(?P<day>\d{2})'
__D2 = r'(?P<day>[ \d]\d)'
__M = r'(?P<mon>\w{3})'
__Y = r'(?P<year>\d{4})'
__Y2 = r'(?P<year>\d{2})'
__T = r'(?P<hour>\d{2}):(?P<min>\d{2}):(?P<sec>\d{2})'
RFC1123_DATE = re.compile(r'^\w{3}, %s %s %s %s GMT$' % (__D, __M, __Y, __T))
RFC850_DATE = re.compile(r'^\w{6,9}, %s-%s-%s %s GMT$' % (__D, __M, __Y2, __T))
ASCTIME_DATE = re.compile(r'^\w{3} %s %s %s %s$' % (__M, __D2, __T, __Y))
def urlquote(url, safe='/'):
"""
A version of Python's urllib.quote() function that can operate on unicode
strings. The url is first UTF-8 encoded before quoting. The returned string
can safely be used as part of an argument to a subsequent iri_to_uri() call
without double-quoting occurring.
"""
return force_unicode(urllib.quote(smart_str(url), smart_str(safe)))
urlquote = allow_lazy(urlquote, unicode)
def urlquote_plus(url, safe=''):
"""
A version of Python's urllib.quote_plus() function that can operate on
unicode strings. The url is first UTF-8 encoded before quoting. The
returned string can safely be used as part of an argument to a subsequent
iri_to_uri() call without double-quoting occurring.
"""
return force_unicode(urllib.quote_plus(smart_str(url), smart_str(safe)))
urlquote_plus = allow_lazy(urlquote_plus, unicode)
def urlunquote(quoted_url):
"""
A wrapper for Python's urllib.unquote() function that can operate on
the result of django.utils.http.urlquote().
"""
return force_unicode(urllib.unquote(smart_str(quoted_url)))
urlunquote = allow_lazy(urlunquote, unicode)
def urlunquote_plus(quoted_url):
"""
A wrapper for Python's urllib.unquote_plus() function that can operate on
the result of django.utils.http.urlquote_plus().
"""
return force_unicode(urllib.unquote_plus(smart_str(quoted_url)))
urlunquote_plus = allow_lazy(urlunquote_plus, unicode)
def urlencode(query, doseq=0):
"""
A version of Python's urllib.urlencode() function that can operate on
    unicode strings. The parameters are first cast to UTF-8 encoded strings and
then encoded as per normal.
"""
if isinstance(query, MultiValueDict):
query = query.lists()
elif hasattr(query, 'items'):
query = query.items()
return urllib.urlencode(
[(smart_str(k),
isinstance(v, (list,tuple)) and [smart_str(i) for i in v] or smart_str(v))
for k, v in query],
doseq)
def cookie_date(epoch_seconds=None):
"""
Formats the time to ensure compatibility with Netscape's cookie standard.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD-Mon-YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s-%s-%s GMT' % (rfcdate[:7], rfcdate[8:11], rfcdate[12:25])
def http_date(epoch_seconds=None):
"""
Formats the time to match the RFC1123 date format as specified by HTTP
RFC2616 section 3.3.1.
Accepts a floating point number expressed in seconds since the epoch, in
UTC - such as that outputted by time.time(). If set to None, defaults to
the current time.
Outputs a string in the format 'Wdy, DD Mon YYYY HH:MM:SS GMT'.
"""
rfcdate = formatdate(epoch_seconds)
return '%s GMT' % rfcdate[:25]
def parse_http_date(date):
"""
Parses a date format as specified by HTTP RFC2616 section 3.3.1.
The three formats allowed by the RFC are accepted, even if only the first
one is still in widespread use.
    Returns a floating point number expressed in seconds since the epoch, in
UTC.
"""
    # email.utils.parsedate does the job for RFC1123 dates; unfortunately
# RFC2616 makes it mandatory to support RFC850 dates too. So we roll
# our own RFC-compliant parsing.
for regex in RFC1123_DATE, RFC850_DATE, ASCTIME_DATE:
m = regex.match(date)
if m is not None:
break
else:
raise ValueError("%r is not in a valid HTTP date format" % date)
try:
year = int(m.group('year'))
if year < 100:
if year < 70:
year += 2000
else:
year += 1900
month = MONTHS.index(m.group('mon').lower()) + 1
day = int(m.group('day'))
hour = int(m.group('hour'))
min = int(m.group('min'))
sec = int(m.group('sec'))
result = datetime.datetime(year, month, day, hour, min, sec)
return calendar.timegm(result.utctimetuple())
except Exception:
raise ValueError("%r is not a valid date" % date)
def parse_http_date_safe(date):
"""
Same as parse_http_date, but returns None if the input is invalid.
"""
try:
return parse_http_date(date)
except Exception:
pass
# Base 36 functions: useful for generating compact URLs
def base36_to_int(s):
"""
    Converts a base 36 string to an ``int``. Raises ``ValueError`` if the
input won't fit into an int.
"""
# To prevent overconsumption of server resources, reject any
    # base36 string that is longer than 13 base36 digits (13 digits
# is sufficient to base36-encode any 64-bit integer)
if len(s) > 13:
raise ValueError("Base36 input too large")
value = int(s, 36)
# ... then do a final check that the value will fit into an int.
if value > sys.maxint:
raise ValueError("Base36 input too large")
return value
def int_to_base36(i):
"""
Converts an integer to a base36 string
"""
digits = "0123456789abcdefghijklmnopqrstuvwxyz"
factor = 0
if not 0 <= i <= sys.maxint:
raise ValueError("Base36 conversion input too large or incorrect type.")
# Find starting factor
while True:
factor += 1
if i < 36 ** factor:
factor -= 1
break
base36 = []
# Construct base36 representation
while factor >= 0:
j = 36 ** factor
base36.append(digits[i // j])
i = i % j
factor -= 1
return ''.join(base36)
def parse_etags(etag_str):
"""
Parses a string with one or several etags passed in If-None-Match and
If-Match headers by the rules in RFC 2616. Returns a list of etags
without surrounding double quotes (") and unescaped from \<CHAR>.
"""
etags = ETAG_MATCH.findall(etag_str)
if not etags:
# etag_str has wrong format, treat it as an opaque string then
return [etag_str]
etags = [e.decode('string_escape') for e in etags]
return etags
def quote_etag(etag):
"""
    Wraps a string in double quotes, escaping contents as necessary.
"""
return '"%s"' % etag.replace('\\', '\\\\').replace('"', '\\"')
if sys.version_info >= (2, 6):
def same_origin(url1, url2):
"""
Checks if two URLs are 'same-origin'
"""
p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)
return (p1.scheme, p1.hostname, p1.port) == (p2.scheme, p2.hostname, p2.port)
else:
# Python 2.5 compatibility. This actually works for Python 2.6 and above,
# but the above definition is much more obviously correct and so is
# preferred going forward.
def same_origin(url1, url2):
"""
Checks if two URLs are 'same-origin'
"""
p1, p2 = urlparse.urlparse(url1), urlparse.urlparse(url2)
return p1[0:2] == p2[0:2]
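
# --- Editor's note: the demo below is NOT part of Django. It is a small,
# self-contained sketch of a few of the helpers above; the sample values are
# arbitrary.
if __name__ == '__main__':
    value = 1234567890
    assert base36_to_int(int_to_base36(value)) == value   # the two helpers are inverses
    print(http_date())                     # 'Wdy, DD Mon YYYY HH:MM:SS GMT' for "now"
    print(parse_etags(r'W/"abc", "def"'))  # -> ['abc', 'def']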
|
ruibarreira/linuxtrail
|
refs/heads/master
|
usr/lib/python2.7/imputil.py
|
228
|
"""
Import utilities
Exported classes:
ImportManager Manage the import process
Importer Base class for replacing standard import functions
BuiltinImporter Emulate the import mechanism for builtin and frozen modules
DynLoadSuffixImporter
"""
from warnings import warnpy3k
warnpy3k("the imputil module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
# note: avoid importing non-builtin modules
import imp ### not available in Jython?
import sys
import __builtin__
# for the DirectoryImporter
import struct
import marshal
__all__ = ["ImportManager","Importer","BuiltinImporter"]
_StringType = type('')
_ModuleType = type(sys) ### doesn't work in Jython...
class ImportManager:
"Manage the import process."
def install(self, namespace=vars(__builtin__)):
"Install this ImportManager into the specified namespace."
if isinstance(namespace, _ModuleType):
namespace = vars(namespace)
# Note: we have no notion of "chaining"
# Record the previous import hook, then install our own.
self.previous_importer = namespace['__import__']
self.namespace = namespace
namespace['__import__'] = self._import_hook
### fix this
#namespace['reload'] = self._reload_hook
def uninstall(self):
"Restore the previous import mechanism."
self.namespace['__import__'] = self.previous_importer
def add_suffix(self, suffix, importFunc):
assert hasattr(importFunc, '__call__')
self.fs_imp.add_suffix(suffix, importFunc)
######################################################################
#
# PRIVATE METHODS
#
clsFilesystemImporter = None
def __init__(self, fs_imp=None):
# we're definitely going to be importing something in the future,
# so let's just load the OS-related facilities.
if not _os_stat:
_os_bootstrap()
# This is the Importer that we use for grabbing stuff from the
# filesystem. It defines one more method (import_from_dir) for our use.
if fs_imp is None:
cls = self.clsFilesystemImporter or _FilesystemImporter
fs_imp = cls()
self.fs_imp = fs_imp
# Initialize the set of suffixes that we recognize and import.
# The default will import dynamic-load modules first, followed by
# .py files (or a .py file's cached bytecode)
for desc in imp.get_suffixes():
if desc[2] == imp.C_EXTENSION:
self.add_suffix(desc[0],
DynLoadSuffixImporter(desc).import_file)
self.add_suffix('.py', py_suffix_importer)
def _import_hook(self, fqname, globals=None, locals=None, fromlist=None):
"""Python calls this hook to locate and import a module."""
parts = fqname.split('.')
# determine the context of this import
parent = self._determine_import_context(globals)
# if there is a parent, then its importer should manage this import
if parent:
module = parent.__importer__._do_import(parent, parts, fromlist)
if module:
return module
# has the top module already been imported?
try:
top_module = sys.modules[parts[0]]
except KeyError:
# look for the topmost module
top_module = self._import_top_module(parts[0])
if not top_module:
# the topmost module wasn't found at all.
raise ImportError, 'No module named ' + fqname
# fast-path simple imports
if len(parts) == 1:
if not fromlist:
return top_module
if not top_module.__dict__.get('__ispkg__'):
# __ispkg__ isn't defined (the module was not imported by us),
# or it is zero.
#
# In the former case, there is no way that we could import
# sub-modules that occur in the fromlist (but we can't raise an
# error because it may just be names) because we don't know how
# to deal with packages that were imported by other systems.
#
# In the latter case (__ispkg__ == 0), there can't be any sub-
# modules present, so we can just return.
#
# In both cases, since len(parts) == 1, the top_module is also
# the "bottom" which is the defined return when a fromlist
# exists.
return top_module
importer = top_module.__dict__.get('__importer__')
if importer:
return importer._finish_import(top_module, parts[1:], fromlist)
# Grrr, some people "import os.path" or do "from os.path import ..."
if len(parts) == 2 and hasattr(top_module, parts[1]):
if fromlist:
return getattr(top_module, parts[1])
else:
return top_module
# If the importer does not exist, then we have to bail. A missing
# importer means that something else imported the module, and we have
# no knowledge of how to get sub-modules out of the thing.
raise ImportError, 'No module named ' + fqname
def _determine_import_context(self, globals):
"""Returns the context in which a module should be imported.
The context could be a loaded (package) module and the imported module
will be looked for within that package. The context could also be None,
meaning there is no context -- the module should be looked for as a
"top-level" module.
"""
if not globals or not globals.get('__importer__'):
# globals does not refer to one of our modules or packages. That
# implies there is no relative import context (as far as we are
# concerned), and it should just pick it off the standard path.
return None
# The globals refer to a module or package of ours. It will define
# the context of the new import. Get the module/package fqname.
parent_fqname = globals['__name__']
# if a package is performing the import, then return itself (imports
# refer to pkg contents)
if globals['__ispkg__']:
parent = sys.modules[parent_fqname]
assert globals is parent.__dict__
return parent
i = parent_fqname.rfind('.')
# a module outside of a package has no particular import context
if i == -1:
return None
# if a module in a package is performing the import, then return the
# package (imports refer to siblings)
parent_fqname = parent_fqname[:i]
parent = sys.modules[parent_fqname]
assert parent.__name__ == parent_fqname
return parent
def _import_top_module(self, name):
# scan sys.path looking for a location in the filesystem that contains
# the module, or an Importer object that can import the module.
for item in sys.path:
if isinstance(item, _StringType):
module = self.fs_imp.import_from_dir(item, name)
else:
module = item.import_top(name)
if module:
return module
return None
def _reload_hook(self, module):
"Python calls this hook to reload a module."
# reloading of a module may or may not be possible (depending on the
# importer), but at least we can validate that it's ours to reload
importer = module.__dict__.get('__importer__')
if not importer:
### oops. now what...
pass
# okay. it is using the imputil system, and we must delegate it, but
# we don't know what to do (yet)
### we should blast the module dict and do another get_code(). need to
### flesh this out and add proper docco...
raise SystemError, "reload not yet implemented"
class Importer:
"Base class for replacing standard import functions."
def import_top(self, name):
"Import a top-level module."
return self._import_one(None, name, name)
######################################################################
#
# PRIVATE METHODS
#
def _finish_import(self, top, parts, fromlist):
# if "a.b.c" was provided, then load the ".b.c" portion down from
# below the top-level module.
bottom = self._load_tail(top, parts)
# if the form is "import a.b.c", then return "a"
if not fromlist:
# no fromlist: return the top of the import tree
return top
# the top module was imported by self.
#
# this means that the bottom module was also imported by self (just
# now, or in the past and we fetched it from sys.modules).
#
# since we imported/handled the bottom module, this means that we can
# also handle its fromlist (and reliably use __ispkg__).
# if the bottom node is a package, then (potentially) import some
# modules.
#
# note: if it is not a package, then "fromlist" refers to names in
# the bottom module rather than modules.
# note: for a mix of names and modules in the fromlist, we will
# import all modules and insert those into the namespace of
# the package module. Python will pick up all fromlist names
# from the bottom (package) module; some will be modules that
# we imported and stored in the namespace, others are expected
# to be present already.
if bottom.__ispkg__:
self._import_fromlist(bottom, fromlist)
# if the form is "from a.b import c, d" then return "b"
return bottom
def _import_one(self, parent, modname, fqname):
"Import a single module."
# has the module already been imported?
try:
return sys.modules[fqname]
except KeyError:
pass
# load the module's code, or fetch the module itself
result = self.get_code(parent, modname, fqname)
if result is None:
return None
module = self._process_result(result, fqname)
# insert the module into its parent
if parent:
setattr(parent, modname, module)
return module
def _process_result(self, result, fqname):
ispkg, code, values = result
# did get_code() return an actual module? (rather than a code object)
is_module = isinstance(code, _ModuleType)
# use the returned module, or create a new one to exec code into
if is_module:
module = code
else:
module = imp.new_module(fqname)
### record packages a bit differently??
module.__importer__ = self
module.__ispkg__ = ispkg
# insert additional values into the module (before executing the code)
module.__dict__.update(values)
# the module is almost ready... make it visible
sys.modules[fqname] = module
# execute the code within the module's namespace
if not is_module:
try:
exec code in module.__dict__
except:
if fqname in sys.modules:
del sys.modules[fqname]
raise
# fetch from sys.modules instead of returning module directly.
# also make module's __name__ agree with fqname, in case
# the "exec code in module.__dict__" played games on us.
module = sys.modules[fqname]
module.__name__ = fqname
return module
def _load_tail(self, m, parts):
"""Import the rest of the modules, down from the top-level module.
Returns the last module in the dotted list of modules.
"""
for part in parts:
fqname = "%s.%s" % (m.__name__, part)
m = self._import_one(m, part, fqname)
if not m:
raise ImportError, "No module named " + fqname
return m
def _import_fromlist(self, package, fromlist):
'Import any sub-modules in the "from" list.'
# if '*' is present in the fromlist, then look for the '__all__'
# variable to find additional items (modules) to import.
if '*' in fromlist:
fromlist = list(fromlist) + \
list(package.__dict__.get('__all__', []))
for sub in fromlist:
# if the name is already present, then don't try to import it (it
# might not be a module!).
if sub != '*' and not hasattr(package, sub):
subname = "%s.%s" % (package.__name__, sub)
submod = self._import_one(package, sub, subname)
if not submod:
raise ImportError, "cannot import name " + subname
def _do_import(self, parent, parts, fromlist):
"""Attempt to import the module relative to parent.
This method is used when the import context specifies that <self>
imported the parent module.
"""
top_name = parts[0]
top_fqname = parent.__name__ + '.' + top_name
top_module = self._import_one(parent, top_name, top_fqname)
if not top_module:
# this importer and parent could not find the module (relatively)
return None
return self._finish_import(top_module, parts[1:], fromlist)
######################################################################
#
# METHODS TO OVERRIDE
#
def get_code(self, parent, modname, fqname):
"""Find and retrieve the code for the given module.
parent specifies a parent module to define a context for importing. It
may be None, indicating no particular context for the search.
modname specifies a single module (not dotted) within the parent.
fqname specifies the fully-qualified module name. This is a
(potentially) dotted name from the "root" of the module namespace
down to the modname.
If there is no parent, then modname==fqname.
This method should return None, or a 3-tuple.
* If the module was not found, then None should be returned.
        * The first item of the 3-tuple should be the integer 0 or 1,
specifying whether the module that was found is a package or not.
* The second item is the code object for the module (it will be
executed within the new module's namespace). This item can also
be a fully-loaded module object (e.g. loaded from a shared lib).
* The third item is a dictionary of name/value pairs that will be
inserted into new module before the code object is executed. This
is provided in case the module's code expects certain values (such
as where the module was found). When the second item is a module
object, then these names/values will be inserted *after* the module
has been loaded/initialized.
"""
raise RuntimeError, "get_code not implemented"
######################################################################
#
# Some handy stuff for the Importers
#
# byte-compiled file suffix character
_suffix_char = __debug__ and 'c' or 'o'
# byte-compiled file suffix
_suffix = '.py' + _suffix_char
def _compile(pathname, timestamp):
"""Compile (and cache) a Python source file.
The file specified by <pathname> is compiled to a code object and
returned.
Presuming the appropriate privileges exist, the bytecodes will be
saved back to the filesystem for future imports. The source file's
modification timestamp must be provided as a Long value.
"""
codestring = open(pathname, 'rU').read()
if codestring and codestring[-1] != '\n':
codestring = codestring + '\n'
code = __builtin__.compile(codestring, pathname, 'exec')
# try to cache the compiled code
try:
f = open(pathname + _suffix_char, 'wb')
except IOError:
pass
else:
f.write('\0\0\0\0')
f.write(struct.pack('<I', timestamp))
marshal.dump(code, f)
f.flush()
f.seek(0, 0)
f.write(imp.get_magic())
f.close()
return code
_os_stat = _os_path_join = None
def _os_bootstrap():
"Set up 'os' module replacement functions for use during import bootstrap."
names = sys.builtin_module_names
join = None
if 'posix' in names:
sep = '/'
from posix import stat
elif 'nt' in names:
sep = '\\'
from nt import stat
elif 'dos' in names:
sep = '\\'
from dos import stat
elif 'os2' in names:
sep = '\\'
from os2 import stat
else:
raise ImportError, 'no os specific module found'
if join is None:
def join(a, b, sep=sep):
if a == '':
return b
lastchar = a[-1:]
if lastchar == '/' or lastchar == sep:
return a + b
return a + sep + b
global _os_stat
_os_stat = stat
global _os_path_join
_os_path_join = join
def _os_path_isdir(pathname):
"Local replacement for os.path.isdir()."
try:
s = _os_stat(pathname)
except OSError:
return None
return (s.st_mode & 0170000) == 0040000
def _timestamp(pathname):
"Return the file modification time as a Long."
try:
s = _os_stat(pathname)
except OSError:
return None
return long(s.st_mtime)
######################################################################
#
# Emulate the import mechanism for builtin and frozen modules
#
class BuiltinImporter(Importer):
def get_code(self, parent, modname, fqname):
if parent:
# these modules definitely do not occur within a package context
return None
# look for the module
if imp.is_builtin(modname):
type = imp.C_BUILTIN
elif imp.is_frozen(modname):
type = imp.PY_FROZEN
else:
# not found
return None
# got it. now load and return it.
module = imp.load_module(modname, None, modname, ('', '', type))
return 0, module, { }
######################################################################
#
# Internal importer used for importing from the filesystem
#
class _FilesystemImporter(Importer):
def __init__(self):
self.suffixes = [ ]
def add_suffix(self, suffix, importFunc):
assert hasattr(importFunc, '__call__')
self.suffixes.append((suffix, importFunc))
def import_from_dir(self, dir, fqname):
result = self._import_pathname(_os_path_join(dir, fqname), fqname)
if result:
return self._process_result(result, fqname)
return None
def get_code(self, parent, modname, fqname):
# This importer is never used with an empty parent. Its existence is
# private to the ImportManager. The ImportManager uses the
# import_from_dir() method to import top-level modules/packages.
# This method is only used when we look for a module within a package.
assert parent
for submodule_path in parent.__path__:
code = self._import_pathname(_os_path_join(submodule_path, modname), fqname)
if code is not None:
return code
return self._import_pathname(_os_path_join(parent.__pkgdir__, modname),
fqname)
def _import_pathname(self, pathname, fqname):
if _os_path_isdir(pathname):
result = self._import_pathname(_os_path_join(pathname, '__init__'),
fqname)
if result:
values = result[2]
values['__pkgdir__'] = pathname
values['__path__'] = [ pathname ]
return 1, result[1], values
return None
for suffix, importFunc in self.suffixes:
filename = pathname + suffix
try:
finfo = _os_stat(filename)
except OSError:
pass
else:
return importFunc(filename, finfo, fqname)
return None
######################################################################
#
# SUFFIX-BASED IMPORTERS
#
def py_suffix_importer(filename, finfo, fqname):
file = filename[:-3] + _suffix
t_py = long(finfo[8])
t_pyc = _timestamp(file)
code = None
if t_pyc is not None and t_pyc >= t_py:
f = open(file, 'rb')
if f.read(4) == imp.get_magic():
t = struct.unpack('<I', f.read(4))[0]
if t == t_py:
code = marshal.load(f)
f.close()
if code is None:
file = filename
code = _compile(file, t_py)
return 0, code, { '__file__' : file }
class DynLoadSuffixImporter:
def __init__(self, desc):
self.desc = desc
def import_file(self, filename, finfo, fqname):
fp = open(filename, self.desc[1])
module = imp.load_module(fqname, fp, filename, self.desc)
module.__file__ = filename
return 0, module, { }
######################################################################
def _print_importers():
items = sys.modules.items()
items.sort()
for name, module in items:
if module:
print name, module.__dict__.get('__importer__', '-- no importer')
else:
print name, '-- non-existent module'
def _test_revamp():
ImportManager().install()
sys.path.insert(0, BuiltinImporter())
######################################################################
#
# TODO
#
# from Finn Bock:
# type(sys) is not a module in Jython. what to use instead?
# imp.C_EXTENSION is not in Jython. same for get_suffixes and new_module
#
# given foo.py of:
# import sys
# sys.modules['foo'] = sys
#
# ---- standard import mechanism
# >>> import foo
# >>> foo
# <module 'sys' (built-in)>
#
# ---- revamped import mechanism
# >>> import imputil
# >>> imputil._test_revamp()
# >>> import foo
# >>> foo
# <module 'foo' from 'foo.py'>
#
#
# from MAL:
# should BuiltinImporter exist in sys.path or hard-wired in ImportManager?
# need __path__ processing
# performance
# move chaining to a subclass [gjs: it's been nuked]
# deinstall should be possible
# query mechanism needed: is a specific Importer installed?
# py/pyc/pyo piping hooks to filter/process these files
# wish list:
# distutils importer hooked to list of standard Internet repositories
# module->file location mapper to speed FS-based imports
# relative imports
# keep chaining so that it can play nice with other import hooks
#
# from Gordon:
# push MAL's mapper into sys.path[0] as a cache (hard-coded for apps)
#
# from Guido:
# need to change sys.* references for rexec environs
# need hook for MAL's walk-me-up import strategy, or Tim's absolute strategy
# watch out for sys.modules[...] is None
# flag to force absolute imports? (speeds _determine_import_context and
# checking for a relative module)
# insert names of archives into sys.path (see quote below)
# note: reload does NOT blast module dict
# shift import mechanisms and policies around; provide for hooks, overrides
# (see quote below)
# add get_source stuff
# get_topcode and get_subcode
# CRLF handling in _compile
# race condition in _compile
# refactoring of os.py to deal with _os_bootstrap problem
# any special handling to do for importing a module with a SyntaxError?
# (e.g. clean up the traceback)
# implement "domain" for path-type functionality using pkg namespace
# (rather than FS-names like __path__)
# don't use the word "private"... maybe "internal"
#
#
# Guido's comments on sys.path caching:
#
# We could cache this in a dictionary: the ImportManager can have a
# cache dict mapping pathnames to importer objects, and a separate
# method for coming up with an importer given a pathname that's not yet
# in the cache. The method should do a stat and/or look at the
# extension to decide which importer class to use; you can register new
# importer classes by registering a suffix or a Boolean function, plus a
# class. If you register a new importer class, the cache is zapped.
# The cache is independent from sys.path (but maintained per
# ImportManager instance) so that rearrangements of sys.path do the
# right thing. If a path is dropped from sys.path the corresponding
# cache entry is simply no longer used.
#
# My/Guido's comments on factoring ImportManager and Importer:
#
# > However, we still have a tension occurring here:
# >
# > 1) implementing policy in ImportManager assists in single-point policy
# > changes for app/rexec situations
# > 2) implementing policy in Importer assists in package-private policy
# > changes for normal, operating conditions
# >
# > I'll see if I can sort out a way to do this. Maybe the Importer class will
# > implement the methods (which can be overridden to change policy) by
# > delegating to ImportManager.
#
# Maybe also think about what kind of policies an Importer would be
# likely to want to change. I have a feeling that a lot of the code
# there is actually not so much policy but a *necessity* to get things
# working given the calling conventions for the __import__ hook: whether
# to return the head or tail of a dotted name, or when to do the "finish
# fromlist" stuff.
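#
# A tiny sketch of the delegation idea above (illustrative only; the method
# name used here is an assumption, not imputil's real interface):
class _PolicyAwareImporter:
    def __init__(self, manager):
        self.manager = manager
    def import_policy(self, name, globals_dict):
        # default behaviour: delegate to the single ImportManager, so app/rexec
        # situations can change policy in one place; a package-private Importer
        # subclass would override this method to change policy locally
        return self.manager.import_policy(name, globals_dict)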
#
|
100Shapes/wagtail
|
refs/heads/master
|
wagtail/wagtailcore/templatetags/pageurl.py
|
1
|
import warnings
from wagtail.utils.deprecation import RemovedInWagtail06Warning
warnings.warn(
"The pageurl tag library has been moved to wagtailcore_tags. "
"Use {% load wagtailcore_tags %} instead.", RemovedInWagtail06Warning)
from wagtail.wagtailcore.templatetags.wagtailcore_tags import register, pageurl
|
scalable-networks/gnuradio-3.7.2.1
|
refs/heads/master
|
gr-blocks/python/blocks/qa_conjugate.py
|
57
|
#!/usr/bin/env python
#
# Copyright 2012,2013 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr, gr_unittest, blocks
class test_conjugate (gr_unittest.TestCase):
def setUp (self):
self.tb = gr.top_block ()
def tearDown (self):
self.tb = None
def test_000 (self):
src_data = (-2-2j, -1-1j, -2+2j, -1+1j,
2-2j, 1-1j, 2+2j, 1+1j,
0+0j)
exp_data = (-2+2j, -1+1j, -2-2j, -1-1j,
2+2j, 1+1j, 2-2j, 1-1j,
0-0j)
src = blocks.vector_source_c(src_data)
op = blocks.conjugate_cc ()
dst = blocks.vector_sink_c ()
self.tb.connect(src, op)
self.tb.connect(op, dst)
self.tb.run()
result_data = dst.data ()
self.assertEqual (exp_data, result_data)
if __name__ == '__main__':
gr_unittest.run(test_conjugate, "test_conjugate.xml")
|
mrlegion/web-portfolio-server
|
refs/heads/master
|
node_modules/node-gyp/gyp/pylib/gyp/generator/gypd.py
|
1824
|
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""gypd output module
This module produces gyp input as its output. Output files are given the
.gypd extension to avoid overwriting the .gyp files that they are generated
from. Internal references to .gyp files (such as those found in
"dependencies" sections) are not adjusted to point to .gypd files instead;
unlike other paths, which are relative to the .gyp or .gypd file, such paths
are relative to the directory from which gyp was run to create the .gypd file.
This generator module is intended to be a sample and a debugging aid, hence
the "d" for "debug" in .gypd. It is useful to inspect the results of the
various merges, expansions, and conditional evaluations performed by gyp
and to see a representation of what would be fed to a generator module.
It's not advisable to rename .gypd files produced by this module to .gyp,
because they will have all merges, expansions, and evaluations already
performed and the relevant constructs not present in the output; paths to
dependencies may be wrong; and various sections that do not belong in .gyp
files such as "included_files" and "*_excluded" will be present. Output
Output will also be stripped of comments. This is not intended to be a
general-purpose gyp pretty-printer; for that, you probably just want to
run "pprint.pprint(eval(open('source.gyp').read()))", which will still strip
comments but won't do all of the other things done to this module's output.
The specific formatting of the output generated by this module is subject
to change.
"""
import gyp.common
import errno
import os
import pprint
# These variables should just be spit back out as variable references.
_generator_identity_variables = [
'CONFIGURATION_NAME',
'EXECUTABLE_PREFIX',
'EXECUTABLE_SUFFIX',
'INTERMEDIATE_DIR',
'LIB_DIR',
'PRODUCT_DIR',
'RULE_INPUT_ROOT',
'RULE_INPUT_DIRNAME',
'RULE_INPUT_EXT',
'RULE_INPUT_NAME',
'RULE_INPUT_PATH',
'SHARED_INTERMEDIATE_DIR',
'SHARED_LIB_DIR',
'SHARED_LIB_PREFIX',
'SHARED_LIB_SUFFIX',
'STATIC_LIB_PREFIX',
'STATIC_LIB_SUFFIX',
]
# gypd doesn't define a default value for OS like many other generator
# modules. Specify "-D OS=whatever" on the command line to provide a value.
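# For example (an illustrative invocation; "project.gyp" is a hypothetical file):
#   gyp -f gypd -D OS=linux project.gyp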
generator_default_variables = {
}
# gypd supports multiple toolsets
generator_supports_multiple_toolsets = True
# TODO(mark): This always uses <, which isn't right. The input module should
# notify the generator to tell it which phase it is operating in, and this
# module should use < for the early phase and then switch to > for the late
# phase. Bonus points for carrying @ back into the output too.
for v in _generator_identity_variables:
generator_default_variables[v] = '<(%s)' % v
def GenerateOutput(target_list, target_dicts, data, params):
output_files = {}
for qualified_target in target_list:
[input_file, target] = \
gyp.common.ParseQualifiedTarget(qualified_target)[0:2]
if input_file[-4:] != '.gyp':
continue
input_file_stem = input_file[:-4]
output_file = input_file_stem + params['options'].suffix + '.gypd'
if not output_file in output_files:
output_files[output_file] = input_file
for output_file, input_file in output_files.iteritems():
output = open(output_file, 'w')
pprint.pprint(data[input_file], output)
output.close()
|
emgirardin/compassion-modules
|
refs/heads/master
|
sbc_compassion/models/__init__.py
|
2
|
# -*- encoding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2015 Compassion CH (http://www.compassion.ch)
# Releasing children from poverty in Jesus' name
# @author: Emmanuel Mathier <emmanuel.mathier@gmail.com>
#
# The licence is in the file __openerp__.py
#
##############################################################################
from . import partner_compassion
from . import correspondence_metadata
from . import correspondence
from . import correspondence_page
from . import correspondence_template
from . import import_config
from . import import_letters_history
from . import import_letter_line
from . import contracts
from . import correspondence_b2s_layout
from . import correspondence_translation_box
from . import project_compassion
|
DrAndrey/tennis_model
|
refs/heads/master
|
tennis_model_scraper/tennis_model_scraper/spiders/__init__.py
|
2415
|
# This package will contain the spiders of your Scrapy project
#
# Please refer to the documentation for information on how to create and manage
# your spiders.
|
PolicyStat/selenium-old
|
refs/heads/master
|
py/test/selenium/webdriver/firefox/test_ff_frame_switching.py
|
4
|
#!/usr/bin/python
#
# Copyright 2008-2010 WebDriver committers
# Copyright 2008-2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from selenium import webdriver
from selenium.test.selenium.webdriver.common import frame_switching_tests
from selenium.test.selenium.webdriver.common.webserver import SimpleWebServer
def setup_module(module):
webserver = SimpleWebServer()
webserver.start()
FirefoxFrameSwitchingTest.webserver = webserver
FirefoxFrameSwitchingTest.driver = webdriver.Firefox()
class FirefoxFrameSwitchingTest(frame_switching_tests.FrameSwitchingTest):
pass
def teardown_module(module):
FirefoxFrameSwitchingTest.driver.quit()
FirefoxFrameSwitchingTest.webserver.stop()
|
abridgett/boto
|
refs/heads/develop
|
tests/unit/mws/test_response.py
|
114
|
#!/usr/bin/env python
from boto.mws.connection import MWSConnection
from boto.mws.response import (ResponseFactory, ResponseElement, Element,
MemberList, ElementList, SimpleList)
from tests.unit import AWSMockServiceTestCase
from boto.compat import filter, map
from tests.compat import unittest
class TestMWSResponse(AWSMockServiceTestCase):
connection_class = MWSConnection
mws = True
def test_parsing_nested_elements(self):
class Test9one(ResponseElement):
Nest = Element()
Zoom = Element()
class Test9Result(ResponseElement):
Item = Element(Test9one)
text = b"""<Test9Response><Test9Result>
<Item>
<Foo>Bar</Foo>
<Nest>
<Zip>Zap</Zip>
<Zam>Zoo</Zam>
</Nest>
<Bif>Bam</Bif>
</Item>
</Test9Result></Test9Response>"""
obj = self.check_issue(Test9Result, text)
Item = obj._result.Item
useful = lambda x: not x[0].startswith('_')
nest = dict(filter(useful, Item.Nest.__dict__.items()))
self.assertEqual(nest, dict(Zip='Zap', Zam='Zoo'))
useful = lambda x: not x[0].startswith('_') and not x[0] == 'Nest'
item = dict(filter(useful, Item.__dict__.items()))
self.assertEqual(item, dict(Foo='Bar', Bif='Bam', Zoom=None))
def test_parsing_member_list_specification(self):
class Test8extra(ResponseElement):
Foo = SimpleList()
class Test8Result(ResponseElement):
Item = MemberList(SimpleList)
Extra = MemberList(Test8extra)
text = b"""<Test8Response><Test8Result>
<Item>
<member>0</member>
<member>1</member>
<member>2</member>
<member>3</member>
</Item>
<Extra>
<member><Foo>4</Foo><Foo>5</Foo></member>
<member></member>
<member><Foo>6</Foo><Foo>7</Foo></member>
</Extra>
</Test8Result></Test8Response>"""
obj = self.check_issue(Test8Result, text)
self.assertSequenceEqual(
list(map(int, obj._result.Item)),
list(range(4)),
)
self.assertSequenceEqual(
list(map(lambda x: list(map(int, x.Foo)), obj._result.Extra)),
[[4, 5], [], [6, 7]],
)
def test_parsing_nested_lists(self):
class Test7Result(ResponseElement):
Item = MemberList(Nest=MemberList(),
List=ElementList(Simple=SimpleList()))
text = b"""<Test7Response><Test7Result>
<Item>
<member>
<Value>One</Value>
<Nest>
<member><Data>2</Data></member>
<member><Data>4</Data></member>
<member><Data>6</Data></member>
</Nest>
</member>
<member>
<Value>Two</Value>
<Nest>
<member><Data>1</Data></member>
<member><Data>3</Data></member>
<member><Data>5</Data></member>
</Nest>
<List>
<Simple>4</Simple>
<Simple>5</Simple>
<Simple>6</Simple>
</List>
<List>
<Simple>7</Simple>
<Simple>8</Simple>
<Simple>9</Simple>
</List>
</member>
<member>
<Value>Six</Value>
<List>
<Complex>Foo</Complex>
<Simple>1</Simple>
<Simple>2</Simple>
<Simple>3</Simple>
</List>
<List>
<Complex>Bar</Complex>
</List>
</member>
</Item>
</Test7Result></Test7Response>"""
obj = self.check_issue(Test7Result, text)
item = obj._result.Item
self.assertEqual(len(item), 3)
nests = [z.Nest for z in filter(lambda x: x.Nest, item)]
self.assertSequenceEqual(
[[y.Data for y in nest] for nest in nests],
[[u'2', u'4', u'6'], [u'1', u'3', u'5']],
)
self.assertSequenceEqual(
[element.Simple for element in item[1].List],
[[u'4', u'5', u'6'], [u'7', u'8', u'9']],
)
self.assertSequenceEqual(
item[-1].List[0].Simple,
['1', '2', '3'],
)
self.assertEqual(item[-1].List[1].Simple, [])
self.assertSequenceEqual(
[e.Value for e in obj._result.Item],
['One', 'Two', 'Six'],
)
def test_parsing_member_list(self):
class Test6Result(ResponseElement):
Item = MemberList()
text = b"""<Test6Response><Test6Result>
<Item>
<member><Value>One</Value></member>
<member><Value>Two</Value>
<Error>Four</Error>
</member>
<member><Value>Six</Value></member>
</Item>
</Test6Result></Test6Response>"""
obj = self.check_issue(Test6Result, text)
self.assertSequenceEqual(
[e.Value for e in obj._result.Item],
['One', 'Two', 'Six'],
)
self.assertTrue(obj._result.Item[1].Error == 'Four')
with self.assertRaises(AttributeError) as e:
obj._result.Item[2].Error
def test_parsing_empty_member_list(self):
class Test5Result(ResponseElement):
Item = MemberList(Nest=MemberList())
text = b"""<Test5Response><Test5Result>
<Item/>
</Test5Result></Test5Response>"""
obj = self.check_issue(Test5Result, text)
self.assertSequenceEqual(obj._result.Item, [])
def test_parsing_missing_member_list(self):
class Test4Result(ResponseElement):
Item = MemberList(NestedItem=MemberList())
text = b"""<Test4Response><Test4Result>
</Test4Result></Test4Response>"""
obj = self.check_issue(Test4Result, text)
self.assertSequenceEqual(obj._result.Item, [])
def test_parsing_element_lists(self):
class Test1Result(ResponseElement):
Item = ElementList()
text = b"""<Test1Response><Test1Result>
<Item><Foo>Bar</Foo></Item>
<Item><Zip>Bif</Zip></Item>
<Item><Foo>Baz</Foo>
<Zam>Zoo</Zam></Item>
</Test1Result></Test1Response>"""
obj = self.check_issue(Test1Result, text)
self.assertTrue(len(obj._result.Item) == 3)
elements = lambda x: getattr(x, 'Foo', getattr(x, 'Zip', '?'))
elements = list(map(elements, obj._result.Item))
self.assertSequenceEqual(elements, ['Bar', 'Bif', 'Baz'])
def test_parsing_missing_lists(self):
class Test2Result(ResponseElement):
Item = ElementList()
text = b"""<Test2Response><Test2Result>
</Test2Result></Test2Response>"""
obj = self.check_issue(Test2Result, text)
self.assertEqual(obj._result.Item, [])
def test_parsing_simple_lists(self):
class Test3Result(ResponseElement):
Item = SimpleList()
text = b"""<Test3Response><Test3Result>
<Item>Bar</Item>
<Item>Bif</Item>
<Item>Baz</Item>
</Test3Result></Test3Response>"""
obj = self.check_issue(Test3Result, text)
self.assertSequenceEqual(obj._result.Item, ['Bar', 'Bif', 'Baz'])
def check_issue(self, klass, text):
action = klass.__name__[:-len('Result')]
factory = ResponseFactory(scopes=[{klass.__name__: klass}])
parser = factory(action, connection=self.service_connection)
return self.service_connection._parse_response(parser, 'text/xml', text)
if __name__ == "__main__":
unittest.main()
|
zhreshold/mxnet
|
refs/heads/master
|
tests/python/unittest/test_subgraph_op.py
|
1
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import ctypes
import mxnet as mx
from mxnet.base import SymbolHandle, check_call, _LIB, mx_uint, c_str_array, c_str, mx_real_t
from mxnet.symbol import Symbol
import numpy as np
from mxnet.test_utils import assert_almost_equal
from mxnet import gluon
from mxnet.gluon import nn
from mxnet import nd
import pytest
import tempfile
def network_structure_1():
data1 = mx.sym.var('data1', shape=(2, 3, 10, 10))
data2 = mx.sym.var('data2')
conv1 = mx.sym.Convolution(data=data1, weight=data2, no_bias=True, kernel=(2, 2), num_filter=1)
conv2 = mx.sym.Convolution(data=data2, no_bias=True, kernel=(1, 1), num_filter=1)
out = mx.sym.Group([conv1, conv2])
return (out, ['data1'], [(2, 3, 10, 10)])
def network_structure_2():
# this tests whether the partitioning algorithm can deal with cycles
data = mx.sym.var('data', shape=(2, 3, 10, 10))
ret = mx.sym.exp(data)
ret1 = mx.sym.cos(ret)
ret2 = mx.sym.sin(ret)
ret = ret1 + ret2
return (ret, ['data'], [(2, 3, 10, 10)])
def network_structure_3():
# this tests whether the partitioned sym can distinguish in_args and aux_states
data = mx.sym.var('data', shape=(2, 3, 10, 10))
ret = mx.sym.exp(data)
ret1 = mx.sym.cos(ret)
ret2 = mx.sym.sin(ret)
ret = ret1 + ret2
ret = mx.sym.BatchNorm(ret)
ret = mx.sym.BatchNorm(ret)
    # Return the sym, the names of 'data' and the auxiliary states, and their shapes
return (ret, ['data'] + ret.list_auxiliary_states(), [(2, 3, 10, 10), (3,), (3,), (3,), (3,)])
def network_structure_4():
# the last op has multiple duplicate outputs
data = mx.sym.var('data', shape=(2, 3, 10, 10))
ret = mx.sym.exp(data)
ret = mx.sym.Group([ret, ret, ret])
return (ret, ['data'], [(2, 3, 10, 10)])
def network_structure_5():
# the subgraph has two duplicate input entries
data = mx.sym.var('data', shape=(2, 3, 10, 10))
ret = data + data
return (ret, ['data'], [(2, 3, 10, 10)])
def network_structure_6():
data1 = mx.sym.Variable('data1', shape=(3, 3, 10, 10), dtype=np.float32)
data2 = mx.sym.Variable('data2', shape=(1, 0, 2, 2))
data3 = mx.sym.sin(data2)
conv = mx.sym.Convolution(data=data1, weight=data3, kernel=(2, 2), num_filter=1)
return (conv, ['data1'], [(3, 3, 10, 10)])
def network_structure_7():
# in this graph, the subgraph node and the other two external nodes form a cycle
data = mx.sym.Variable('data', shape=(1,))
ret1 = mx.sym.sin(data)
ret2 = mx.sym.cos(ret1)
for _ in range(5):
ret2 = mx.sym.cos(ret2)
ret = ret1 + ret2
return (ret, ['data'], [(1,)])
def get_graphs():
return [
(network_structure_1(), ['Convolution']),
(network_structure_2(), ['exp', 'sin', '_Plus', 'elemwise_add', '_plus']),
(network_structure_2(), ['exp', 'cos', '_Plus', 'elemwise_add', '_plus']),
(network_structure_3(), ['exp', 'sin', '_Plus', 'elemwise_add', '_plus']),
(network_structure_3(), ['exp', 'cos', '_Plus', 'elemwise_add', '_plus']),
(network_structure_3(), ['exp', 'sin', '_Plus', 'elemwise_add', '_plus', 'BatchNorm']),
(network_structure_3(), ['exp', 'cos', '_Plus', 'elemwise_add', '_plus', 'BatchNorm']),
(network_structure_3(), ['exp', 'BatchNorm']),
(network_structure_3(), ['BatchNorm']),
(network_structure_4(), ['exp']),
(network_structure_5(), ['_plus', '_Plus', 'elemwise_add']),
(network_structure_6(), []),
(network_structure_6(), [mx.sym.sin.__name__]),
(network_structure_6(), [mx.sym.Convolution.__name__]),
(network_structure_6(), [mx.sym.sin.__name__, mx.sym.Convolution.__name__]),
(network_structure_7(), ['sin', 'elemwise_add', '_plus', '_Plus'])
]
@pytest.mark.parametrize('subgraph_backend', ['default', 'default_v2'])
@pytest.mark.parametrize('sym,op_names', get_graphs())
def test_subgraph_exe1(sym, subgraph_backend, op_names):
"""Use the partitioned sym to simple_bind an executor and compare the outputs
with those of the original executor"""
sym, _, _ = sym
out = SymbolHandle()
check_call(_LIB.MXBuildSubgraphByOpNames(sym.handle, c_str(subgraph_backend), mx_uint(len(op_names)),
c_str_array(op_names), ctypes.byref(out)))
partitioned_sym = Symbol(out)
assert partitioned_sym.list_inputs() == sym.list_inputs()
assert partitioned_sym.list_arguments() == sym.list_arguments()
assert partitioned_sym.list_auxiliary_states() == sym.list_auxiliary_states()
exe = sym.simple_bind(ctx=mx.current_context(), grad_req='null')
partitioned_exe = partitioned_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
input_names = sym.list_inputs()
for name in input_names:
if name in exe.arg_dict:
exe.arg_dict[name][:] = mx.nd.random.uniform(shape=exe.arg_dict[name].shape)
partitioned_exe.arg_dict[name][:] = exe.arg_dict[name]
else:
assert name in exe.aux_dict
exe.aux_dict[name][:] = mx.nd.random.uniform(shape=exe.aux_dict[name].shape)
partitioned_exe.aux_dict[name][:] = exe.aux_dict[name]
exe.forward()
partitioned_exe.forward()
assert len(exe.outputs) == len(partitioned_exe.outputs)
for i in range(len(exe.outputs)):
assert_almost_equal((exe.outputs[i] - partitioned_exe.outputs[i]).abs().sum().asnumpy(),
np.zeros(shape=(1,)))
@pytest.mark.parametrize('subgraph_backend', ['default', 'default_v2'])
@pytest.mark.parametrize('sym,op_names', get_graphs())
def test_subgraph_exe2(sym, subgraph_backend, op_names):
"""Use env var MXNET_SUBGRAPH_BACKEND=default to trigger graph partitioning in simple_bind
and compare results of the partitioned sym and the original sym."""
def get_executor(sym, subgraph_backend=None, op_names=None, original_exec=None):
if subgraph_backend is not None:
os.environ['MXNET_SUBGRAPH_BACKEND'] = subgraph_backend
check_call(_LIB.MXSetSubgraphPropertyOpNames(c_str(subgraph_backend), mx_uint(len(op_names)),
c_str_array(op_names)))
exe = sym.simple_bind(ctx=mx.current_context(), grad_req='null')
input_names = sym.list_inputs()
for name in input_names:
if name in exe.arg_dict:
exe.arg_dict[name][:] = mx.nd.random.uniform(shape=exe.arg_dict[name].shape)\
if original_exec is None else original_exec.arg_dict[name]
else:
assert name in exe.aux_dict
exe.aux_dict[name][:] = mx.nd.random.uniform(shape=exe.aux_dict[name].shape)\
if original_exec is None else original_exec.aux_dict[name]
exe.forward()
if subgraph_backend is not None:
check_call(_LIB.MXRemoveSubgraphPropertyOpNames(c_str(subgraph_backend)))
del os.environ['MXNET_SUBGRAPH_BACKEND']
return exe
sym, _, _ = sym
original_exec = get_executor(sym)
partitioned_exec = get_executor(sym, subgraph_backend, op_names, original_exec)
outputs1 = original_exec.outputs
outputs2 = partitioned_exec.outputs
assert len(outputs1) == len(outputs2)
for i in range(len(outputs1)):
assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,)))
@pytest.mark.parametrize('subgraph_backend', ['default', 'default_v2'])
@pytest.mark.parametrize('sym,op_names', get_graphs())
def test_subgraph_exe3(sym, subgraph_backend, op_names):
"""Use the partitioned sym to bind an executor and compare the outputs
with those of the original executor"""
sym, _, _ = sym
out = SymbolHandle()
check_call(_LIB.MXBuildSubgraphByOpNames(sym.handle, c_str(subgraph_backend), mx_uint(len(op_names)),
c_str_array(op_names), ctypes.byref(out)))
partitioned_sym = Symbol(out)
input_names = sym.list_inputs()
arg_names = sym.list_arguments()
aux_names = sym.list_auxiliary_states()
assert partitioned_sym.list_inputs() == input_names
assert partitioned_sym.list_arguments() == arg_names
assert partitioned_sym.list_auxiliary_states() == aux_names
arg_shapes, _, aux_shapes = sym.infer_shape()
arg_array = [mx.nd.random.uniform(shape=shape) for shape in arg_shapes]
aux_array = [mx.nd.random.uniform(shape=shape) for shape in aux_shapes]
exe = sym.bind(ctx=mx.current_context(), args=arg_array, aux_states=aux_array, grad_req='null')
partitioned_exe = partitioned_sym.bind(ctx=mx.current_context(), args=arg_array,
aux_states=aux_array, grad_req='null')
exe.forward()
partitioned_exe.forward()
assert len(exe.outputs) == len(partitioned_exe.outputs)
for i in range(len(exe.outputs)):
assert_almost_equal((exe.outputs[i] - partitioned_exe.outputs[i]).abs().sum().asnumpy(),
np.zeros(shape=(1,)))
@pytest.mark.parametrize('subgraph_backend', ['default', 'default_v2'])
@pytest.mark.parametrize('sym,op_names', get_graphs())
def test_subgraph_exe4(sym, subgraph_backend, op_names):
"""Use env var MXNET_SUBGRAPH_BACKEND=default to trigger graph partitioning in bind
and compare results of the partitioned sym and the original sym."""
def get_executor(sym, subgraph_backend=None, op_names=None, original_exec=None):
if subgraph_backend is not None:
os.environ['MXNET_SUBGRAPH_BACKEND'] = subgraph_backend
check_call(_LIB.MXSetSubgraphPropertyOpNames(c_str(subgraph_backend), mx_uint(len(op_names)),
c_str_array(op_names)))
arg_shapes, _, aux_shapes = sym.infer_shape()
if subgraph_backend is None:
arg_array = [mx.nd.random.uniform(shape=shape) for shape in arg_shapes]
aux_array = [mx.nd.random.uniform(shape=shape) for shape in aux_shapes]
else:
arg_array = None
aux_array = None
exe = sym.bind(ctx=mx.current_context(),
args=arg_array if subgraph_backend is None else original_exec.arg_arrays,
aux_states=aux_array if subgraph_backend is None else original_exec.aux_arrays,
grad_req='null')
exe.forward()
if subgraph_backend is not None:
check_call(_LIB.MXRemoveSubgraphPropertyOpNames(c_str(subgraph_backend)))
del os.environ['MXNET_SUBGRAPH_BACKEND']
return exe
sym, _, _ = sym
original_exec = get_executor(sym)
partitioned_exec = get_executor(sym, subgraph_backend, op_names, original_exec)
outputs1 = original_exec.outputs
outputs2 = partitioned_exec.outputs
assert len(outputs1) == len(outputs2)
for i in range(len(outputs1)):
assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,)))
def set_random_inputs(exe1, input_names):
"""Sets random values to exe1's args and auxs"""
for name in input_names:
if name in exe1.arg_dict:
exe1.arg_dict[name][:] = mx.nd.random.uniform(shape=exe1.arg_dict[name].shape)
else:
assert name in exe1.aux_dict
exe1.aux_dict[name][:] = mx.nd.random.uniform(shape=exe1.aux_dict[name].shape)
def copy_inputs_between_executors(exe1, exe2, input_names):
"""Copies values of args and auxs from exe1 to exe2"""
for name in input_names:
if name in exe2.arg_dict:
exe2.arg_dict[name][:] = exe1.arg_dict[name]
else:
assert name in exe2.aux_dict
exe2.aux_dict[name][:] = exe1.aux_dict[name]
@pytest.mark.parametrize('subgraph_backend', ['default', 'default_v2'])
@pytest.mark.parametrize('sym,op_names', get_graphs())
def test_subgraph_exe5(sym, subgraph_backend, op_names):
"""Call optimize_for to trigger graph partitioning without infer shapes/types before,
then simple_bind and compare results of the partitioned sym and the original sym."""
# simple_bind
sym, _, _ = sym
exe1 = sym.simple_bind(ctx=mx.current_context(), grad_req='null')
input_names = sym.list_inputs()
set_random_inputs(exe1, input_names)
exe1.forward()
# partition before simple_bind
check_call(_LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend), mx_uint(len(op_names)),
c_str_array(op_names)))
part_sym = sym.optimize_for(subgraph_backend)
check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))
exe2 = part_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
copy_inputs_between_executors(exe1, exe2, input_names)
exe2.forward()
# compare outputs
outputs1 = exe1.outputs
outputs2 = exe2.outputs
assert len(outputs1) == len(outputs2)
for i in range(len(outputs1)):
assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,)))
@pytest.mark.parametrize('subgraph_backend', ['default', 'default_v2'])
@pytest.mark.parametrize('sym,op_names', get_graphs())
def test_subgraph_exe6(sym, subgraph_backend, op_names):
"""Call optimize_for to trigger graph partitioning with shapes/types, then simple_bind
and compare results of the partitioned sym and the original sym."""
# simple_bind
sym, _, _ = sym
exe1 = sym.simple_bind(ctx=mx.current_context(), grad_req='null')
input_names = sym.list_inputs()
set_random_inputs(exe1, input_names)
exe1.forward()
# infer shape/type before partition before simple_bind
check_call(_LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend), mx_uint(len(op_names)),
c_str_array(op_names)))
part_sym = sym.optimize_for(subgraph_backend, exe1.arg_dict, exe1.aux_dict)
check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))
exe2 = part_sym.simple_bind(ctx=mx.current_context(), grad_req='null')
copy_inputs_between_executors(exe1, exe2, input_names)
exe2.forward()
# compare outputs
outputs1 = exe1.outputs
outputs2 = exe2.outputs
assert len(outputs1) == len(outputs2)
for i in range(len(outputs1)):
assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,)))
@pytest.mark.parametrize('subgraph_backend', ['default', 'default_v2'])
@pytest.mark.parametrize('sym,op_names', get_graphs())
def test_subgraph_exe7(sym, subgraph_backend, op_names):
"""Call optimize_for to trigger graph partitioning without infer shapes/types before,
then bind and compare results of the partitioned sym and the original sym."""
# bind
sym, _, _ = sym
arg_shapes, _, aux_shapes = sym.infer_shape()
arg_array = [mx.nd.random.uniform(shape=shape) for shape in arg_shapes]
aux_array = [mx.nd.random.uniform(shape=shape) for shape in aux_shapes]
exe1 = sym.bind(ctx=mx.current_context(), args=arg_array, aux_states=aux_array, grad_req='null')
exe1.forward()
# partition before bind
check_call(_LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend), mx_uint(len(op_names)),
c_str_array(op_names)))
part_sym = sym.optimize_for(subgraph_backend)
check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))
exe2 = part_sym.bind(ctx=mx.current_context(), args=arg_array, aux_states=aux_array, grad_req='null')
exe2.forward()
# compare outputs
outputs1 = exe1.outputs
outputs2 = exe2.outputs
assert len(outputs1) == len(outputs2)
for i in range(len(outputs1)):
assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,)))
@pytest.mark.parametrize('subgraph_backend', ['default', 'default_v2'])
@pytest.mark.parametrize('sym,op_names', get_graphs())
def test_subgraph_exe8(sym, subgraph_backend, op_names):
"""Call optimize_for to infer shapes, types and dtypes followed by graph partitioning,
then bind and compare results of the partitioned sym and the original sym."""
# bind
sym, _, _ = sym
arg_shapes, _, aux_shapes = sym.infer_shape()
arg_array = [mx.nd.random.uniform(shape=shape) for shape in arg_shapes]
aux_array = [mx.nd.random.uniform(shape=shape) for shape in aux_shapes]
exe1 = sym.bind(ctx=mx.current_context(), args=arg_array, aux_states=aux_array, grad_req='null')
exe1.forward()
# infer shape/type before partition before bind
check_call(_LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend), mx_uint(len(op_names)),
c_str_array(op_names)))
part_sym = sym.optimize_for(subgraph_backend, arg_array, aux_array)
check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))
exe2 = part_sym.bind(ctx=mx.current_context(), args=arg_array, aux_states=aux_array, grad_req='null')
exe2.forward()
# compare outputs
outputs1 = exe1.outputs
outputs2 = exe2.outputs
assert len(outputs1) == len(outputs2)
for i in range(len(outputs1)):
assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,)))
@pytest.mark.parametrize('subgraph_backend', ['default', 'default_v2'])
@pytest.mark.parametrize('sym,op_names', get_graphs())
def test_subgraph_backend_gluon(sym, subgraph_backend, op_names, tmpdir):
"""Call hybridize() to partition the graph, and then compare results of the partitioned
    sym and the original sym. Here we do an inference before hybridizing with the
    subgraph_backend, which means we'll pass shapes/types."""
# create Gluon block for given symbol
inputs = [mx.sym.var(i, dtype=mx_real_t) for i in sym[1]]
sym_block = nn.SymbolBlock(sym[0], inputs)
sym_block.initialize(ctx=mx.current_context())
x = [mx.nd.random.uniform(shape=s,ctx=mx.current_context()) for s in sym[2]]
# hybridize and export to get baseline
sym_block.hybridize()
outputs1 = sym_block(*x)
_, json_path = tempfile.mkstemp(suffix='-symbol.json', dir=str(tmpdir))
export_path = json_path.replace('-symbol.json', '')
params_path = export_path + '-0000.params'
sym_block.export(export_path)
# load model and partition
sym_block = nn.SymbolBlock.imports(json_path,sym[1], params_path,
ctx=mx.current_context())
check_call(_LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend), mx_uint(len(op_names)),
c_str_array(op_names)))
sym_block.hybridize(backend=subgraph_backend)
outputs2 = sym_block(*x)
check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))
# compare outputs
assert len(outputs1) == len(outputs2)
for i in range(len(outputs1)):
assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,)))
# Test Gluon HybridBlocks for graph partitioning a network created by HybridSequential.
@pytest.mark.serial
def test_subgraph_backend_gluon_ext1(tmpdir):
def get_net():
net = nn.HybridSequential() # Here we use the class HybridSequential.
net.add(nn.Dense(256, activation='relu'),
nn.Dense(128, activation='relu'),
nn.Dense(2))
return net
# regular inference
x = nd.random.normal(shape=(1, 512),ctx=mx.current_context())
net = get_net()
net.collect_params().initialize(ctx=mx.current_context())
outputs1 = net(x)
param_path = os.path.join(str(tmpdir), 'test_subgraph_backend_gluon_ext1.params')
net.save_parameters(param_path)
# after partitioning
net = get_net()
net.load_parameters(param_path,ctx=mx.current_context())
subgraph_backend = 'default'
op_names = ['FullyConnected']
check_call(_LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend), mx_uint(len(op_names)),
c_str_array(op_names)))
net.hybridize(backend = subgraph_backend)
outputs2 = net(x)
check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))
# compare outputs
assert len(outputs1) == len(outputs2)
for i in range(len(outputs1)):
assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,)))
# Test Gluon HybridBlocks for graph partitioning a network created by HybridBlock.
@pytest.mark.serial
def test_subgraph_backend_gluon_ext2(tmpdir):
class Net(gluon.HybridBlock):
def __init__(self, **kwargs):
super(Net, self).__init__(**kwargs)
with self.name_scope():
self.fc1 = nn.Dense(256)
self.fc2 = nn.Dense(128)
self.fc3 = nn.Dense(2)
def hybrid_forward(self, F, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
return self.fc3(x)
# regular inference
x = nd.random.normal(shape=(1, 512),ctx=mx.current_context())
net = Net()
net.collect_params().initialize(ctx=mx.current_context())
outputs1 = net(x)
param_path = os.path.join(str(tmpdir), 'test_subgraph_backend_gluon_ext2.params')
net.save_parameters(param_path)
# after partitioning
net = Net()
net.load_parameters(param_path, ctx=mx.current_context())
subgraph_backend = 'default'
op_names = ['FullyConnected']
check_call(_LIB.MXSetSubgraphPropertyOpNamesV2(c_str(subgraph_backend), mx_uint(len(op_names)),
c_str_array(op_names)))
net.hybridize(backend = subgraph_backend)
outputs2 = net(x)
check_call(_LIB.MXRemoveSubgraphPropertyOpNamesV2(c_str(subgraph_backend)))
# compare outputs
assert len(outputs1) == len(outputs2)
for i in range(len(outputs1)):
assert_almost_equal((outputs1[i] - outputs2[i]).abs().sum().asnumpy(), np.zeros(shape=(1,)))
|
tyarkoni/pymc3
|
refs/heads/master
|
pymc3/glm/glm.py
|
14
|
import numpy as np
from ..core import *
from ..distributions import *
from ..tuning.starting import find_MAP
import patsy
import theano
import pandas as pd
from collections import defaultdict
from pandas.tools.plotting import scatter_matrix
from . import families
def linear_component(formula, data, priors=None,
intercept_prior=None,
regressor_prior=None,
init_vals=None, family=None,
model=None):
"""Create linear model according to patsy specification.
Parameters
----------
formula : str
Patsy linear model descriptor.
data : array
Labeled array (e.g. pandas DataFrame, recarray).
priors : dict
Mapping prior name to prior distribution.
E.g. {'Intercept': Normal.dist(mu=0, sd=1)}
intercept_prior : pymc3 distribution
Prior to use for the intercept.
Default: Normal.dist(mu=0, tau=1.0E-12)
regressor_prior : pymc3 distribution
Prior to use for all regressor(s).
Default: Normal.dist(mu=0, tau=1.0E-12)
init_vals : dict
Set starting values externally: parameter -> value
Default: None
family : statsmodels.family
Link function to pass to statsmodels (init has to be True).
See `statsmodels.api.families`
Default: identity
Output
------
(y_est, coeffs) : Estimate for y, list of coefficients
Example
-------
# Logistic regression
y_est, coeffs = glm('male ~ height + weight',
htwt_data,
                        family=glm.families.Binomial(link=glm.families.logit))
y_data = Bernoulli('y', y_est, observed=data.male)
"""
if intercept_prior is None:
intercept_prior = Normal.dist(mu=0, tau=1.0E-12)
if regressor_prior is None:
regressor_prior = Normal.dist(mu=0, tau=1.0E-12)
if priors is None:
priors = defaultdict(None)
# Build patsy design matrix and get regressor names.
_, dmatrix = patsy.dmatrices(formula, data)
reg_names = dmatrix.design_info.column_names
if init_vals is None:
init_vals = {}
# Create individual coefficients
model = modelcontext(model)
coeffs = []
if reg_names[0] == 'Intercept':
prior = priors.get('Intercept', intercept_prior)
coeff = model.Var(reg_names.pop(0), prior)
if 'Intercept' in init_vals:
coeff.tag.test_value = init_vals['Intercept']
coeffs.append(coeff)
for reg_name in reg_names:
prior = priors.get(reg_name, regressor_prior)
coeff = model.Var(reg_name, prior)
if reg_name in init_vals:
coeff.tag.test_value = init_vals[reg_name]
coeffs.append(coeff)
y_est = theano.dot(np.asarray(dmatrix), theano.tensor.stack(*coeffs)).reshape((1, -1))
return y_est, coeffs
def glm(*args, **kwargs):
"""Create GLM after Patsy model specification string.
Parameters
----------
formula : str
Patsy linear model descriptor.
data : array
Labeled array (e.g. pandas DataFrame, recarray).
priors : dict
Mapping prior name to prior distribution.
E.g. {'Intercept': Normal.dist(mu=0, sd=1)}
intercept_prior : pymc3 distribution
Prior to use for the intercept.
Default: Normal.dist(mu=0, tau=1.0E-12)
regressor_prior : pymc3 distribution
Prior to use for all regressor(s).
Default: Normal.dist(mu=0, tau=1.0E-12)
init_vals : dict
Set starting values externally: parameter -> value
Default: None
family : Family object
Distribution of likelihood, see pymc3.glm.families
(init has to be True).
Output
------
vars : List of created random variables (y_est, coefficients etc)
Example
-------
# Logistic regression
vars = glm('male ~ height + weight',
data,
family=glm.families.Binomial(link=glm.families.logit))
"""
model = modelcontext(kwargs.get('model'))
family = kwargs.pop('family', families.Normal())
call_find_map = kwargs.pop('find_MAP', True)
formula = args[0]
data = args[1]
y_data = np.asarray(patsy.dmatrices(formula, data)[0]).T
y_est, coeffs = linear_component(*args, **kwargs)
family.create_likelihood(y_est, y_data)
return [y_est] + coeffs
def plot_posterior_predictive(trace, eval=None, lm=None, samples=30, **kwargs):
"""Plot posterior predictive of a linear model.
:Arguments:
trace : <array>
Array of posterior samples with columns
eval : <array>
Array over which to evaluate lm
lm : function <default: linear function>
Function mapping parameters at different points
to their respective outputs.
input: point, sample
output: estimated value
samples : int <default=30>
How many posterior samples to draw.
Additional keyword arguments are passed to pylab.plot().
"""
import matplotlib.pyplot as plt
if lm is None:
lm = lambda x, sample: sample['Intercept'] + sample['x'] * x
if eval is None:
eval = np.linspace(0, 1, 100)
# Set default plotting arguments
if 'lw' not in kwargs and 'linewidth' not in kwargs:
kwargs['lw'] = .2
if 'c' not in kwargs and 'color' not in kwargs:
kwargs['c'] = 'k'
for rand_loc in np.random.randint(0, len(trace), samples):
rand_sample = trace[rand_loc]
plt.plot(eval, lm(eval, rand_sample), **kwargs)
# Make sure to not plot label multiple times
kwargs.pop('label', None)
plt.title('Posterior predictive')
|
RonnyPfannschmidt/whl_setup
|
refs/heads/master
|
whl_setup.py
|
2
|
import subprocess
import sys
import types
import os
import site
if not os.path.isdir('.setup_requires'):
os.mkdir('.setup_requires')
site.addsitedir('.setup_requires')
sys.path.insert(0, sys.path.pop())
# VERSION GOES HERE
class Lazy(object):
@staticmethod
def resolve(obj):
if isinstance(obj, Lazy):
return obj.func()
else:
return obj
def __init__(self, func):
self.func = func
def setup(**kwargs):
setup_requires = Lazy.resolve(kwargs.pop('setup_requires', None))
if setup_requires:
try:
subprocess.check_output([
sys.executable, __file__,
'install', '--target', '.setup_requires'] + setup_requires,
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as ex:
            print(ex.cmd)
            print(ex.output)
            sys.exit(ex.returncode)
from setuptools import setup as real_setup
real_kwargs = {name: Lazy.resolve(value) for name, value in kwargs.items()}
return real_setup(**real_kwargs)
def get_existing_version(path):
if not os.path.isfile(path):
return None
with open(path) as fp:
lines = list(fp)
version = next((x for x in lines if x.startswith('__version__')), None)
if version is None:
return object
return version[version.find("'"):].strip("'\n")
def install():
from pkg_resources import get_distribution
new_version = get_distribution('whl.setup').version
version = get_existing_version('whl_setup.py')
if version is object:
return
print(new_version, version)
import inspect
import whl_setup
source = inspect.getsource(whl_setup).replace(
"# VERSION GOES HERE", "__version__ = '%s'" % new_version)
with open('whl_setup.py', 'w') as fp:
fp.write(source)
if __name__ == '__main__' and sys.argv[1] == 'install':
from pip import main
main(sys.argv[1:])
|
monchitos82/shop
|
refs/heads/master
|
src/app/views.py
|
2
|
# -*- coding: utf-8 -*-
from app import app, cfg, error, language
from validations import validate_user, valid_session
from flask import Flask, abort, jsonify, redirect, render_template, request, session, url_for
import controls
import ast, json, re, string
def is_blocked(endpoint, session):
''' Validate if a view is restricted '''
if endpoint in cfg['RESTRICTED']:
if session.get('isAdmin') != True:
return True
return False
@app.after_request
def add_header(response):
''' custom header to purge cache '''
    response.headers['X-UA-Compatible'] = 'IE=Edge,chrome=1'
response.headers['Cache-Control'] = 'no-cache, max-age=0'
response.headers['Pragma'] = 'no-cache'
response.headers['Expires'] = '0'
return response
@app.before_request
def print_session():
session_dict = dict(session)
session_dict['ip'] = request.remote_addr
app.logger.debug(session_dict)
@app.errorhandler(401)
def page_forbidden(e):
''' Forbidden error '''
return render_template('401.html', language=language), 401
@app.errorhandler(404)
def page_not_found(e):
''' Page not found '''
return render_template('404.html', language=language), 404
@app.errorhandler(500)
def server_error(e):
''' Error on the server '''
return render_template('500.html', language=language), 500
@app.route('/', methods=['GET'])
def index():
''' Base URL '''
return render_template('index.html', language=language, version=cfg['VERSION'], csrf=cfg['CSRF'])
@app.route('/login', methods=['GET', 'POST'])
def login():
''' Return a login form and create a session on POST '''
app.logger.info(request.form.get('username', None))
username = request.form.get('username', None)
password = request.form.get('password', None)
error = None
if request.method == 'POST':
if (not username or
not password or
not validate_user(username, password)):
app.logger.error('Invalid credentials')
abort(401)
else:
session['logged_in'] = True
app.logger.info('User %s logged in' % request.form['username'])
return redirect(url_for('index'))
return redirect(url_for('index'))
@app.route('/logout')
def logout():
''' Finalize a session '''
session.pop('logged_in', None)
session.pop('username', None)
session.pop('isAdmin', False)
return redirect(url_for('index'))
@app.route('/change_password', methods=['GET', 'POST'])
@valid_session(session)
def change_password():
''' Return a change password form and change a password on POST '''
username = request.form.get('username', None)
old_password = request.form.get('old_password', '')
new_password = request.form.get('new_password', '')
new_password_confirm = request.form.get('new_password_confirm', '')
session_user = session.get('username', None)
admin_user = session.get('isAdmin', False)
app.logger.debug((session_user, admin_user))
error_msg = None
if request.method == 'POST':
if not username:
if not admin_user:
username = session_user
else:
return render_template(
'password_error.html',
language=language,
error=error['us-000'],
csrf=cfg['CSRF']
)
if old_password == new_password:
error_msg = error['us-005']
elif new_password != new_password_confirm:
error_msg = error['us-006']
else:
timestamp = controls.common.timestamp()
if username != session_user and not admin_user:
error_msg = error['us-007']
elif ((not old_password and not admin_user) or
not validate_user(username, old_password, admin_user)):
error_msg = error['us-008']
else:
if (controls.common.score_password(new_password) < 0.5 or
len(new_password) < 10):
error_msg = error['us-009']
else:
user = json.loads(controls.user.find_user(username))
app.logger.debug(user)
hash_ = controls.common.generate_hash(
username,
new_password,
cfg['HASH_ROUNDS'],
cfg['SALT_SIZE']
)
class User(object): pass
class Change(object): pass
cls_user = User
cls_change = Change
user['identifier'] = hash_
user['last_update'] = timestamp
if controls.user.insert_user_event(user, 'users', cls_user):
change = {
'user_id': user['user_id'],
'user_name': user['user_name'],
'issuer': session_user,
'stamp': timestamp
}
if not controls.user.insert_user_event(change, 'userchange', cls_change):
app.logger.error(error['us-016'])
return render_template(
'password_succeed.html',
language=language,
message='change_succeed',
csrf=cfg['CSRF'])
return render_template('password_error.html', language=language, error=error_msg, csrf=cfg['CSRF'])
elif request.method == 'GET':
return render_template('change_password.html', language=language, is_admin=admin_user, csrf=cfg['CSRF'])
@app.route('/add_user', methods=['GET', 'POST'])
@valid_session(session)
def add_user():
''' Return a change password form and change a password on POST '''
if not session.get('isAdmin', False):
abort(401)
if request.method == 'POST':
error_msg = None
username = request.form.get('username', None)
password = request.form.get('password', None)
if not username or not password:
error_msg = error['us-011']
else:
if username == session.get('username', None):
error_msg = error['us-012']
else:
if (controls.common.score_password(password) < 0.5 or
len(password) < 10):
error_msg = error['us-009']
else:
user = json.loads(controls.user.find_user(username))
app.logger.info(user)
if user:
error_msg = error['us-013']
else:
hash_ = controls.common.generate_hash(
username,
password,
cfg['HASH_ROUNDS'],
cfg['SALT_SIZE']
)
class User(object): pass
class Change(object): pass
cls_user = User
cls_change = Change
timestamp = controls.common.timestamp()
user = {
'user_name': username,
'identifier': hash_,
'last_update': timestamp
}
if controls.user.insert_user_event(user, 'users', cls_user):
user = json.loads(controls.user.find_user(username))
if 'user_id' in user:
change = {
'user_id': user['user_id'],
'user_name': user['user_name'],
'issuer': session.get('username', ''),
'stamp': timestamp
}
if not controls.user.insert_user_event(change, 'userchange', cls_change):
error_msg = error['us-015']
app.logger.error(error['us-016'])
else:
error_msg = error['us-014']
if error_msg:
app.logger.error(error_msg)
return render_template(
'password_error.html',
language=language,
error=error_msg,
csrf=cfg['CSRF']
)
else:
return render_template(
'password_succeed.html',
language=language,
message='create_succeed',
csrf=cfg['CSRF']
)
elif request.method == 'GET':
return render_template(
'add_user.html',
language=language,
is_admin=session.get('isAdmin', False),
csrf=cfg['CSRF']
)
@app.route('/routes', methods=['POST'])
def routes():
''' Serve the existing routes '''
route = request.get_json()
app.logger.info(route)
response = {}
response['name'] = route['tab']
try:
response['url'] = url_for(response['name'])
app.logger.debug(response)
return jsonify(tabs=response)
except ValueError:
abort(404)
@app.route('/checkout', methods=['GET'])
@valid_session(session)
def checkout():
''' Checkout view '''
return render_template(
'checkout.html',
language=language,
header=cfg['HEADER'],
contact=cfg['CONTACT'],
csrf=cfg['CSRF']
)
@app.route('/checkout/insert', methods=['POST'])
@valid_session(session)
def insert_checkout():
''' New checkout '''
checkout = request.get_json()
app.logger.info(checkout)
if not checkout:
abort(400)
response = controls.checkout.insert_checkout(checkout)
return response
@app.route('/checkout/type/all', methods=['GET'])
def find_checkout_types():
''' Checkout types: sale, quote, refund '''
result_list = []
if cfg['ACTIVITY_TYPES'] != None:
for type_ in cfg['ACTIVITY_TYPES']:
result_dict = {}
result_dict['type_'] = type_
result_list.append(result_dict)
app.logger.debug(result_list)
if is_blocked('checkout/type/all', session) == True:
result_list = []
return jsonify(types=sorted(result_list))
@app.route('/customer', methods=['GET'])
@valid_session(session)
def customer():
''' Customer view '''
return render_template('customer.html', language=language, csrf=cfg['CSRF'])
@app.route('/customer/all', methods=['GET'])
@valid_session(session)
def all_customer():
''' Get all customers '''
response = json.loads(controls.customer.all_customer())
app.logger.debug(response)
if is_blocked('customer/all', session) == True:
response = []
return jsonify(customers=response)
@app.route('/customer/find', methods=['POST'])
@valid_session(session)
def find_customer():
''' Query a customer '''
customer = request.get_json()
app.logger.info(customer)
if not customer:
abort(400)
customer, option = customer['customer'], customer['option']
response = json.loads(controls.customer.find_customer(customer, option))
app.logger.debug(response)
result_list = []
if response != None:
for item in response:
item_dict = {
'customer_id': item['customer_id'],
'name': item['name'],
'tin': item['tin'],
'email': item['email']
}
address_dict = {
'street': item['street'],
'exterior': item['exterior'],
'interior': item['interior'],
'local': item['location'],
'town': item['town'],
'state': item['state'],
'country': item['country'],
'zip_': item['zip_']
}
app.logger.debug(item_dict)
item_dict['address'] = address_dict
result_list.append(item_dict)
return jsonify(customers=sorted(result_list))
@app.route('/customer/insert', methods=['POST'])
@valid_session(session)
def insert_customer():
''' Add/Update a customer '''
customer = request.get_json()
app.logger.info(customer)
if not customer:
abort(400)
return controls.customer.insert_customer(customer)
@app.route('/inventory', methods=['GET'])
def inventory():
''' Inventory view '''
return render_template('inventory.html', language=language, csrf=cfg['CSRF'])
@app.route('/inventory/all', methods=['GET'])
def all_inventory():
''' Get all inventory items '''
response = json.loads(controls.inventory.all_inventory())
app.logger.debug(response)
    # Hide warehouse rows with no stock
    response = [item for item in response if item['amount']]
if is_blocked('inventory/all', session) == True:
response = []
return jsonify(inventories=response)
@app.route('/inventory/find', methods=['POST'])
def find_inventory():
''' Query the inventory '''
inventory = request.get_json()
app.logger.info(inventory)
if not inventory:
abort(400)
inventory, option = inventory['inventory'], inventory['option']
response = json.loads(controls.inventory.find_inventory(inventory, option))
app.logger.debug(response)
result_list = []
if response != None:
if option == 'rop':
for item in response:
item_dict = {
'product_id': item['product_id'],
'name': item['name'],
'barcode': item['barcode'],
'price': (float(item['price'])/100),
'supplier_id': item['supplier_id'],
'rop': item['rop'],
'warehouse':[],
'supplier_name': item['supplier_name'],
'total': item['total']
}
result_list.append(item_dict)
else:
for item in response:
app.logger.debug(item)
warehouse = {}
warehouse['warehouse_id'] = item['warehouse_id']
warehouse['warehouse_name'] = item['warehouse_name']
warehouse['amount'] = item['amount']
if len(result_list) >= 1:
found = 0
for element in result_list:
if element['product_id'] == item['product_id']:
element['warehouse'].append(warehouse)
element['warehouse'] = sorted(element['warehouse'])
found = 1
if found == 0:
warehouse_list = []
warehouse_list.append(warehouse)
item_dict = {
'product_id': item['product_id'],
'name': item['name'],
'barcode': item['barcode'],
'price': (float(item['price'])/100),
'supplier_id': item['supplier_id'],
'rop': item['rop'],
'warehouse': warehouse_list,
'supplier_name': item['supplier_name']
}
result_list.append(item_dict)
else:
warehouse_list = []
warehouse_list.append(warehouse)
item_dict = {
'product_id': item['product_id'],
'name': item['name'],
'barcode': item['barcode'],
'price': (float(item['price'])/100),
'supplier_id': item['supplier_id'],
'rop': item['rop'],
'warehouse': warehouse_list,
'supplier_name': item['supplier_name']
}
result_list.append(item_dict)
for item in result_list:
item['total'] = 0
for element in item['warehouse']:
item['total'] += element['amount']
app.logger.debug(result_list)
return jsonify(inventories=sorted(result_list))
@app.route('/inventory/insert', methods=['POST'])
@valid_session(session)
def insert_inventory():
''' Update the inventory '''
inventory = request.get_json()
app.logger.info(inventory)
if not inventory:
abort(400)
return controls.inventory.insert_inventory(inventory)
@app.route('/offer', methods=['GET'])
def offer():
''' Offer view '''
return render_template('offer.html', language=language, csrf=cfg['CSRF'])
@app.route('/offer/all', methods=['GET'])
def all_offer():
''' Get all offers '''
response = json.loads(controls.offer.all_offer())
app.logger.debug(response)
if is_blocked('offer/all',session) == True:
response = []
return jsonify(offers=response)
@app.route('/offer/find', methods=['POST'])
def find_offer():
''' Find an offer '''
offer = request.get_json()
app.logger.info(offer)
if not offer:
abort(400)
offer, option = offer['offer'], offer['option']
response = json.loads(controls.offer.find_offer(offer, option))
app.logger.debug(response)
result_list = []
if response != None:
for item in response:
product = {}
product['product_id'] = item['product_id']
product['product_name'] = item['product_name']
product['product_type'] = item['type_']
if len(result_list) >= 1:
found = 0
for element in result_list:
if element['offer_id'] == item['offer_id']:
element['products'].append(product)
element['products'] = sorted(element['products'])
found = 1
if found == 0:
product_list = []
product_list.append(product)
item_dict = {
'offer_id': item['offer_id'],
'description': item['description'],
'products': product_list,
'discount': item['discount'],
'start_date': item['start_date'],
'end_date': item['end_date']
}
result_list.append(item_dict)
else:
product_list = []
product_list.append(product)
item_dict = {
'offer_id': item['offer_id'],
'description': item['description'],
'products': product_list,
'discount': item['discount'],
'start_date': item['start_date'],
'end_date': item['end_date']
}
result_list.append(item_dict)
return jsonify(offers=sorted(result_list))
@app.route('/offer/insert', methods=['POST'])
@valid_session(session)
def insert_offer():
''' Add/Update an offer '''
offer = request.get_json()
app.logger.info(offer)
if not offer:
abort(400)
return controls.offer.insert_offer(offer)
@app.route('/product', methods=['GET'])
def product():
''' Product view '''
return render_template('product.html', language=language, csrf=cfg['CSRF'])
@app.route('/product/all', methods=['GET'])
def all_product():
''' Get all products '''
response = json.loads(controls.product.all_product())
app.logger.debug(response)
if is_blocked('product/all',session) == True:
response = []
return jsonify(products=response)
@app.route('/product/find', methods=['POST'])
def find_product():
''' Find a product '''
product = request.get_json()
app.logger.info(product)
if not product:
abort(400)
product, option = product['product'], product['option']
response = json.loads(controls.product.find_product(product, option))
app.logger.debug(response)
return jsonify(products=response)
@app.route('/product/insert', methods=['POST'])
@valid_session(session)
def insert_product():
''' Add/Update a product '''
product = request.get_json()
app.logger.info(product)
if not product:
abort(400)
return controls.product.insert_product(product)
@app.route('/product/bundle/all', methods=['GET'])
def all_bundle():
''' Get all bundles '''
response = json.loads(controls.product.all_bundle())
app.logger.debug(response)
if is_blocked('product/bundle/all',session) == True:
response = []
return jsonify(bundles=response)
@app.route('/product/bundle/find', methods=['POST'])
def find_bundle():
''' Find a bundle '''
bundle = request.get_json()
app.logger.info(bundle)
if not bundle:
abort(400)
bundle, option = bundle['bundle'], bundle['option']
response = json.loads(controls.product.find_bundle(bundle, option))
app.logger.debug(response)
result_list = []
if response != None:
for item in response:
if not any(d['bundle_id'] == item['bundle_id'] for d in result_list):
item_dict = {
'bundle_id': item['bundle_id'],
'name':item['name'],
'barcode': item['barcode'],
'fixed_discount': item['fixed_discount'],
'products': []
}
for row in response:
if row['bundle_id'] == item_dict['bundle_id']:
product_dict = {
'product_id': row['product_id'],
'product_name':row['product_name'],
'amount': row['product_amount']
}
item_dict['products'].append(product_dict)
item_dict['products'] = sorted(item_dict['products'])
result_list.append(item_dict)
return jsonify(bundles=sorted(result_list))
@app.route('/product/bundle/insert', methods=['POST'])
@valid_session(session)
def insert_bundle():
''' Add/Update a bundle '''
bundle = request.get_json()
app.logger.info(bundle)
if not bundle:
abort(400)
return controls.product.insert_bundle(bundle)
@app.route('/product/type/all', methods=['GET'])
def find_product_types():
''' Product types list '''
result_list = []
if cfg['PRODUCT_TYPES'] is not None:
for type_ in cfg['PRODUCT_TYPES']:
result_dict = {}
result_dict['type_'] = type_
result_list.append(result_dict)
app.logger.debug(result_list)
return jsonify(types=sorted(result_list))
@app.route('/warehouse', methods=['GET'])
def warehouse():
''' Warehouse view '''
return render_template('warehouse.html', language=language, csrf=cfg['CSRF'])
@app.route('/warehouse/all', methods=['GET'])
def all_warehouse():
''' Get all warehouses '''
response = json.loads(controls.warehouse.all_warehouse())
app.logger.debug(response)
if is_blocked('warehouse/all', session):
response = []
return jsonify(warehouses=response)
@app.route('/warehouse/find', methods=['POST'])
def find_warehouse():
''' Find a warehouse '''
warehouse = request.get_json()
app.logger.debug(warehouse)
if not warehouse:
abort(400)
warehouse = warehouse['warehouse']
response = json.loads(controls.warehouse.find_warehouse(warehouse))
app.logger.debug(response)
return jsonify(warehouses=sorted(response))
@app.route('/warehouse/insert', methods=['POST'])
@valid_session(session)
def insert_warehouse():
''' Add/Update a warehouse '''
warehouse = request.get_json()
app.logger.info(warehouse)
if not warehouse:
abort(400)
return controls.warehouse.insert_warehouse(warehouse)
@app.route('/store', methods=['GET'])
def store():
''' Store view '''
return render_template('store.html', language=language, csrf=cfg['CSRF'])
@app.route('/store/all', methods=['GET'])
def all_store():
''' Get all stores '''
ip = request.remote_addr
app.logger.info(ip)
if ip in cfg['RESTRICT_BY_IP']:
store = json.loads(
controls.store.find_store(
cfg['RESTRICT_BY_IP'][ip]['store_id'],
'id'
)
)
return jsonify(stores=store)
response = json.loads(controls.store.all_store())
app.logger.debug(response)
if is_blocked('store/all', session):
response = []
return jsonify(stores=response)
@app.route('/store/find', methods=['POST'])
def find_store():
''' Find a store '''
store = request.get_json()
app.logger.info(store)
if not store:
abort(400)
store, option = store['store'], store['option']
response = json.loads(controls.store.find_store(store, option))
app.logger.debug(response)
return jsonify(stores=sorted(response))
@app.route('/store/insert', methods=['POST'])
@valid_session(session)
def insert_store():
''' Add/Update a store '''
store = request.get_json()
app.logger.info(store)
if not store:
abort(400)
return controls.store.insert_store(store)
@app.route('/transaction', methods=['GET'])
@valid_session(session)
def transaction():
''' Transaction view '''
return render_template('transaction.html', language=language, csrf=cfg['CSRF'])
@app.route('/transaction/all', methods=['GET'])
@valid_session(session)
def all_transaction():
''' Get all transactions '''
response = json.loads(controls.transaction.all_transaction())
app.logger.debug(response)
if is_blocked('transaction/all', session):
response = []
return jsonify(transactions=response)
@app.route('/transaction/find', methods=['POST'])
@valid_session(session)
def find_transaction():
''' Find a transaction '''
transaction = request.get_json()
app.logger.info(transaction)
if not transaction:
abort(400)
transaction, option = transaction['transaction'], transaction['option']
response = json.loads(controls.transaction.find_transaction(transaction, option))
app.logger.debug(response)
result_list = []
if response is not None:
for item in response:
item_dict = {
'id_': item['transaction_id'],
'product': item['product_id'],
'store': item['store_id'],
'total': (float(item['total'])/100),
'amount':item['amount'],
'supplier':item['supplier_id'],
'tax': item['tax'],
'action':item['action'],
'status_': item['status_']
}
result_list.append(item_dict)
return jsonify(transactions=sorted(result_list))
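# Note (assumption): the /transaction/find handler above divides 'total' by 100, which
# suggests totals are stored in minor currency units (e.g. cents) and returned here as a
# major-unit float.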
@app.route('/supplier', methods=['GET'])
@valid_session(session)
def supplier():
''' Supplier view '''
return render_template('supplier.html', language=language, csrf=cfg['CSRF'])
@app.route('/supplier/all', methods=['GET'])
@valid_session(session)
def all_supplier():
''' Get all suppliers '''
response = json.loads(controls.supplier.all_supplier())
app.logger.debug(response)
if is_blocked('supplier/all', session):
response = []
return jsonify(suppliers=response)
@app.route('/supplier/find', methods=['POST'])
@valid_session(session)
def find_supplier():
''' Find a supplier '''
supplier = request.get_json()
app.logger.info(supplier)
if not supplier:
abort(400)
supplier = supplier['supplier']
response = json.loads(controls.supplier.find_supplier(supplier))
app.logger.debug(response)
return jsonify(suppliers=sorted(response))
@app.route('/supplier/insert', methods=['POST'])
@valid_session(session)
def insert_supplier():
''' Add/Update a supplier '''
supplier = request.get_json()
app.logger.info(supplier)
if not supplier:
abort(400)
return controls.supplier.insert_supplier(supplier)
@app.route('/about', methods=['GET'])
@valid_session(session)
def about():
''' Describe this API '''
base = cfg['BASE_DIR']
api = []
route_regex = '@app\.route\(\'((/\w*)+)+\', methods={0,1}\[(\'\w+\'(,){0,1})+\]\)'
return_regex = '( )*return .*'
abort_regex = '( )*abort\(\d+\)'
route_c_regex = re.compile(route_regex)
return_c_regex = re.compile(return_regex)
abort_c_regex = re.compile(abort_regex)
with open(base + '/app/views.py') as file_:
endpoint_dict = {}
return_list = []
error_list = []
for line in file_:
if route_c_regex.match(line):
if len(endpoint_dict) > 0:
endpoint_dict['returns'] = return_list
endpoint_dict['errors'] = error_list
if 'endpoint' in endpoint_dict:
api.append(endpoint_dict)
endpoint_dict = {}
endpoint = re.search(route_regex, line).group(1)
methods = re.search(route_regex, line).group(0).split("=")[1]
app.logger.debug(methods)
endpoint_dict = {
'endpoint': endpoint,
'methods': ast.literal_eval(
methods.translate(None,'\()')
)
}
return_list = []
error_list = []
if return_c_regex.match(line):
if 'jsonify' in re.search(return_regex, line).group(0):
if len(return_list) == 0 or 'json object' not in return_list:
return_list.append('json object')
if 'redirect' in re.search(return_regex, line).group(0):
if len(return_list) == 0 or 'redirects' not in return_list:
return_list.append('redirects')
if 'render' in re.search(return_regex, line).group(0):
if len(return_list) == 0 or 'html' not in return_list:
return_list.append('html template')
if 'controls' in re.search(return_regex, line).group(0):
if len(return_list) == 0 or 'controller response' not in return_list:
return_list.append('controller response')
if abort_c_regex.match(line):
# keep only the numeric HTTP status code from e.g. "abort(400)"
error = re.sub(r'\D', '', re.search(abort_regex, line).group(0))
if len(error_list) == 0 or error not in error_list:
error_list.append(error)
api_descriptor = {
'about': cfg['ABOUT'],
'author': cfg['AUTHOR']
}
endpoint_dict['returns'] = return_list
endpoint_dict['errors'] = error_list
if 'endpoint' in endpoint_dict:
api.append(endpoint_dict)
api = sorted(api)
api.insert(0,api_descriptor)
return jsonify(api=api)
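# Illustrative note (added): the /about handler above documents the API by re-parsing
# this very file. route_c_regex is meant to match decorator lines such as
#     @app.route('/offer/find', methods=['POST'])
# where group(1) recovers the endpoint '/offer/find'; the text after 'methods='
# ("['POST'])") is stripped of parentheses via translate() and turned into a Python
# list by ast.literal_eval.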
def shutdown_server():
''' Werkzeug shutdown '''
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
    app.logger.error('Not running with the Werkzeug Server')
    return
func()
@app.route('/shutdown', methods=['GET'])
def shutdown():
''' Shutdown the process '''
if cfg['SHUTDOWN_ENABLED']:
shutdown_server()
return 'Shutdown issued', 201
return 'Shutdown disabled', 403
|
Changaco/oh-mainline
|
refs/heads/master
|
vendor/packages/Django/django/contrib/gis/gdal/tests/test_driver.py
|
199
|
import unittest
from django.contrib.gis.gdal import Driver, OGRException
valid_drivers = ('ESRI Shapefile', 'MapInfo File', 'TIGER', 'S57', 'DGN',
'Memory', 'CSV', 'GML', 'KML')
invalid_drivers = ('Foo baz', 'clucka', 'ESRI Shp')
aliases = {'eSrI' : 'ESRI Shapefile',
'TigER/linE' : 'TIGER',
'SHAPE' : 'ESRI Shapefile',
'sHp' : 'ESRI Shapefile',
}
class DriverTest(unittest.TestCase):
def test01_valid_driver(self):
"Testing valid OGR Data Source Drivers."
for d in valid_drivers:
dr = Driver(d)
self.assertEqual(d, str(dr))
def test02_invalid_driver(self):
"Testing invalid OGR Data Source Drivers."
for i in invalid_drivers:
self.assertRaises(OGRException, Driver, i)
def test03_aliases(self):
"Testing driver aliases."
for alias, full_name in aliases.items():
dr = Driver(alias)
self.assertEqual(full_name, str(dr))
def suite():
s = unittest.TestSuite()
s.addTest(unittest.makeSuite(DriverTest))
return s
def run(verbosity=2):
unittest.TextTestRunner(verbosity=verbosity).run(suite())
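if __name__ == '__main__':
    # Convenience guard (not part of the original module): lets the file be executed
    # directly with `python test_driver.py`; Django's test runner normally calls suite().
    run()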
|
joegomes/deepchem
|
refs/heads/master
|
deepchem/molnet/load_function/delaney_datasets.py
|
1
|
"""
Delaney dataset loader.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import deepchem
def load_delaney(featurizer='ECFP', split='index'):
"""Load delaney datasets."""
# Featurize Delaney dataset
print("About to featurize Delaney dataset.")
if "DEEPCHEM_DATA_DIR" in os.environ:
data_dir = os.environ["DEEPCHEM_DATA_DIR"]
else:
data_dir = "/tmp"
dataset_file = os.path.join(data_dir, "delaney-processed.csv")
if not os.path.exists(dataset_file):
os.system(
'wget -P ' + data_dir +
' http://deepchem.io.s3-website-us-west-1.amazonaws.com/datasets/delaney-processed.csv'
)
delaney_tasks = ['measured log solubility in mols per litre']
if featurizer == 'ECFP':
featurizer = deepchem.feat.CircularFingerprint(size=1024)
elif featurizer == 'GraphConv':
featurizer = deepchem.feat.ConvMolFeaturizer()
elif featurizer == 'Weave':
featurizer = deepchem.feat.WeaveFeaturizer()
elif featurizer == 'Raw':
featurizer = deepchem.feat.RawFeaturizer()
loader = deepchem.data.CSVLoader(
tasks=delaney_tasks, smiles_field="smiles", featurizer=featurizer)
dataset = loader.featurize(dataset_file, shard_size=8192)
# Initialize transformers
transformers = [
deepchem.trans.NormalizationTransformer(
transform_y=True, dataset=dataset)
]
print("About to transform data")
for transformer in transformers:
dataset = transformer.transform(dataset)
splitters = {
'index': deepchem.splits.IndexSplitter(),
'random': deepchem.splits.RandomSplitter(),
'scaffold': deepchem.splits.ScaffoldSplitter()
}
splitter = splitters[split]
train, valid, test = splitter.train_valid_test_split(dataset)
return delaney_tasks, (train, valid, test), transformers
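if __name__ == '__main__':
  # Minimal usage sketch (illustrative, not part of the original loader). Assumes a
  # working deepchem installation and network access for the first download.
  tasks, (train, valid, test), transformers = load_delaney(featurizer='ECFP', split='index')
  print("Loaded tasks:", tasks)
  print("Train/valid/test splits:", train, valid, test)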
|
renyi533/tensorflow
|
refs/heads/master
|
tensorflow/python/keras/utils/multi_gpu_utils_test.py
|
4
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for multi-gpu training utilities."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import data
from tensorflow.python import keras
from tensorflow.python.eager import context
from tensorflow.python.framework import config
from tensorflow.python.framework import test_util
from tensorflow.python.keras.utils import multi_gpu_utils
from tensorflow.python.keras.utils import np_utils
from tensorflow.python.platform import test
def check_if_compatible_devices(gpus=2):
available_devices = [
keras.utils.multi_gpu_utils._normalize_device_name(name)
for name in keras.utils.multi_gpu_utils._get_available_devices()
]
if '/gpu:%d' % (gpus - 1) not in available_devices:
return False
return True
@test_util.run_all_in_deprecated_graph_mode_only
class TestMultiGPUModel(test.TestCase):
def __init__(self, methodName='runTest'): # pylint: disable=invalid-name
super(TestMultiGPUModel, self).__init__(methodName)
gpu_devices = config.list_physical_devices('GPU')
if len(gpu_devices) == 1:
# A GPU is available, simulate 2 instead.
config.set_logical_device_configuration(gpu_devices[0], [
context.LogicalDeviceConfiguration(500),
context.LogicalDeviceConfiguration(500)
])
def test_multi_gpu_test_simple_model(self):
gpus = 2
num_samples = 1000
input_dim = 10
output_dim = 1
hidden_dim = 10
epochs = 2
target_gpu_id = [0, 1]
if not check_if_compatible_devices(gpus=gpus):
self.skipTest('multi gpu only')
with self.cached_session():
model = keras.models.Sequential()
model.add(keras.layers.Dense(hidden_dim,
input_shape=(input_dim,)))
model.add(keras.layers.Dense(output_dim))
x = np.random.random((num_samples, input_dim))
y = np.random.random((num_samples, output_dim))
parallel_model = multi_gpu_utils.multi_gpu_model(model, gpus=gpus)
parallel_model.compile(loss='mse', optimizer='rmsprop')
parallel_model.fit(x, y, epochs=epochs)
parallel_model = multi_gpu_utils.multi_gpu_model(
model, gpus=target_gpu_id)
parallel_model.compile(loss='mse', optimizer='rmsprop')
parallel_model.fit(x, y, epochs=epochs)
def test_multi_gpu_test_multi_io_model(self):
gpus = 2
num_samples = 1000
input_dim_a = 10
input_dim_b = 5
output_dim_a = 1
output_dim_b = 2
hidden_dim = 10
epochs = 2
target_gpu_id = [0, 1]
if not check_if_compatible_devices(gpus=gpus):
self.skipTest('multi gpu only')
with self.cached_session():
input_a = keras.Input((input_dim_a,))
input_b = keras.Input((input_dim_b,))
a = keras.layers.Dense(hidden_dim)(input_a)
b = keras.layers.Dense(hidden_dim)(input_b)
c = keras.layers.concatenate([a, b])
output_a = keras.layers.Dense(output_dim_a)(c)
output_b = keras.layers.Dense(output_dim_b)(c)
model = keras.models.Model([input_a, input_b], [output_a, output_b])
a_x = np.random.random((num_samples, input_dim_a))
b_x = np.random.random((num_samples, input_dim_b))
a_y = np.random.random((num_samples, output_dim_a))
b_y = np.random.random((num_samples, output_dim_b))
parallel_model = multi_gpu_utils.multi_gpu_model(model, gpus=gpus)
parallel_model.compile(loss='mse', optimizer='rmsprop')
parallel_model.fit([a_x, b_x], [a_y, b_y], epochs=epochs)
parallel_model = multi_gpu_utils.multi_gpu_model(
model, gpus=target_gpu_id)
parallel_model.compile(loss='mse', optimizer='rmsprop')
parallel_model.fit([a_x, b_x], [a_y, b_y], epochs=epochs)
def test_multi_gpu_test_invalid_devices(self):
if not check_if_compatible_devices(gpus=2):
self.skipTest('multi gpu only')
with self.cached_session():
input_shape = (1000, 10)
model = keras.models.Sequential()
model.add(keras.layers.Dense(10,
activation='relu',
input_shape=input_shape[1:]))
model.add(keras.layers.Dense(1, activation='sigmoid'))
model.compile(loss='mse', optimizer='rmsprop')
x = np.random.random(input_shape)
y = np.random.random((input_shape[0], 1))
with self.assertRaises(ValueError):
parallel_model = multi_gpu_utils.multi_gpu_model(
model, gpus=len(keras.backend._get_available_gpus()) + 1)
parallel_model.fit(x, y, epochs=2)
with self.assertRaises(ValueError):
parallel_model = multi_gpu_utils.multi_gpu_model(
model, gpus=[0, 2, 4, 6, 8])
parallel_model.fit(x, y, epochs=2)
with self.assertRaises(ValueError):
parallel_model = multi_gpu_utils.multi_gpu_model(model, gpus=1)
parallel_model.fit(x, y, epochs=2)
with self.assertRaises(ValueError):
parallel_model = multi_gpu_utils.multi_gpu_model(model, gpus=[0])
parallel_model.fit(x, y, epochs=2)
def test_nested_model_with_tensor_input(self):
gpus = 2
input_dim = 10
shape = (input_dim,)
num_samples = 16
num_classes = 10
if not check_if_compatible_devices(gpus=gpus):
self.skipTest('multi gpu only')
with self.cached_session():
input_shape = (num_samples,) + shape
x_train = np.random.randint(0, 255, input_shape)
y_train = np.random.randint(0, num_classes, (input_shape[0],))
y_train = np_utils.to_categorical(y_train, num_classes)
x_train = x_train.astype('float32')
y_train = y_train.astype('float32')
dataset = data.Dataset.from_tensor_slices((x_train, y_train))
dataset = dataset.repeat()
dataset = dataset.batch(4)
iterator = data.make_one_shot_iterator(dataset)
inputs, targets = iterator.get_next()
input_tensor = keras.layers.Input(tensor=inputs)
model = keras.models.Sequential()
model.add(keras.layers.Dense(3,
input_shape=(input_dim,)))
model.add(keras.layers.Dense(num_classes))
output = model(input_tensor)
outer_model = keras.Model(input_tensor, output)
parallel_model = multi_gpu_utils.multi_gpu_model(outer_model, gpus=gpus)
parallel_model.compile(
loss='categorical_crossentropy',
optimizer=keras.optimizers.RMSprop(lr=0.0001, decay=1e-6),
metrics=['accuracy'],
target_tensors=[targets])
parallel_model.fit(epochs=1, steps_per_epoch=3)
def test_multi_gpu_with_multi_input_layers(self):
gpus = 2
if not check_if_compatible_devices(gpus=gpus):
self.skipTest('multi gpu only')
with self.cached_session():
inputs = keras.Input((4, 3))
init_state = keras.Input((3,))
outputs = keras.layers.SimpleRNN(
3, return_sequences=True)(inputs, initial_state=init_state)
x = [np.random.randn(2, 4, 3), np.random.randn(2, 3)]
y = np.random.randn(2, 4, 3)
model = keras.Model([inputs, init_state], outputs)
parallel_model = multi_gpu_utils.multi_gpu_model(model, gpus=gpus)
parallel_model.compile(loss='mean_squared_error', optimizer='adam')
parallel_model.train_on_batch(x, y)
def test_multi_gpu_with_siamese_network(self):
gpus = 2
if not check_if_compatible_devices(gpus=gpus):
self.skipTest('multi gpu only')
with self.cached_session():
input_shape = (3,)
nested_model = keras.models.Sequential([
keras.layers.Dense(32, input_shape=input_shape),
keras.layers.Dense(1)
], name='nested')
input1 = keras.Input(input_shape)
input2 = keras.Input(input_shape)
score1 = nested_model(input1)
score2 = nested_model(input2)
score_sum = keras.layers.Add(name='add')([score1, score2])
siamese = keras.models.Model(inputs=[input1, input2],
outputs=[score_sum, score1, score2],
name='siamese')
parallel_siamese = multi_gpu_utils.multi_gpu_model(siamese, gpus)
self.assertEqual(parallel_siamese.output_names,
['add', 'nested', 'nested_1'])
if __name__ == '__main__':
test.main()
|