repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
googlearchive/cloud-playground | __pg/fixit.py | 2 | 1835 | """Module migrating/fixing schemas."""
import webapp2
from mimic.__mimic import common
from google.appengine.api import taskqueue
from google.appengine.datastore.datastore_query import Cursor
from . import model
from . import secret
from . import settings
from . import shared
# number of entities to fix at a time
_CURSOR_PAGE_SIZE = 200
def Begin():
  """Kick off the fix-it migration by enqueuing the first task."""
  taskqueue.add(url='/playground/fix/project', queue_name='fixit')
def FixProject(project):
  """Fix or update a single Project entity.

  Applies two repairs, writing the entity back only if something changed:
  - populates a missing ``access_key`` with a fresh random secret
  - drops the obsolete ``end_user_url`` datastore property

  Args:
    project: a model.Project entity to migrate in place.
  """
  shared.w(project.key.id())
  dirty = False
  if not project.access_key:
    project.access_key = secret.GenerateRandomString()
    dirty = True
  # pylint:disable-msg=protected-access
  # BUGFIX: dict.has_key() is deprecated (and removed in Python 3);
  # the `in` operator is equivalent and works on both 2.x and 3.x.
  if 'end_user_url' in project._properties:
    project._properties.pop('end_user_url')
    dirty = True
  if dirty:
    project.put()
    shared.w('fixed {}'.format(project.key))
class ProjectHandler(webapp2.RequestHandler):
  """Task-queue handler that fixes Project entities one page at a time.

  Each invocation processes _CURSOR_PAGE_SIZE entities and, if more remain,
  enqueues a follow-up task carrying the next query cursor.
  """

  def post(self):  # pylint:disable-msg=invalid-name,missing-docstring
    # Refuse requests that did not originate from the task queue
    # (App Engine sets this header only for queue-dispatched requests).
    assert self.request.environ[common.HTTP_X_APPENGINE_QUEUENAME]
    query = model.Project.query(namespace=settings.PLAYGROUND_NAMESPACE)
    cursor = self.request.get('cursor', None)
    if cursor:
      cursor = Cursor(urlsafe=cursor)
    projects, next_cursor, more = query.fetch_page(_CURSOR_PAGE_SIZE,
                                                   start_cursor=cursor)
    # Enqueue the next page *before* fixing this one, so pages run
    # concurrently and a failure here does not stall the chain.
    if more and next_cursor:
      taskqueue.add(queue_name='fixit',
                    url='/playground/fix/project',
                    params={'cursor': next_cursor.urlsafe()})
    for project in projects:
      FixProject(project)
    if not next_cursor:
      shared.w('REACHED END OF QUERY CURSOR, '
               'ALTHOUGH OTHER TASKS MAY STILL BE EXECUTING')
# WSGI application exposing the fix-it task endpoint (task queue target).
app = webapp2.WSGIApplication([
    ('/playground/fix/project', ProjectHandler),
], debug=True)
| apache-2.0 |
cg31/tensorflow | tensorflow/python/util/net_lib.py | 21 | 1028 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A Python interface for creating TensorFlow tests."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python import pywrap_tensorflow
def pick_unused_port_or_die():
  """Find an unused port on localhost.

  Delegates to the native PickUnusedPortOrDie implementation; per its name
  it is expected to abort the process rather than return on failure.

  Returns:
    A port number on localhost (presumably an int — determined by the
    native wrapper; confirm against pywrap_tensorflow).
  """
  return pywrap_tensorflow.PickUnusedPortOrDie()
| apache-2.0 |
Jionglun/-w16b_test | static/Brython3.1.1-20150328-091302/Lib/multiprocessing/pool.py | 694 | 23263 | #
# Module providing the `Pool` class for managing a process pool
#
# multiprocessing/pool.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
__all__ = ['Pool']
#
# Imports
#
import threading
import queue
import itertools
import collections
import time
from multiprocessing import Process, cpu_count, TimeoutError
from multiprocessing.util import Finalize, debug
#
# Constants representing the state of a pool
#
RUN = 0
CLOSE = 1
TERMINATE = 2
#
# Miscellaneous
#
job_counter = itertools.count()
def mapstar(args):
    """Unpack (func, iterable, ...) and run the builtin map over it."""
    func = args[0]
    return list(map(func, *args[1:]))
def starmapstar(args):
    """Apply args[0] to every argument tuple in args[1] (starmap form)."""
    func, argtuples = args[0], args[1]
    return [func(*argtuple) for argtuple in argtuples]
#
# Code run by worker processes
#
class MaybeEncodingError(Exception):
    """Wraps possible unpickleable errors, so they can be
    safely sent through the socket."""

    def __init__(self, exc, value):
        # Keep only the reprs: the original objects may be the very
        # things that failed to pickle.
        self.exc = repr(exc)
        self.value = repr(value)
        super(MaybeEncodingError, self).__init__(self.exc, self.value)

    def __str__(self):
        return "Error sending result: '{0}'. Reason: '{1}'".format(
            self.value, self.exc)

    def __repr__(self):
        return "<MaybeEncodingError: {0}>".format(self)
def worker(inqueue, outqueue, initializer=None, initargs=(), maxtasks=None):
    """Main loop run inside each pool worker process.

    Pulls (job, i, func, args, kwds) tasks from inqueue, executes them and
    puts (job, i, (success, value)) results on outqueue.  Exits on the
    ``None`` sentinel, on a broken queue, or after ``maxtasks`` tasks.
    """
    assert maxtasks is None or (type(maxtasks) == int and maxtasks > 0)
    # Bind the bound methods once; these are called in a tight loop.
    put = outqueue.put
    get = inqueue.get
    if hasattr(inqueue, '_writer'):
        # Process-backed pool: close the pipe ends this process never uses.
        inqueue._writer.close()
        outqueue._reader.close()
    if initializer is not None:
        initializer(*initargs)
    completed = 0
    while maxtasks is None or (maxtasks and completed < maxtasks):
        try:
            task = get()
        except (EOFError, IOError):
            debug('worker got EOFError or IOError -- exiting')
            break
        if task is None:
            # Sentinel sent by the task handler on shutdown.
            debug('worker got sentinel -- exiting')
            break
        job, i, func, args, kwds = task
        try:
            result = (True, func(*args, **kwds))
        except Exception as e:
            # Failures are shipped back as (False, exception).
            result = (False, e)
        try:
            put((job, i, result))
        except Exception as e:
            # The result itself may be unpicklable; report that instead
            # of silently losing the task.
            wrapped = MaybeEncodingError(e, result[1])
            debug("Possible encoding error while sending result: %s" % (
                wrapped))
            put((job, i, (False, wrapped)))
        completed += 1
    debug('worker exiting after %d tasks' % completed)
#
# Class representing a process pool
#
class Pool(object):
    '''
    Class which supports an async version of applying functions to arguments.
    '''
    # Factory for worker processes; ThreadPool overrides this with a
    # thread-backed dummy Process.
    Process = Process

    def __init__(self, processes=None, initializer=None, initargs=(),
                 maxtasksperchild=None):
        self._setup_queues()
        self._taskqueue = queue.Queue()
        # job id -> pending result object (ApplyResult/MapResult/...).
        self._cache = {}
        self._state = RUN
        self._maxtasksperchild = maxtasksperchild
        self._initializer = initializer
        self._initargs = initargs
        if processes is None:
            try:
                processes = cpu_count()
            except NotImplementedError:
                processes = 1
        if processes < 1:
            raise ValueError("Number of processes must be at least 1")
        if initializer is not None and not callable(initializer):
            raise TypeError('initializer must be a callable')
        self._processes = processes
        self._pool = []
        self._repopulate_pool()
        # Three daemon threads drive the pool:
        # _worker_handler replaces dead workers,
        # _task_handler feeds tasks to workers,
        # _result_handler routes results back into self._cache.
        self._worker_handler = threading.Thread(
            target=Pool._handle_workers,
            args=(self, )
            )
        self._worker_handler.daemon = True
        self._worker_handler._state = RUN
        self._worker_handler.start()
        self._task_handler = threading.Thread(
            target=Pool._handle_tasks,
            args=(self._taskqueue, self._quick_put, self._outqueue, self._pool)
            )
        self._task_handler.daemon = True
        self._task_handler._state = RUN
        self._task_handler.start()
        self._result_handler = threading.Thread(
            target=Pool._handle_results,
            args=(self._outqueue, self._quick_get, self._cache)
            )
        self._result_handler.daemon = True
        self._result_handler._state = RUN
        self._result_handler.start()
        # Finalizer runs _terminate_pool at most once (GC or explicit).
        self._terminate = Finalize(
            self, self._terminate_pool,
            args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
                  self._worker_handler, self._task_handler,
                  self._result_handler, self._cache),
            exitpriority=15
            )

    def _join_exited_workers(self):
        """Cleanup after any worker processes which have exited due to reaching
        their specified lifetime. Returns True if any workers were cleaned up.
        """
        cleaned = False
        # Iterate in reverse so `del` does not shift unvisited indices.
        for i in reversed(range(len(self._pool))):
            worker = self._pool[i]
            if worker.exitcode is not None:
                # worker exited
                debug('cleaning up worker %d' % i)
                worker.join()
                cleaned = True
                del self._pool[i]
        return cleaned

    def _repopulate_pool(self):
        """Bring the number of pool processes up to the specified number,
        for use after reaping workers which have exited.
        """
        for i in range(self._processes - len(self._pool)):
            w = self.Process(target=worker,
                             args=(self._inqueue, self._outqueue,
                                   self._initializer,
                                   self._initargs, self._maxtasksperchild)
                            )
            self._pool.append(w)
            w.name = w.name.replace('Process', 'PoolWorker')
            w.daemon = True
            w.start()
            debug('added worker')

    def _maintain_pool(self):
        """Clean up any exited workers and start replacements for them.
        """
        if self._join_exited_workers():
            self._repopulate_pool()

    def _setup_queues(self):
        # Process pool uses pipe-backed SimpleQueues; the _quick_* bound
        # methods skip a layer of indirection on the hot path.
        from .queues import SimpleQueue
        self._inqueue = SimpleQueue()
        self._outqueue = SimpleQueue()
        self._quick_put = self._inqueue._writer.send
        self._quick_get = self._outqueue._reader.recv

    def apply(self, func, args=(), kwds={}):
        '''
        Equivalent of `func(*args, **kwds)`.
        '''
        # NOTE: kwds={} default is never mutated here, only passed through.
        assert self._state == RUN
        return self.apply_async(func, args, kwds).get()

    def map(self, func, iterable, chunksize=None):
        '''
        Apply `func` to each element in `iterable`, collecting the results
        in a list that is returned.
        '''
        return self._map_async(func, iterable, mapstar, chunksize).get()

    def starmap(self, func, iterable, chunksize=None):
        '''
        Like `map()` method but the elements of the `iterable` are expected to
        be iterables as well and will be unpacked as arguments. Hence
        `func` and (a, b) becomes func(a, b).
        '''
        return self._map_async(func, iterable, starmapstar, chunksize).get()

    def starmap_async(self, func, iterable, chunksize=None, callback=None,
                      error_callback=None):
        '''
        Asynchronous version of `starmap()` method.
        '''
        return self._map_async(func, iterable, starmapstar, chunksize,
                               callback, error_callback)

    def imap(self, func, iterable, chunksize=1):
        '''
        Equivalent of `map()` -- can be MUCH slower than `Pool.map()`.
        '''
        if self._state != RUN:
            raise ValueError("Pool not running")
        if chunksize == 1:
            result = IMapIterator(self._cache)
            self._taskqueue.put((((result._job, i, func, (x,), {})
                         for i, x in enumerate(iterable)), result._set_length))
            return result
        else:
            # chunksize > 1: batch the items, then re-flatten on the way out.
            assert chunksize > 1
            task_batches = Pool._get_tasks(func, iterable, chunksize)
            result = IMapIterator(self._cache)
            self._taskqueue.put((((result._job, i, mapstar, (x,), {})
                     for i, x in enumerate(task_batches)), result._set_length))
            return (item for chunk in result for item in chunk)

    def imap_unordered(self, func, iterable, chunksize=1):
        '''
        Like `imap()` method but ordering of results is arbitrary.
        '''
        if self._state != RUN:
            raise ValueError("Pool not running")
        if chunksize == 1:
            result = IMapUnorderedIterator(self._cache)
            self._taskqueue.put((((result._job, i, func, (x,), {})
                         for i, x in enumerate(iterable)), result._set_length))
            return result
        else:
            assert chunksize > 1
            task_batches = Pool._get_tasks(func, iterable, chunksize)
            result = IMapUnorderedIterator(self._cache)
            self._taskqueue.put((((result._job, i, mapstar, (x,), {})
                     for i, x in enumerate(task_batches)), result._set_length))
            return (item for chunk in result for item in chunk)

    def apply_async(self, func, args=(), kwds={}, callback=None,
                    error_callback=None):
        '''
        Asynchronous version of `apply()` method.
        '''
        if self._state != RUN:
            raise ValueError("Pool not running")
        result = ApplyResult(self._cache, callback, error_callback)
        self._taskqueue.put(([(result._job, None, func, args, kwds)], None))
        return result

    def map_async(self, func, iterable, chunksize=None, callback=None,
                  error_callback=None):
        '''
        Asynchronous version of `map()` method.
        '''
        return self._map_async(func, iterable, mapstar, chunksize, callback,
                               error_callback)

    def _map_async(self, func, iterable, mapper, chunksize=None, callback=None,
                   error_callback=None):
        '''
        Helper function to implement map, starmap and their async counterparts.
        '''
        if self._state != RUN:
            raise ValueError("Pool not running")
        if not hasattr(iterable, '__len__'):
            # MapResult needs the total length up front.
            iterable = list(iterable)
        if chunksize is None:
            # Default heuristic: ~4 chunks per worker.
            chunksize, extra = divmod(len(iterable), len(self._pool) * 4)
            if extra:
                chunksize += 1
        if len(iterable) == 0:
            chunksize = 0
        task_batches = Pool._get_tasks(func, iterable, chunksize)
        result = MapResult(self._cache, chunksize, len(iterable), callback,
                           error_callback=error_callback)
        self._taskqueue.put((((result._job, i, mapper, (x,), {})
                              for i, x in enumerate(task_batches)), None))
        return result

    @staticmethod
    def _handle_workers(pool):
        thread = threading.current_thread()
        # Keep maintaining workers until the cache gets drained, unless the pool
        # is terminated.
        while thread._state == RUN or (pool._cache and thread._state != TERMINATE):
            pool._maintain_pool()
            time.sleep(0.1)
        # send sentinel to stop workers
        pool._taskqueue.put(None)
        debug('worker handler exiting')

    @staticmethod
    def _handle_tasks(taskqueue, put, outqueue, pool):
        thread = threading.current_thread()
        for taskseq, set_length in iter(taskqueue.get, None):
            i = -1
            for i, task in enumerate(taskseq):
                if thread._state:
                    debug('task handler found thread._state != RUN')
                    break
                try:
                    put(task)
                except IOError:
                    debug('could not put task on queue')
                    break
            else:
                # Whole sequence submitted: report its length to the
                # iterator (imap/imap_unordered) and go wait for more.
                if set_length:
                    debug('doing set_length()')
                    set_length(i+1)
                continue
            break
        else:
            debug('task handler got sentinel')
        try:
            # tell result handler to finish when cache is empty
            debug('task handler sending sentinel to result handler')
            outqueue.put(None)
            # tell workers there is no more work
            debug('task handler sending sentinel to workers')
            for p in pool:
                put(None)
        except IOError:
            debug('task handler got IOError when sending sentinels')
        debug('task handler exiting')

    @staticmethod
    def _handle_results(outqueue, get, cache):
        thread = threading.current_thread()
        # Phase 1: deliver results until sentinel or TERMINATE.
        while 1:
            try:
                task = get()
            except (IOError, EOFError):
                debug('result handler got EOFError/IOError -- exiting')
                return
            if thread._state:
                assert thread._state == TERMINATE
                debug('result handler found thread._state=TERMINATE')
                break
            if task is None:
                debug('result handler got sentinel')
                break
            job, i, obj = task
            try:
                cache[job]._set(i, obj)
            except KeyError:
                # Result for a job whose consumer already went away.
                pass
        # Phase 2: drain remaining results for still-pending jobs.
        while cache and thread._state != TERMINATE:
            try:
                task = get()
            except (IOError, EOFError):
                debug('result handler got EOFError/IOError -- exiting')
                return
            if task is None:
                debug('result handler ignoring extra sentinel')
                continue
            job, i, obj = task
            try:
                cache[job]._set(i, obj)
            except KeyError:
                pass
        if hasattr(outqueue, '_reader'):
            debug('ensuring that outqueue is not full')
            # If we don't make room available in outqueue then
            # attempts to add the sentinel (None) to outqueue may
            # block. There is guaranteed to be no more than 2 sentinels.
            try:
                for i in range(10):
                    if not outqueue._reader.poll():
                        break
                    get()
            except (IOError, EOFError):
                pass
        debug('result handler exiting: len(cache)=%s, thread._state=%s',
              len(cache), thread._state)

    @staticmethod
    def _get_tasks(func, it, size):
        # Yield (func, chunk) pairs, each chunk holding up to `size` items.
        it = iter(it)
        while 1:
            x = tuple(itertools.islice(it, size))
            if not x:
                return
            yield (func, x)

    def __reduce__(self):
        raise NotImplementedError(
              'pool objects cannot be passed between processes or pickled'
              )

    def close(self):
        debug('closing pool')
        if self._state == RUN:
            self._state = CLOSE
            self._worker_handler._state = CLOSE

    def terminate(self):
        debug('terminating pool')
        self._state = TERMINATE
        self._worker_handler._state = TERMINATE
        self._terminate()

    def join(self):
        # Only valid after close() or terminate().
        debug('joining pool')
        assert self._state in (CLOSE, TERMINATE)
        self._worker_handler.join()
        self._task_handler.join()
        self._result_handler.join()
        for p in self._pool:
            p.join()

    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # task_handler may be blocked trying to put items on inqueue
        debug('removing tasks from inqueue until task handler finished')
        inqueue._rlock.acquire()
        while task_handler.is_alive() and inqueue._reader.poll():
            inqueue._reader.recv()
            time.sleep(0)

    @classmethod
    def _terminate_pool(cls, taskqueue, inqueue, outqueue, pool,
                        worker_handler, task_handler, result_handler, cache):
        # this is guaranteed to only be called once
        debug('finalizing pool')
        worker_handler._state = TERMINATE
        task_handler._state = TERMINATE
        debug('helping task handler/workers to finish')
        cls._help_stuff_finish(inqueue, task_handler, len(pool))
        assert result_handler.is_alive() or len(cache) == 0
        result_handler._state = TERMINATE
        outqueue.put(None)                  # sentinel
        # We must wait for the worker handler to exit before terminating
        # workers because we don't want workers to be restarted behind our back.
        debug('joining worker handler')
        if threading.current_thread() is not worker_handler:
            worker_handler.join()
        # Terminate workers which haven't already finished.
        if pool and hasattr(pool[0], 'terminate'):
            debug('terminating workers')
            for p in pool:
                if p.exitcode is None:
                    p.terminate()
        debug('joining task handler')
        if threading.current_thread() is not task_handler:
            task_handler.join()
        debug('joining result handler')
        if threading.current_thread() is not result_handler:
            result_handler.join()
        if pool and hasattr(pool[0], 'terminate'):
            debug('joining pool workers')
            for p in pool:
                if p.is_alive():
                    # worker has not yet exited
                    debug('cleaning up worker %d' % p.pid)
                    p.join()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.terminate()
#
# Class whose instances are returned by `Pool.apply_async()`
#
class ApplyResult(object):
    """Result placeholder returned by `Pool.apply_async()`.

    Registers itself in the pool's cache under a fresh job id; the result
    handler thread later calls `_set()` to deliver the outcome.
    """

    def __init__(self, cache, callback, error_callback):
        # Event is set once the (success, value) pair has arrived.
        self._event = threading.Event()
        self._job = next(job_counter)
        self._cache = cache
        self._callback = callback
        self._error_callback = error_callback
        cache[self._job] = self

    def ready(self):
        # True once a result (or error) has been delivered.
        return self._event.is_set()

    def successful(self):
        # Only meaningful after the result has arrived.
        assert self.ready()
        return self._success

    def wait(self, timeout=None):
        self._event.wait(timeout)

    def get(self, timeout=None):
        # Blocks up to `timeout`; re-raises the worker's exception on failure.
        self.wait(timeout)
        if not self.ready():
            raise TimeoutError
        if self._success:
            return self._value
        else:
            raise self._value

    def _set(self, i, obj):
        # Called from the result handler thread with (success, value).
        self._success, self._value = obj
        if self._callback and self._success:
            self._callback(self._value)
        if self._error_callback and not self._success:
            self._error_callback(self._value)
        self._event.set()
        # Deregister: the job is complete.
        del self._cache[self._job]

AsyncResult = ApplyResult       # create alias -- see #17805
#
# Class whose instances are returned by `Pool.map_async()`
#
class MapResult(ApplyResult):
    """Result placeholder returned by `Pool.map_async()`.

    Collects one chunk of results per `_set()` call into a preallocated
    list; completes (and fires callbacks) when every chunk has arrived.
    """

    def __init__(self, cache, chunksize, length, callback, error_callback):
        ApplyResult.__init__(self, cache, callback,
                             error_callback=error_callback)
        self._success = True
        self._value = [None] * length
        self._chunksize = chunksize
        if chunksize <= 0:
            # Empty input: nothing to wait for, complete immediately.
            self._number_left = 0
            self._event.set()
            del cache[self._job]
        else:
            # Number of chunks still outstanding (last one may be partial).
            self._number_left = length//chunksize + bool(length % chunksize)

    def _set(self, i, success_result):
        # i is the chunk index; success_result is (success, chunk_values).
        success, result = success_result
        if success:
            self._value[i*self._chunksize:(i+1)*self._chunksize] = result
            self._number_left -= 1
            if self._number_left == 0:
                if self._callback:
                    self._callback(self._value)
                del self._cache[self._job]
                self._event.set()
        else:
            # First failing chunk poisons the whole map with its exception.
            self._success = False
            self._value = result
            if self._error_callback:
                self._error_callback(self._value)
            del self._cache[self._job]
            self._event.set()
#
# Class whose instances are returned by `Pool.imap()`
#
class IMapIterator(object):
    """Ordered result iterator returned by `Pool.imap()`.

    Results may arrive out of order from workers; out-of-order items are
    parked in `_unsorted` until their predecessors have been consumed.
    """

    def __init__(self, cache):
        self._cond = threading.Condition(threading.Lock())
        self._job = next(job_counter)
        self._cache = cache
        self._items = collections.deque()    # ready-to-yield, in order
        self._index = 0                      # next index expected
        self._length = None                  # total count, set by _set_length
        self._unsorted = {}                  # index -> early-arrived result
        cache[self._job] = self

    def __iter__(self):
        return self

    def next(self, timeout=None):
        self._cond.acquire()
        try:
            try:
                item = self._items.popleft()
            except IndexError:
                if self._index == self._length:
                    raise StopIteration
                # Nothing ready yet: wait for the result handler to notify.
                self._cond.wait(timeout)
                try:
                    item = self._items.popleft()
                except IndexError:
                    if self._index == self._length:
                        raise StopIteration
                    raise TimeoutError
        finally:
            self._cond.release()
        success, value = item
        if success:
            return value
        raise value

    __next__ = next                    # XXX

    def _set(self, i, obj):
        # Called from the result handler thread for item index i.
        self._cond.acquire()
        try:
            if self._index == i:
                # In-order arrival: release it, plus any parked successors.
                self._items.append(obj)
                self._index += 1
                while self._index in self._unsorted:
                    obj = self._unsorted.pop(self._index)
                    self._items.append(obj)
                    self._index += 1
                self._cond.notify()
            else:
                self._unsorted[i] = obj
            if self._index == self._length:
                # All items delivered: drop ourselves from the pool cache.
                del self._cache[self._job]
        finally:
            self._cond.release()

    def _set_length(self, length):
        self._cond.acquire()
        try:
            self._length = length
            if self._index == self._length:
                self._cond.notify()
                del self._cache[self._job]
        finally:
            self._cond.release()
#
# Class whose instances are returned by `Pool.imap_unordered()`
#
class IMapUnorderedIterator(IMapIterator):
    """Unordered variant used by `Pool.imap_unordered()`.

    Yields results in arrival order, so `_set` never needs to buffer
    out-of-order items; `_index` only counts deliveries here.
    """

    def _set(self, i, obj):
        self._cond.acquire()
        try:
            self._items.append(obj)
            self._index += 1
            self._cond.notify()
            if self._index == self._length:
                del self._cache[self._job]
        finally:
            self._cond.release()
#
#
#
class ThreadPool(Pool):
    """Pool variant whose workers are threads instead of processes."""

    # Thread-backed stand-in for multiprocessing.Process.
    from .dummy import Process

    def __init__(self, processes=None, initializer=None, initargs=()):
        Pool.__init__(self, processes, initializer, initargs)

    def _setup_queues(self):
        # Plain queue.Queue suffices: no pickling or pipes between threads.
        self._inqueue = queue.Queue()
        self._outqueue = queue.Queue()
        self._quick_put = self._inqueue.put
        self._quick_get = self._outqueue.get

    @staticmethod
    def _help_stuff_finish(inqueue, task_handler, size):
        # put sentinels at head of inqueue to make workers finish
        inqueue.not_empty.acquire()
        try:
            inqueue.queue.clear()
            inqueue.queue.extend([None] * size)
            inqueue.not_empty.notify_all()
        finally:
            inqueue.not_empty.release()
| agpl-3.0 |
veger/ansible | lib/ansible/modules/network/nxos/nxos_ntp_auth.py | 61 | 9574 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'network'}
DOCUMENTATION = '''
---
module: nxos_ntp_auth
extends_documentation_fragment: nxos
version_added: "2.2"
short_description: Manages NTP authentication.
description:
- Manages NTP authentication.
author:
- Jason Edelman (@jedelman8)
notes:
- Tested against NXOSv 7.3.(0)D1(1) on VIRL
- If C(state=absent), the module will remove the given key configuration if it exists.
- If C(state=absent) and C(authentication=on), authentication will be turned off.
options:
key_id:
description:
- Authentication key identifier (numeric).
md5string:
description:
- MD5 String.
auth_type:
description:
- Whether the given md5string is in cleartext or
has been encrypted. If in cleartext, the device
will encrypt it before storing it.
default: text
choices: ['text', 'encrypt']
trusted_key:
description:
- Whether the given key is required to be supplied by a time source
for the device to synchronize to the time source.
choices: [ 'false', 'true' ]
default: 'false'
authentication:
description:
- Turns NTP authentication on or off.
choices: ['on', 'off']
state:
description:
- Manage the state of the resource.
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
# Basic NTP authentication configuration
- nxos_ntp_auth:
key_id: 32
md5string: hello
auth_type: text
'''
RETURN = '''
commands:
description: command sent to the device
returned: always
type: list
sample: ["ntp authentication-key 32 md5 helloWorld 0", "ntp trusted-key 32"]
'''
import re
from ansible.module_utils.network.nxos.nxos import get_config, load_config, run_commands
from ansible.module_utils.network.nxos.nxos import nxos_argument_spec, check_args
from ansible.module_utils.basic import AnsibleModule
def execute_show_command(command, module):
    """Run a show command on the device, requesting json output except
    for 'show run' commands, which only support text."""
    output_format = 'text' if 'show run' in command else 'json'
    request = {'command': command, 'output': output_format}
    return run_commands(module, [request])
def flatten_list(command_lists):
    """Flatten one level of nesting: sub-lists are spliced in,
    scalar entries are kept as-is."""
    flattened = []
    for entry in command_lists:
        if isinstance(entry, list):
            flattened.extend(entry)
        else:
            flattened.append(entry)
    return flattened
def get_ntp_auth(module):
    """Return True if NTP authentication is enabled on the device."""
    body = execute_show_command('show ntp authentication-status', module)[0]
    return 'enabled' in body['authentication']
def get_ntp_trusted_key(module):
    """Return the list of trusted NTP key ids configured on the device."""
    command = 'show run | inc ntp.trusted-key'
    trusted_key_str = execute_show_command(command, module)[0]
    lines = trusted_key_str.splitlines() if trusted_key_str else []
    # Each matching config line looks like 'ntp trusted-key <id>'.
    return [str(line.split()[2]) for line in lines if line]
def get_ntp_auth_key(key_id, module):
    """Return the configured NTP authentication key for `key_id`.

    Parses the running-config line
    `ntp authentication-key <id> md5 <string> <type>` and returns a dict
    with 'key_id', 'md5string' and 'auth_type' ('encrypt' if the stored
    type is 7, else 'text').  Returns {} if the key is not configured.
    """
    authentication_key = {}
    command = 'show run | inc ntp.authentication-key.{0}'.format(key_id)
    auth_regex = (r".*ntp\sauthentication-key\s(?P<key_id>\d+)\s"
                  r"md5\s(?P<md5string>\S+)\s(?P<atype>\S+).*")
    body = execute_show_command(command, module)[0]
    try:
        match_authentication = re.match(auth_regex, body, re.DOTALL)
        group_authentication = match_authentication.groupdict()
        authentication_key['key_id'] = group_authentication['key_id']
        authentication_key['md5string'] = group_authentication['md5string']
        if group_authentication['atype'] == '7':
            authentication_key['auth_type'] = 'encrypt'
        else:
            authentication_key['auth_type'] = 'text'
    except (AttributeError, TypeError):
        # No match (key not configured) or non-string body: report empty.
        authentication_key = {}
    return authentication_key
def get_ntp_auth_info(key_id, module):
    """Collect the device's NTP auth state for `key_id`: the key itself,
    whether it is trusted, and whether authentication is globally on."""
    auth_info = get_ntp_auth_key(key_id, module)
    trusted = key_id in get_ntp_trusted_key(module)
    auth_info['trusted_key'] = 'true' if trusted else 'false'
    auth_info['authentication'] = 'on' if get_ntp_auth(module) else 'off'
    return auth_info
def auth_type_to_num(auth_type):
    """Map the module's auth_type value to the NX-OS numeric code
    ('7' for already-encrypted strings, '0' for cleartext)."""
    return '7' if auth_type == 'encrypt' else '0'
def set_ntp_auth_key(key_id, md5string, auth_type, trusted_key, authentication):
    """Build the config commands that apply the requested NTP auth state."""
    commands = []
    if key_id and md5string:
        atype_num = '7' if auth_type == 'encrypt' else '0'
        commands.append('ntp authentication-key {0} md5 {1} {2}'.format(
            key_id, md5string, atype_num))
    if trusted_key == 'true':
        commands.append('ntp trusted-key {0}'.format(key_id))
    elif trusted_key == 'false':
        commands.append('no ntp trusted-key {0}'.format(key_id))
    if authentication == 'on':
        commands.append('ntp authenticate')
    elif authentication == 'off':
        commands.append('no ntp authenticate')
    return commands
def remove_ntp_auth_key(key_id, md5string, auth_type, trusted_key, authentication):
    """Build the config commands that remove the NTP auth key and,
    if requested, turn global NTP authentication off."""
    commands = []
    if key_id:
        atype_num = '7' if auth_type == 'encrypt' else '0'
        commands.append('no ntp authentication-key {0} md5 {1} {2}'.format(
            key_id, md5string, atype_num))
    if authentication:
        commands.append('no ntp authenticate')
    return commands
def main():
    """Ansible module entry point: reconcile device NTP auth config
    with the requested parameters and report proposed/existing/end state."""
    argument_spec = dict(
        key_id=dict(type='str'),
        md5string=dict(type='str'),
        auth_type=dict(choices=['text', 'encrypt'], default='text'),
        trusted_key=dict(choices=['true', 'false'], default='false'),
        authentication=dict(choices=['on', 'off']),
        state=dict(choices=['absent', 'present'], default='present'),
    )
    argument_spec.update(nxos_argument_spec)
    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)
    warnings = list()
    check_args(module, warnings)
    key_id = module.params['key_id']
    md5string = module.params['md5string']
    auth_type = module.params['auth_type']
    trusted_key = module.params['trusted_key']
    authentication = module.params['authentication']
    state = module.params['state']
    if key_id:
        # A key id alone is not actionable.
        if not trusted_key and not md5string:
            module.fail_json(msg='trusted_key or md5string MUST be specified')
    args = dict(key_id=key_id, md5string=md5string,
                auth_type=auth_type, trusted_key=trusted_key,
                authentication=authentication)
    changed = False
    proposed = dict((k, v) for k, v in args.items() if v is not None)
    existing = get_ntp_auth_info(key_id, module)
    end_state = existing
    # delta = proposed settings that differ from the device's current state.
    delta = dict(set(proposed.items()).difference(existing.items()))
    commands = []
    if state == 'present':
        if delta:
            command = set_ntp_auth_key(
                key_id, md5string, delta.get('auth_type'),
                delta.get('trusted_key'), delta.get('authentication'))
            if command:
                commands.append(command)
    elif state == 'absent':
        # Only turn authentication off if it is currently on.
        auth_toggle = None
        if existing.get('authentication') == 'on':
            auth_toggle = True
        if not existing.get('key_id'):
            key_id = None
        command = remove_ntp_auth_key(
            key_id, md5string, auth_type, trusted_key, auth_toggle)
        if command:
            commands.append(command)
    cmds = flatten_list(commands)
    if cmds:
        if module.check_mode:
            module.exit_json(changed=True, commands=cmds)
        else:
            load_config(module, cmds)
            # Re-read the device to compute the actual resulting state.
            end_state = get_ntp_auth_info(key_id, module)
            delta = dict(set(end_state.items()).difference(existing.items()))
            if delta or (len(existing) != len(end_state)):
                changed = True
            if 'configure' in cmds:
                cmds.pop(0)
    results = {}
    results['proposed'] = proposed
    results['existing'] = existing
    results['updates'] = cmds
    results['changed'] = changed
    results['warnings'] = warnings
    results['end_state'] = end_state
    module.exit_json(**results)
if __name__ == '__main__':
main()
| gpl-3.0 |
IronLanguages/ironpython3 | Src/Scripts/generate_alltypes.py | 1 | 18881 | # Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
from generate import generate
import operator
import clr
from System import *
long = type(1 << 63) # https://github.com/IronLanguages/ironpython3/issues/52
def get_min_max(type):
    """Return the (min, max) representable values for a CLR numeric type.

    Types without MinValue/MaxValue (e.g. Complex) are treated as
    unbounded and reported as the infinite Double range.
    """
    if not hasattr(type, 'MinValue'):
        return Double.NegativeInfinity, Double.PositiveInfinity
    return type.MinValue, type.MaxValue
types = []
class NumType:
    """Describes one CLR numeric type for code generation.

    Captures the type's value range, signedness and float-ness, and knows
    how to find related types (other sign, overflow target) by searching
    the module-level `types` list.
    """

    def __init__(self, type):
        self.name = clr.GetClrType(type).Name
        self.type = type
        # Name of the generated ops class, e.g. "Int32Ops".
        self.ops = self.name+"Ops"
        self.min, self.max = get_min_max(type)
        self.is_signed = self.min < 0
        # Count of representable values (infinite for float-like types).
        self.size = self.max-self.min + 1
        try:
            # Integer types truncate 1//2 to 0; float-like types do not.
            self.is_float = self.type(1) // self.type(2) != self.type(0.5)
        except:
            # Types where the probe fails (e.g. complex) count as float.
            self.is_float = True

    def get_dict(self):
        """Return the substitution dict used by the code templates."""
        toObj = "(%s)" % self.name
        toObjFooter = ""
        if self.name == "Int32":
            # Int32 boxing goes through the cached-object helper.
            toObj = "Microsoft.Scripting.Runtime.ScriptingRuntimeHelpers.Int32ToObject((Int32)"
            toObjFooter = ")"
        if self.get_overflow_type() == bigint:
            op_type = 'BigInteger'
        else:
            op_type = 'Int32'
        return dict(type = self.name, bigger_type = self.get_overflow_type().get_signed().name,
                    bigger_signed = self.get_overflow_type().get_signed().name,
                    type_to_object = toObj, type_to_object_footer = toObjFooter, op_type=op_type, rop_type=op_type)

    def get_other_sign(self):
        """Return the same-size type of the opposite signedness."""
        if self.is_signed: return self.get_unsigned()
        else: return self.get_signed()

    def get_unsigned(self):
        """Return the unsigned type with the same value-range size."""
        if not self.is_signed: return self
        for ty in types:
            if not ty.is_signed and ty.size == self.size: return ty
        raise ValueError

    def get_signed(self):
        """Return the signed type with the same value-range size."""
        if self.is_signed: return self
        for ty in types:
            if ty.is_signed and ty.size == self.size: return ty
        raise ValueError(ty.name)

    def get_overflow_type(self):
        """Return the type arithmetic overflows into (BigInteger if none fits)."""
        if self.is_float or self == bigint: return self
        if self.type == int: return bigint # special Python overflow rule (skips int64)
        for ty in types:
            if not ty.is_float and ty.is_signed == self.is_signed and ty.size == self.size**2:
                return ty
        return bigint

    def is_implicit(self, oty):
        """Return True if this type implicitly converts to `oty` (CLR rules)."""
        if self.is_float:
            if oty.is_float:
                return self.size <= oty.size
            else:
                return False
        else:
            if oty.is_float:
                return True
            elif self.is_signed:
                if oty.is_signed:
                    if self.name == 'Double': return oty.name == 'Double' or oty.name == 'Complex64'
                    else: return self.size <= oty.size
                else:
                    return False
            else:
                if oty.is_signed:
                    # Unsigned fits in a strictly larger signed type.
                    return self.size < oty.size
                else:
                    return self.size <= oty.size
for type in SByte, Byte, Int16, UInt16, Int32, UInt32, Int64, UInt64, Single, Double, complex, long:
types.append(NumType(type))
bigint = types[-1]
simple_identity_method = """\
public static %(type)s %(method_name)s(%(type)s x) {
return x;
}"""
identity_method = """\
[SpecialName]
public static %(type)s %(method_name)s(%(type)s x) {
return x;
}"""
simple_method = """\
[SpecialName]
public static %(type)s %(method_name)s(%(type)s x) {
return (%(type)s)(%(symbol)s(x));
}"""
signed_abs = """\
[SpecialName]
public static object Abs(%(type)s x) {
if (x < 0) {
if (x == %(type)s.MinValue) return -(%(bigger_signed)s)%(type)s.MinValue;
else return (%(type)s)(-x);
} else {
return x;
}
}"""
signed_negate = """\
[SpecialName]
public static object Negate(%(type)s x) {
if (x == %(type)s.MinValue) return -(%(bigger_signed)s)%(type)s.MinValue;
else return (%(type)s)(-x);
}"""
unsigned_negate_or_invert = """\
[SpecialName]
public static object %(method_name)s(%(type)s x) {
return %(bigger_signed)sOps.%(method_name)s((%(bigger_signed)s)x);
}"""
float_trunc = """\
public static object __trunc__(%(type)s x) {
if (x >= int.MaxValue || x <= int.MinValue) {
return (BigInteger)x;
} else {
return (int)x;
}
}"""
def gen_unaryops(cw, ty):
    """Emit the C# unary-operation methods for numeric type ``ty``:
    Plus/Negate/Abs/OnesComplement plus __bool__, __repr__, __trunc__,
    __hash__ and __index__."""
    cw.write("// Unary Operations")
    cw.write(identity_method, method_name="Plus")
    if ty.is_float:
        cw.write(simple_method, method_name="Negate", symbol="-")
        cw.write(simple_method, method_name="Abs", symbol="Math.Abs")
    elif ty.is_signed:
        # signed ints need overflow handling for Negate/Abs of MinValue
        cw.write(signed_negate)
        cw.write(signed_abs)
        cw.write(simple_method, method_name="OnesComplement", symbol="~")
    else:
        # unsigned ops delegate to the next bigger signed type
        cw.write(unsigned_negate_or_invert, method_name="Negate")
        cw.write(identity_method, method_name="Abs")
        cw.write(unsigned_negate_or_invert, method_name="OnesComplement")
    if (ty.type is not complex) and (ty.type is not bigint):
        cw.enter_block('public static bool __bool__(%s x)' % (ty.name))
        cw.writeline('return (x != 0);')
        cw.exit_block()
        cw.writeline()
    # this is handled in another Ops file
    if not ty.is_float:
        cw.enter_block('public static string __repr__(%s x)' % (ty.name))
        cw.writeline('return x.ToString(CultureInfo.InvariantCulture);')
        cw.exit_block()
    if ty.is_float:
        cw.write(float_trunc, type=ty.name)
    else:
        cw.write(simple_identity_method, type=ty.name, method_name="__trunc__")
    cw.enter_block('public static int __hash__(%s x)' % (ty.name))
    if ty.max > Int32.MaxValue:
        # 64-bit range: fold the value into int range, mapping -1 to -2
        # (CPython reserves -1 as the hash error code).
        if ty.is_signed:
            cw.enter_block('if (x < 0)')
            cw.writeline('if (x == long.MinValue) return -2;')
            cw.writeline('x = -x;')
            cw.writeline('var h = unchecked(-(int)((x >= int.MaxValue) ? (x % int.MaxValue) : x));')
            cw.writeline('if (h == -1) return -2;')
            cw.writeline('return h;')
            cw.exit_block()
        cw.writeline('return unchecked((int)((x >= int.MaxValue) ? (x % int.MaxValue) : x));')
    elif ty.max == Int32.MaxValue:
        cw.writeline("// for perf we use an if for the 3 int values with abs(x) >= int.MaxValue")
        cw.writeline('if (x == -1 || x == int.MinValue) return -2;')
        cw.writeline('if (x == int.MaxValue || x == int.MinValue + 1) return 0;')
        cw.writeline('return unchecked((int)x);')
    else:
        if ty.is_signed:
            cw.writeline('if (x == -1) return -2;')
        cw.writeline('return unchecked((int)x);')
    cw.exit_block()
    if ty.max > Int32.MaxValue:
        cw.enter_block('public static BigInteger __index__(%s x)' % (ty.name))
        cw.writeline('return unchecked((BigInteger)x);')
        cw.exit_block()
    else:
        cw.enter_block('public static int __index__(%s x)' % (ty.name))
        cw.writeline('return unchecked((int)x);')
        cw.exit_block()
binop_decl = """\
[SpecialName]
public static %(return_type)s %(method_name)s(%(rtype)s x, %(ltype)s y)"""
simple_body = "return x %(symbol)s y;"
cast_simple_body = "return (%(type)s)(x %(symbol)s y);"
simple_compare_body = "return x == y ? 0 : x > y ? 1 : -1;"
overflow1_body = """\
%(bigger_type)s result = (%(bigger_type)s)(((%(bigger_type)s)x) %(symbol)s ((%(bigger_type)s)y));
if (%(type)s.MinValue <= result && result <= %(type)s.MaxValue) {
return %(type_to_object)s(result)%(type_to_object_footer)s;
} else {
return result;
}"""
overflow2_body = """\
try {
return %(type_to_object)s(checked(x %(symbol)s y))%(type_to_object_footer)s;
} catch (OverflowException) {
return %(bigger_type)sOps.%(method_name)s((%(bigger_type)s)x, (%(bigger_type)s)y);
}"""
overflow3_body = """\
long result = (long)x %(symbol)s y;
if (%(type)s.MinValue <= result && result <= %(type)s.MaxValue) {
return %(type_to_object)s(result)%(type_to_object_footer)s;
}
return %(bigger_type)sOps.%(method_name)s((%(bigger_type)s)x, (%(bigger_type)s)y);"""
unsigned_signed_body = "return %(bigger_signed)sOps.%(method_name)s((%(bigger_signed)s)x, (%(bigger_signed)s)y);"
int_divide_body = "return FloorDivide(x, y);"
float_divide_body = "return TrueDivide(x, y);"
float_floor_divide_body = """\
if (y == 0) throw PythonOps.ZeroDivisionError();
return (%(type)s)Math.Floor(x / y);"""
float_true_divide_body = """\
if (y == 0) throw PythonOps.ZeroDivisionError();
return x / y;"""
int_true_divide_body = """\
return DoubleOps.TrueDivide((double)x, (double)y);"""
div_body = """\
if (y == -1 && x == %(type)s.MinValue) {
return -(%(bigger_type)s)%(type)s.MinValue;
} else {
return (%(type)s)MathUtils.FloorDivideUnchecked(x, y);
}"""
#rshift, mod
cast_helper_body = "return (%(type)s)%(op_type)sOps.%(method_name)s((%(op_type)s)x, (%(rop_type)s)y);"
#lshift, pow
helper_body = "return %(op_type)sOps.%(method_name)s((%(op_type)s)x, (%(rop_type)s)y);"
def write_binop_raw(cw, body, name, ty, **kws):
    """Emit one binary-operator method: the ``binop_decl`` declaration
    followed by ``body``, with template keywords defaulted from ``ty`` and
    ``name`` and overridden by ``kws``."""
    merged = {
        'return_type': ty.name,
        'method_name': name,
        'rtype': ty.name,
        'ltype': ty.name,
    }
    merged.update(kws)
    cw.enter_block(binop_decl, **merged)
    cw.write(body, **merged)
    cw.exit_block()
def write_binop1(cw, body, name, ty, **kws):
    # Convenience wrapper: write_binop1_general with the raw emitter.
    write_binop1_general(write_binop_raw, cw, body, name, ty, **kws)
def write_binop1_general(func, cw, body, name, ty, **kws):
    # Emit the homogeneous overload, then -- for unsigned types -- the two
    # mixed overloads, which delegate to the bigger signed type via
    # ``unsigned_signed_body``.
    func(cw, body, name, ty, **kws)
    if not ty.is_signed:
        oty = ty.get_signed()
        if 'return_type' not in kws or kws['return_type'] == ty.name:
            if name == 'FloorDivide':
                kws['return_type'] = 'object'
            else:
                kws['return_type'] = cw.kws['bigger_signed']
        # overload (ty x, signed y)
        kws['ltype'] = oty.name
        func(cw, unsigned_signed_body, name, ty, **kws)
        # overload (signed x, ty y)
        kws['ltype'] = ty.name
        kws['rtype'] = oty.name
        func(cw, unsigned_signed_body, name, ty, **kws)
def write_rich_comp_raw(cw, body, name, ty, **kws):
    # Like write_binop_raw but emits an expression-bodied (=>) C# method.
    # ``body`` must start with "return"; body[6:] strips that prefix so
    # the remainder becomes the => expression.
    kws1 = dict(return_type=ty.name, method_name=name, rtype=ty.name, ltype=ty.name)
    kws1.update(kws)
    assert body.startswith("return")
    cw.write("""[SpecialName]
public static %(return_type)s %(method_name)s(%(rtype)s x, %(ltype)s y) =>""" + body[6:], **kws1)
def write_rich_comp(cw, body, name, ty, **kws):
    # Convenience wrapper: write_rich_comp_general with the raw emitter.
    write_rich_comp_general(write_rich_comp_raw, cw, body, name, ty, **kws)
def write_rich_comp_general(func, cw, body, name, ty, **kws):
    # Homogeneous comparison, plus one mixed overload for unsigned types.
    func(cw, body, name, ty, **kws)
    if not ty.is_signed:
        oty = ty.get_signed()
        kws['ltype'] = oty.name
        if cw.kws.get('bigger_signed') == "BigInteger":
            # NOTE(review): presumably UInt64 must compare via BigInteger
            # to sidestep C# signed/unsigned mismatch -- confirm.
            func(cw, unsigned_signed_body, name, ty, **kws)
        else:
            func(cw, body, name, ty, **kws)
def gen_binaryops(cw, ty):
    """Emit the C# binary-operation methods for ``ty``: arithmetic
    (with overflow promotion), division variants, bitwise ops, shifts,
    and rich comparisons."""
    cw.writeline()
    cw.write("// Binary Operations - Arithmetic")
    if ty.name != 'Complex64':
        for symbol, name in [('+', 'Add'), ('-', 'Subtract'), ('*', 'Multiply')]:
            if ty.is_float:
                write_binop1(cw, simple_body, name, ty, symbol=symbol)
            else:
                # Overflow strategy: Int32 computes in long inline;
                # types that overflow to BigInteger use checked() with a
                # BigInteger fallback; smaller ints compute in the
                # double-width type and range-check the result.
                if ty.name == "Int32":
                    body = overflow3_body
                elif ty.get_overflow_type() == bigint:
                    body = overflow2_body
                else:
                    body = overflow1_body
                write_binop1(cw, body, name, ty, return_type='object', symbol=symbol)
    if ty.is_float:
        write_binop1(cw, float_true_divide_body, "TrueDivide", ty)
        write_binop1(cw, float_floor_divide_body, "FloorDivide", ty)
    else:
        write_binop1(cw, int_true_divide_body, "TrueDivide", ty, return_type='double')
        if ty.name not in ['BigInteger', 'Int32']:
            if ty.is_signed:
                # signed floor-divide needs the MinValue/-1 overflow case
                write_binop1(cw, div_body, 'FloorDivide', ty, return_type='object')
                write_binop1(cw, cast_helper_body, 'Mod', ty)
            else:
                write_binop1(cw, cast_simple_body, 'FloorDivide', ty, symbol='/')
                write_binop1(cw, cast_simple_body, 'Mod', ty, symbol='%')
    write_binop1(cw, helper_body, 'Power', ty, return_type='object')
    if not ty.is_float:
        cw.writeline()
        cw.write("// Binary Operations - Bitwise")
        if ty.name not in ["BigInteger"]:
            # shift counts are accepted as BigInteger and (for small
            # enough types) Int32
            ltypes = [('[NotNull]BigInteger', 'BigInteger')]
            if ty.size < Int32.MaxValue:
                ltypes.append( ('Int32', 'Int32') )
            for ltype, optype in ltypes:
                write_binop_raw(cw, helper_body, "LeftShift", ty, return_type='object', op_type=optype, rop_type=optype, ltype=ltype)
                write_binop_raw(cw, cast_helper_body, "RightShift", ty, op_type=optype, rop_type=optype, ltype=ltype)
        for symbol, name in [('&', 'BitwiseAnd'), ('|', 'BitwiseOr'), ('^', 'ExclusiveOr')]:
            write_binop1(cw, cast_simple_body, name, ty, symbol=symbol)
    cw.writeline()
    cw.write("// Binary Operations - Comparisons")
    for symbol, name in [('<', 'LessThan'), ('<=', 'LessThanOrEqual'), ('>', 'GreaterThan'), ('>=', 'GreaterThanOrEqual'), ('==', 'Equals'), ('!=', 'NotEquals')]:
        write_rich_comp(cw, simple_body, name, ty, symbol=symbol, return_type='bool')
implicit_conv = """\
[SpecialName, ImplicitConversionMethod]
public static %(otype)s ConvertTo%(otype)s(%(type)s x) {
return (%(otype)s)x;
}"""
explicit_conv = """\
[SpecialName, ExplicitConversionMethod]
public static %(otype)s ConvertTo%(otype)s(%(type)s x) {
if (%(otype)s.MinValue <= x && x <= %(otype)s.MaxValue) {
return (%(otype)s)x;
}
throw Converter.CannotConvertOverflow("%(otype)s", x);
}"""
explicit_conv_to_unsigned_from_signed = """\
[SpecialName, ExplicitConversionMethod]
public static %(otype)s ConvertTo%(otype)s(%(type)s x) {
if (x >= 0) {
return (%(otype)s)x;
}
throw Converter.CannotConvertOverflow("%(otype)s", x);
}"""
explicit_conv_tosigned_from_unsigned = """\
[SpecialName, ExplicitConversionMethod]
public static %(otype)s ConvertTo%(otype)s(%(type)s x) {
if (x <= (%(type)s)%(otype)s.MaxValue) {
return (%(otype)s)x;
}
throw Converter.CannotConvertOverflow("%(otype)s", x);
}"""
def write_conversion(cw, ty, oty):
    # Choose the conversion-operator template for ty -> oty:
    # implicit when every value of ty fits in oty, otherwise an explicit
    # range-checked conversion (with cheaper checks for the two
    # signed<->unsigned special cases).
    if ty.is_implicit(oty):
        cw.write(implicit_conv, otype=oty.name)
    elif ty.is_signed and not oty.is_signed and ty.size <= oty.size:
        # same-or-wider unsigned target: only negative values can fail
        cw.write(explicit_conv_to_unsigned_from_signed, otype=oty.name)
    elif not ty.is_signed and oty.is_signed:
        # unsigned source: only an upper-bound check is needed
        cw.write(explicit_conv_tosigned_from_unsigned, otype=oty.name)
    else:
        cw.write(explicit_conv, otype=oty.name)
def gen_conversions(cw, ty):
    """Emit conversion operators from ``ty`` to every other generated
    numeric type (the last two entries -- complex and BigInteger -- are
    excluded)."""
    cw.writeline()
    cw.write("// Conversion operators")
    for target in types[:-2]:
        if target != ty:
            write_conversion(cw, ty, target)
identity_property_method = """\
[PropertyMethod, SpecialName]
public static %(type)s Get%(method_name)s(%(type)s x) {
return x;
}"""
const_property_method = """\
[PropertyMethod, SpecialName]
public static %(type)s Get%(method_name)s(%(type)s x) {
return (%(type)s)%(const)s;
}"""
# const=None indicates an identity property, i.e. a property that returns 'self'
def write_property(cw, ty, name, const=None):
    """Emit a [PropertyMethod] getter ``name`` for ``ty``: an identity
    property when ``const`` is None, otherwise one returning ``const``."""
    if const is None:  # idiom fix: identity comparison, was '== None'
        cw.write(identity_property_method, type=ty.name, method_name=name)
    else:
        cw.write(const_property_method, type=ty.name, method_name=name, const=const)
def gen_api(cw, ty):
    """Emit the Python-level numeric API for ``ty``: real/imag/conjugate,
    and for integer types numerator/denominator and bit_length."""
    if ty.name in ["BigInteger", "Complex"]:
        return
    cw.writeline()
    cw.write("// Public API - Numerics")
    write_property(cw, ty, "real")
    write_property(cw, ty, "imag", const="0")
    cw.write(simple_identity_method, type=ty.name, method_name="conjugate")
    if ty.is_float:
        pass
    else:
        write_property(cw, ty, "numerator")
        write_property(cw, ty, "denominator", const="1")
        cast = "(int)"
        counter = "BitLength"
        if ty.size >= 4294967296:
            # 32- or 64-bit type
            cast = ""
            if not ty.is_signed:
                counter += "Unsigned"
        cw.enter_block('public static int bit_length(%s value)' % ty.name)
        cw.write('return MathUtils.%s(%svalue);' % (counter, cast))
        cw.exit_block()
type_header = """\
[StaticExtensionMethod]
public static object __new__(PythonType cls) {
return __new__(cls, default(%(type)s));
}
[StaticExtensionMethod]
public static object __new__(PythonType cls, object value) {
if (cls != DynamicHelpers.GetPythonTypeFromType(typeof(%(type)s))) {
throw PythonOps.TypeError("%(type)s.__new__: first argument must be %(type)s type.");
}
IConvertible valueConvertible;
if ((valueConvertible = value as IConvertible) != null) {
switch (valueConvertible.GetTypeCode()) {
case TypeCode.Byte: return (%(type)s)(Byte)value;
case TypeCode.SByte: return (%(type)s)(SByte)value;
case TypeCode.Int16: return (%(type)s)(Int16)value;
case TypeCode.UInt16: return (%(type)s)(UInt16)value;
case TypeCode.Int32: return (%(type)s)(Int32)value;
case TypeCode.UInt32: return (%(type)s)(UInt32)value;
case TypeCode.Int64: return (%(type)s)(Int64)value;
case TypeCode.UInt64: return (%(type)s)(UInt64)value;
case TypeCode.Single: return (%(type)s)(Single)value;
case TypeCode.Double: return (%(type)s)(Double)value;
}
}
if (value is String) {
return %(type)s.Parse((String)value);
} else if (value is BigInteger) {
return (%(type)s)(BigInteger)value;
} else if (value is Extensible<BigInteger>) {
return (%(type)s)((Extensible<BigInteger>)value).Value;
} else if (value is Extensible<double>) {
return (%(type)s)((Extensible<double>)value).Value;
}
throw PythonOps.ValueError("invalid value for %(type)s.__new__");
}"""
def gen_header(cw, ty):
    """Emit the __new__ boilerplate, skipping the types whose __new__ is
    written by hand elsewhere."""
    handwritten = ('Int32', 'Double', 'Single', 'BigInteger', 'Complex64')
    if ty.name in handwritten:
        return
    cw.write(type_header)
def gen_type(cw, ty):
    """Emit the complete ``public static partial class XxxOps`` for ``ty``."""
    # expose this type's template keywords (type, bigger_signed, ...) to
    # every template written below
    cw.kws.update(ty.get_dict())
    extra = ""
    cw.enter_block("public static partial %(extra)sclass %(type)sOps", extra=extra)
    gen_header(cw, ty)
    gen_unaryops(cw, ty)
    gen_binaryops(cw, ty)
    gen_conversions(cw, ty)
    gen_api(cw, ty)
    cw.exit_block()
    cw.writeline()
def gen_all(cw):
    # don't generate complex or BigInteger (the last two entries of ``types``)
    for ty in types[:-2]:
        gen_type(cw, ty)
def main():
    # ``generate`` comes from the surrounding codegen framework; it writes
    # the "IntOps" generated-code section using gen_all.
    return generate(
        ("IntOps", gen_all),
    )

if __name__ == "__main__":
    main()
| apache-2.0 |
huangkuan/hack | lib/pyasn1_modules/rfc2315.py | 127 | 8894 | #
# PKCS#7 message syntax
#
# ASN.1 source from:
# http://www.trl.ibm.com/projects/xml/xss4j/data/asn1/grammars/pkcs7.asn
#
# Sample captures from:
# openssl crl2pkcs7 -nocrl -certfile cert1.cer -out outfile.p7b
#
from pyasn1.type import tag,namedtype,namedval,univ,constraint,char,useful
from pyasn1_modules.rfc2459 import *
class Attribute(univ.Sequence):
    # Attribute: a type OID together with a SET of values.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('type', AttributeType()),
        namedtype.NamedType('values', univ.SetOf(componentType=AttributeValue()))
    )

class AttributeValueAssertion(univ.Sequence):
    # AttributeValueAssertion: a single (type, value) pair.
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('attributeType', AttributeType()),
        namedtype.NamedType('attributeValue', AttributeValue())
    )
# PKCS#7 content-type object identifiers (arc 1.2.840.113549.1.7)
pkcs_7 = univ.ObjectIdentifier('1.2.840.113549.1.7')
data = univ.ObjectIdentifier('1.2.840.113549.1.7.1')
signedData = univ.ObjectIdentifier('1.2.840.113549.1.7.2')
envelopedData = univ.ObjectIdentifier('1.2.840.113549.1.7.3')
signedAndEnvelopedData = univ.ObjectIdentifier('1.2.840.113549.1.7.4')
digestedData = univ.ObjectIdentifier('1.2.840.113549.1.7.5')
encryptedData = univ.ObjectIdentifier('1.2.840.113549.1.7.6')
class ContentType(univ.ObjectIdentifier): pass
class ContentEncryptionAlgorithmIdentifier(AlgorithmIdentifier): pass
class EncryptedContent(univ.OctetString): pass
class EncryptedContentInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('contentType', ContentType()),
namedtype.NamedType('contentEncryptionAlgorithm', ContentEncryptionAlgorithmIdentifier()),
namedtype.OptionalNamedType('encryptedContent', EncryptedContent().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
)
class Version(univ.Integer): pass # overrides x509.Version
class EncryptedData(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo())
)
class DigestAlgorithmIdentifier(AlgorithmIdentifier): pass
class DigestAlgorithmIdentifiers(univ.SetOf):
componentType = DigestAlgorithmIdentifier()
class Digest(univ.OctetString): pass
class ContentInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('contentType', ContentType()),
namedtype.OptionalNamedType('content', univ.Any().subtype(explicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
)
class DigestedData(univ.Sequence):
    # PKCS#7 DigestedData ::= SEQUENCE { version, digestAlgorithm,
    # contentInfo, digest }
    componentType = namedtype.NamedTypes(
        namedtype.NamedType('version', Version()),
        namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
        namedtype.NamedType('contentInfo', ContentInfo()),
        # Bug fix: was ``Digest`` (the class object); NamedType needs an
        # ASN.1 *instance*, as every other field in this module supplies.
        namedtype.NamedType('digest', Digest())
    )
class IssuerAndSerialNumber(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('issuer', Name()),
namedtype.NamedType('serialNumber', CertificateSerialNumber())
)
class KeyEncryptionAlgorithmIdentifier(AlgorithmIdentifier): pass
class EncryptedKey(univ.OctetString): pass
class RecipientInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
namedtype.NamedType('keyEncryptionAlgorithm', KeyEncryptionAlgorithmIdentifier()),
namedtype.NamedType('encryptedKey', EncryptedKey())
)
class RecipientInfos(univ.SetOf):
componentType = RecipientInfo()
class Attributes(univ.SetOf):
componentType = Attribute()
class ExtendedCertificateInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('certificate', Certificate()),
namedtype.NamedType('attributes', Attributes())
)
class SignatureAlgorithmIdentifier(AlgorithmIdentifier): pass
class Signature(univ.BitString): pass
class ExtendedCertificate(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('extendedCertificateInfo', ExtendedCertificateInfo()),
namedtype.NamedType('signatureAlgorithm', SignatureAlgorithmIdentifier()),
namedtype.NamedType('signature', Signature())
)
class ExtendedCertificateOrCertificate(univ.Choice):
componentType = namedtype.NamedTypes(
namedtype.NamedType('certificate', Certificate()),
namedtype.NamedType('extendedCertificate', ExtendedCertificate().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0)))
)
class ExtendedCertificatesAndCertificates(univ.SetOf):
componentType = ExtendedCertificateOrCertificate()
class SerialNumber(univ.Integer): pass
class CRLEntry(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('userCertificate', SerialNumber()),
namedtype.NamedType('revocationDate', useful.UTCTime())
)
class TBSCertificateRevocationList(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('signature', AlgorithmIdentifier()),
namedtype.NamedType('issuer', Name()),
namedtype.NamedType('lastUpdate', useful.UTCTime()),
namedtype.NamedType('nextUpdate', useful.UTCTime()),
namedtype.OptionalNamedType('revokedCertificates', univ.SequenceOf(componentType=CRLEntry()))
)
class CertificateRevocationList(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('tbsCertificateRevocationList', TBSCertificateRevocationList()),
namedtype.NamedType('signatureAlgorithm', AlgorithmIdentifier()),
namedtype.NamedType('signature', univ.BitString())
)
class CertificateRevocationLists(univ.SetOf):
componentType = CertificateRevocationList()
class DigestEncryptionAlgorithmIdentifier(AlgorithmIdentifier): pass
class EncryptedDigest(univ.OctetString): pass
class SignerInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('issuerAndSerialNumber', IssuerAndSerialNumber()),
namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
namedtype.OptionalNamedType('authenticatedAttributes', Attributes().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.NamedType('digestEncryptionAlgorithm', DigestEncryptionAlgorithmIdentifier()),
namedtype.NamedType('encryptedDigest', EncryptedDigest()),
namedtype.OptionalNamedType('unauthenticatedAttributes', Attributes().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1)))
)
class SignerInfos(univ.SetOf):
componentType = SignerInfo()
class SignedAndEnvelopedData(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('recipientInfos', RecipientInfos()),
namedtype.NamedType('digestAlgorithms', DigestAlgorithmIdentifiers()),
namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo()),
namedtype.OptionalNamedType('certificates', ExtendedCertificatesAndCertificates().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.OptionalNamedType('crls', CertificateRevocationLists().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
namedtype.NamedType('signerInfos', SignerInfos())
)
class EnvelopedData(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('recipientInfos', RecipientInfos()),
namedtype.NamedType('encryptedContentInfo', EncryptedContentInfo())
)
class DigestInfo(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('digestAlgorithm', DigestAlgorithmIdentifier()),
namedtype.NamedType('digest', Digest())
)
class SignedData(univ.Sequence):
componentType = namedtype.NamedTypes(
namedtype.NamedType('version', Version()),
namedtype.NamedType('digestAlgorithms', DigestAlgorithmIdentifiers()),
namedtype.NamedType('contentInfo', ContentInfo()),
namedtype.OptionalNamedType('certificates', ExtendedCertificatesAndCertificates().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 0))),
namedtype.OptionalNamedType('crls', CertificateRevocationLists().subtype(implicitTag=tag.Tag(tag.tagClassContext, tag.tagFormatConstructed, 1))),
namedtype.NamedType('signerInfos', SignerInfos())
)
class Data(univ.OctetString): pass
| apache-2.0 |
chriskuehl/pre-commit | pre_commit/clientlib/validate_manifest.py | 2 | 2857 | from __future__ import unicode_literals
from pre_commit.clientlib.validate_base import get_run_function
from pre_commit.clientlib.validate_base import get_validator
from pre_commit.clientlib.validate_base import is_regex_valid
from pre_commit.languages.all import all_languages
class InvalidManifestError(ValueError):
    # Raised when a manifest fails schema or semantic validation.
    pass
MANIFEST_JSON_SCHEMA = {
'type': 'array',
'minItems': 1,
'items': {
'type': 'object',
'properties': {
'id': {'type': 'string'},
'always_run': {'type': 'boolean', 'default': False},
'name': {'type': 'string'},
'description': {'type': 'string', 'default': ''},
'entry': {'type': 'string'},
'exclude': {'type': 'string', 'default': '^$'},
'language': {'type': 'string'},
'language_version': {'type': 'string', 'default': 'default'},
'minimum_pre_commit_version': {
'type': 'string', 'default': '0.0.0',
},
'files': {'type': 'string'},
'stages': {
'type': 'array',
'default': [],
'items': {
'type': 'string',
},
},
'args': {
'type': 'array',
'default': [],
'items': {
'type': 'string',
},
},
'additional_dependencies': {
'type': 'array',
'items': {'type': 'string'},
},
},
'required': ['id', 'name', 'entry', 'language', 'files'],
},
}
def validate_languages(hook_config):
    """Raise InvalidManifestError if the hook declares an unknown language."""
    if hook_config['language'] not in all_languages:
        raise InvalidManifestError(
            # Bug fix: the format arguments were swapped, so the message
            # read "Expected language <hook id> for <language> ...".
            'Expected language {0} for {1} to be one of {2!r}'.format(
                hook_config['language'],
                hook_config['id'],
                all_languages,
            )
        )
def validate_files(hook_config):
    """Raise InvalidManifestError when the hook's 'files' or 'exclude'
    pattern is not a valid regular expression."""
    hook_id = hook_config['id']
    files_pattern = hook_config['files']
    if not is_regex_valid(files_pattern):
        raise InvalidManifestError(
            'Invalid files regex at {0}: {1}'.format(hook_id, files_pattern)
        )
    # 'exclude' is optional; the empty-string default is always valid
    if not is_regex_valid(hook_config.get('exclude', '')):
        raise InvalidManifestError(
            'Invalid exclude regex at {0}: {1}'.format(
                hook_id, hook_config['exclude'],
            )
        )
def additional_manifest_check(obj):
    """Run the non-schema (semantic) checks against every hook config."""
    for cfg in obj:
        validate_languages(cfg)
        validate_files(cfg)
# Manifest loader/validator: JSON-schema validation plus the semantic
# checks above.
load_manifest = get_validator(
    MANIFEST_JSON_SCHEMA,
    InvalidManifestError,
    additional_manifest_check,
)

# CLI entry point: validates each manifest filename given on the command line.
run = get_run_function(
    'Manifest filenames.',
    load_manifest,
    InvalidManifestError,
)

if __name__ == '__main__':
    exit(run())
| mit |
jawed123/flask | tests/test_user_error_handler.py | 150 | 3483 | # -*- coding: utf-8 -*-
from werkzeug.exceptions import Forbidden, InternalServerError
import flask
def test_error_handler_no_match():
    """A handler registered for one exception class fires only for that
    class; anything else falls through to the 500 handler, which receives
    the original exception."""
    app = flask.Flask(__name__)

    class CustomException(Exception):
        pass

    @app.errorhandler(CustomException)
    def custom_exception_handler(e):
        assert isinstance(e, CustomException)
        return 'custom'

    @app.errorhandler(500)
    def handle_500(e):
        return type(e).__name__

    @app.route('/custom')
    def custom_test():
        raise CustomException()

    @app.route('/keyerror')
    def key_error():
        raise KeyError()

    c = app.test_client()

    assert c.get('/custom').data == b'custom'
    assert c.get('/keyerror').data == b'KeyError'
def test_error_handler_subclass():
    """A handler for a parent exception class also catches unregistered
    subclasses; a subclass with its own handler uses the more specific one."""
    app = flask.Flask(__name__)

    class ParentException(Exception):
        pass

    class ChildExceptionUnregistered(ParentException):
        pass

    class ChildExceptionRegistered(ParentException):
        pass

    @app.errorhandler(ParentException)
    def parent_exception_handler(e):
        assert isinstance(e, ParentException)
        return 'parent'

    @app.errorhandler(ChildExceptionRegistered)
    def child_exception_handler(e):
        assert isinstance(e, ChildExceptionRegistered)
        return 'child-registered'

    @app.route('/parent')
    def parent_test():
        raise ParentException()

    @app.route('/child-unregistered')
    def unregistered_test():
        raise ChildExceptionUnregistered()

    @app.route('/child-registered')
    def registered_test():
        raise ChildExceptionRegistered()

    c = app.test_client()

    assert c.get('/parent').data == b'parent'
    assert c.get('/child-unregistered').data == b'parent'
    assert c.get('/child-registered').data == b'child-registered'
def test_error_handler_http_subclass():
    """Subclasses of an HTTPException fall back to the status-code handler
    unless they register a handler of their own."""
    app = flask.Flask(__name__)

    class ForbiddenSubclassRegistered(Forbidden):
        pass

    class ForbiddenSubclassUnregistered(Forbidden):
        pass

    @app.errorhandler(403)
    def code_exception_handler(e):
        assert isinstance(e, Forbidden)
        return 'forbidden'

    @app.errorhandler(ForbiddenSubclassRegistered)
    def subclass_exception_handler(e):
        assert isinstance(e, ForbiddenSubclassRegistered)
        return 'forbidden-registered'

    @app.route('/forbidden')
    def forbidden_test():
        raise Forbidden()

    @app.route('/forbidden-registered')
    def registered_test():
        raise ForbiddenSubclassRegistered()

    @app.route('/forbidden-unregistered')
    def unregistered_test():
        raise ForbiddenSubclassUnregistered()

    c = app.test_client()

    assert c.get('/forbidden').data == b'forbidden'
    assert c.get('/forbidden-unregistered').data == b'forbidden'
    assert c.get('/forbidden-registered').data == b'forbidden-registered'
def test_error_handler_blueprint():
    """A blueprint-level 500 handler takes precedence over the app's for
    requests routed through that blueprint."""
    bp = flask.Blueprint('bp', __name__)

    @bp.errorhandler(500)
    def bp_exception_handler(e):
        return 'bp-error'

    @bp.route('/error')
    def bp_test():
        raise InternalServerError()

    app = flask.Flask(__name__)

    @app.errorhandler(500)
    def app_exception_handler(e):
        return 'app-error'

    @app.route('/error')
    def app_test():
        raise InternalServerError()

    app.register_blueprint(bp, url_prefix='/bp')

    c = app.test_client()

    assert c.get('/error').data == b'app-error'
    assert c.get('/bp/error').data == b'bp-error'
| bsd-3-clause |
janezhango/BigDataMachineLearning | py/jprobe.py | 1 | 18802 | #!/usr/bin/python
import random, jenkinsapi, getpass, re, os, argparse, shutil, json, logging, sys
import string
from jenkinsapi.jenkins import Jenkins
# only used when we wanted to see what objects were available (below)
from see import see
DO_LAST_GOOD = False
# using the env variables to force jenkinsapi to use proxy..but after to clear to avoid
# problems in other python stuff that uses requests!
def clear_env():
    """Remove the proxy environment variables so later ``requests`` calls
    don't accidentally go through the jenkins proxy."""
    # The two branches were copy-pasted; loop over both variables instead.
    # (print(...) with a single argument works in both Python 2 and 3.)
    for var in ('HTTPS_PROXY', 'HTTP_PROXY'):
        if os.environ.get(var):
            print("removing %s os env variable so requests won't use it" % var)
            del os.environ[var]
import sys  # NOTE(review): redundant -- sys is already imported at the top

def my_hook(type, value, traceback):
    # Custom excepthook: clear the proxy env vars even when the script
    # dies with an uncaught exception, then report it.
    # (NB: parameters shadow the builtin ``type`` and the ``traceback``
    # module; the trailing bare ``raise Exception`` discards the original
    # error -- left as-is to preserve behavior.)
    print 'hooked the exception so we can clear env variables'
    clear_env()
    print 'Type:', type
    print 'Value:', value
    print 'Traceback:', traceback
    raise Exception

sys.excepthook = my_hook
parse = argparse.ArgumentParser()
group = parse.add_mutually_exclusive_group()
group.add_argument('-e', help="job number from a list of ec2 known jobs", type=int, action='store', default=None)
group.add_argument('-x', help="job number from a list of 164 known jobs", type=int, action='store', default=None)
group.add_argument('-s', help="job number from a list of sm known jobs", type=int, action='store', default=None)
group.add_argument('-j', '--jobname', help="jobname. Correct url is found", action='store', default=None)
parse.add_argument('-l', '--logging', help="turn on logging.DEBUG msgs to see allUrls used", action='store_true')
parse.add_argument('-v', '--verbose', help="dump the last N stdout from the failed jobs", action='store_true')
group.add_argument('-c', help="do a hardwired special job copy between jenkins", type=int, action='store', default=None)
args = parse.parse_args()
print "creates jsandbox (cleaned), and puts aTxt.txt and aConsole.txt in there, along with artifacts"
print " also creates fails* and regress* in there"
# can refer to this by zero-based index with -n 0 or -n 1 etc
# or by job name with -j h2o_master_test
allowedJobsX = [
'h2o_master_test',
'h2o_release_tests',
'h2o_release_tests2',
'h2o_release_tests_164',
'h2o_release_tests_c10_only',
'h2o_perf_test',
'h2o_release_Runit',
]
allowedJobsE = [
'h2o.tests.single.jvm',
'h2o.tests.single.jvm.fvec',
'h2o.multi.vm.temporary',
'h2o.tests.ec2.multi.jvm',
'h2o.tests.ec2.multi.jvm.fvec',
'h2o.tests.ec2.hosts',
]
allowedJobsS = [
'sm_testdir_single_jvm',
'sm_testdir_single_jvm_fvec',
'sm_testdir_multi_jvm',
'sm_testdir_hosts',
'sm_test_NN2_mnist',
]
allUrls = {
'ec2': 'http://test.0xdata.com',
'164': 'http://192.168.1.164:8080',
'sm': 'http://10.71.0.163:8080',
}
all164Jobs = ['do all', 'h2o_master_test', 'h2o_master_test2', 'h2o_perf_test', 'h2o_private_json_vers_Runit', 'h2o_release_Runit', 'h2o_release_tests', 'h2o_release_tests2', 'h2o_release_tests_164', 'h2o_release_tests_c10_only', 'h2o_release_tests_cdh3', 'h2o_release_tests_cdh4', 'h2o_release_tests_cdh4_yarn', 'h2o_release_tests_cdh5', 'h2o_release_tests_cdh5_yarn', 'h2o_release_tests_hdp1.3', 'h2o_release_tests_hdp2.0.6', 'h2o_release_tests_mapr', 'selenium12']
allEc2Jobs = ['generic.h2o.build.branch', 'h2o.branch.api-dev', 'h2o.branch.cliffc-drf', 'h2o.branch.hilbert', 'h2o.branch.jobs', 'h2o.branch.jobs1', 'h2o.branch.json_versioning', 'h2o.branch.rel-ito', 'h2o.build', 'h2o.build.api-dev', 'h2o.build.gauss', 'h2o.build.godel', 'h2o.build.h2oscala', 'h2o.build.hilbert', 'h2o.build.jobs', 'h2o.build.master', 'h2o.build.rel-ito', 'h2o.build.rel-ivory', 'h2o.build.rel-iwasawa', 'h2o.build.rel-jacobi', 'h2o.build.rel-jordan', 'h2o.build.rest_api_versioning', 'h2o.build.ux-client', 'h2o.build.va_defaults_renamed', 'h2o.clone', 'h2o.datasets', 'h2o.download.latest', 'h2o.ec2.start', 'h2o.ec2.stop', 'h2o.findbugs', 'h2o.multi.vm.temporary', 'h2o.multi.vm.temporary.cliffc-no-limits', 'h2o.nightly', 'h2o.nightly.1', 'h2o.nightly.cliffc-lock', 'h2o.nightly.ec2', 'h2o.nightly.ec2.cliffc-no-limits', 'h2o.nightly.ec2.erdos', 'h2o.nightly.ec2.hilbert', 'h2o.nightly.ec2.rel-ito', 'h2o.nightly.ec2.rel-jacobi', 'h2o.nightly.ec2.rel-jordan', 'h2o.nightly.fourier', 'h2o.nightly.godel', 'h2o.nightly.multi.vm', 'h2o.nightly.rel-ivory', 'h2o.nightly.rel-iwasawa', 'h2o.nightly.rel-jacobi', 'h2o.nightly.rel-jordan', 'h2o.nightly.va_defaults_renamed', 'h2o.post.push', 'h2o.private.nightly', 'h2o.tests.ec2', 'h2o.tests.ec2.hosts', 'h2o.tests.ec2.multi.jvm', 'h2o.tests.ec2.multi.jvm.fvec', 'h2o.tests.golden', 'h2o.tests.junit', 'h2o.tests.multi.jvm', 'h2o.tests.multi.jvm.fvec', 'h2o.tests.single.jvm', 'h2o.tests.single.jvm.fvec', 'h2o.tests.test']
allSmJobs = [
'sm_testdir_single_jvm',
'sm_testdir_single_jvm_fvec',
'sm_testdir_multi_jvm',
'sm_testdir_hosts',
'sm_test_NN2_mnist',
]
# jenkinsapi:
# This library wraps up that interface as more
# conventional python objects in order to make many
# Jenkins oriented tasks easier to automate.
# http://pythonhosted.org//jenkinsapi
# https://pypi.python.org/pypi/jenkinsapi
# Project source code: github: https://github.com/salimfadhley/jenkinsapi
# Project documentation: https://jenkinsapi.readthedocs.org/en/latest/
#************************************************
if args.logging:
logging.basicConfig(level=logging.DEBUG)
if args.jobname and (args.e or args.x or args.s):
raise Exception("Don't use both -j and -x or -e or -s args")
# default ec2 0
jobname = None
if args.e is not None:
if args.e<0 or args.e>(len(allowedJobsE)-1):
raise Exception("ec2 job number %s is outside allowed range: 0-%s" % \
(args.e, len(allowedJobsE)-1))
jobname = allowedJobsE[args.e]
if args.x is not None:
if args.x<0 or args.x>(len(allowedJobsX)-1):
raise Exception("0xdata job number %s is outside allowed range: 0-%s" % \
(args.x, len(allowedJobsX)-1))
jobname = allowedJobsX[args.x]
if args.s is not None:
if args.s<0 or args.s>(len(allowedJobsS)-1):
raise Exception("sm job number %s is outside allowed range: 0-%s" % \
(args.s, len(allowedJobsS)-1))
jobname = allowedJobsS[args.s]
if args.jobname:
if args.jobname not in allowedJobs:
raise Exception("%s not in list of legal jobs" % args.jobname)
jobname = args.jobname
if not (args.jobname or args.x or args.e or args.s):
# prompt the user
subtract = 0
prefix = "-e"
eDone = False
xDone = False
while not jobname:
allAllowedJobs = allowedJobsE + allowedJobsX + allowedJobsS
for j, job in enumerate(allAllowedJobs):
# first boundary
if not eDone and j==(subtract + len(allowedJobsE)):
subtract += len(allowedJobsE)
prefix = "-x"
eDone = True
# second boundary
if not xDone and j==(subtract + len(allowedJobsX)):
subtract += len(allowedJobsX)
prefix = "-s"
xDone = True
print prefix, j-subtract, " [%s]: %s" % (j, job)
userInput = int(raw_input("Enter number (0 to %s): " % (len(allAllowedJobs)-1) ))
if userInput >=0 and userInput <= len(allAllowedJobs):
jobname = allAllowedJobs[userInput]
# defaults
if jobname in allEc2Jobs:
machine = 'ec2'
elif jobname in all164Jobs:
machine = '164'
elif jobname in allSmJobs:
machine = 'sm'
print "Setting up proxy server for sm"
os.environ['HTTP_PROXY'] = 'http://172.16.0.3:8888'
os.environ['HTTPS_PROXY'] = 'https://172.16.0.3:8888'
else:
raise Exception("%s not in lists of known jobs" % jobname)
if machine not in allUrls:
raise Exception("%s not in allUrls dict" % machine)
jenkins_url = allUrls[machine]
print "machine:", machine
#************************************************
def clean_sandbox(LOG_DIR="sandbox"):
    """Wipe LOG_DIR (if present) and recreate it empty; return its name."""
    exists = os.path.exists
    if exists(LOG_DIR):
        shutil.rmtree(LOG_DIR)
    # rmtree can fail partway through; only mkdir once the tree is really gone
    if not exists(LOG_DIR):
        os.mkdir(LOG_DIR)
    return LOG_DIR
#************************************************
# get the username/pswd from files in the user's .ec2 dir (don't want cleartext here)
# prompt if doesn't exist
def login(machine='164'):
    """Return (username, password) for the given machine's Jenkins.

    Reads ~/.ec2/jenkins_user_<machine> and ~/.ec2/jenkins_pswd_<machine>;
    falls back to an interactive prompt for whichever file is missing.
    """
    def getit(k):
        # Return the first line of file k, or None if the file is absent.
        if not os.path.isfile(k):
            print "you probably should create this file to avoid typing %s" % k
            return None
        else:
            with open(k) as f:
                lines = f.read().splitlines()
            return lines[0]
    home = os.path.expanduser("~")
    username = getit(home + '/.ec2/jenkins_user_' + machine)
    pswd = getit(home + '/.ec2/jenkins_pswd_' + machine)
    if not username:
        username = raw_input("Username [%s]: " % getpass.getuser())
    if not pswd:
        pswd = getpass.getpass()
    return username, pswd
#************************************************8
# Module-level: log in to the machine chosen above and start a fresh sandbox.
username, password = login(machine)
LOG_DIR = clean_sandbox("sandbox")
def dump_json(j):
    """Pretty-print *j* as JSON with stable (sorted) key order."""
    pretty_opts = {"sort_keys": True, "indent": 2}
    return json.dumps(j, **pretty_opts)
#************************************************8
# Connect to Jenkins and pick the build whose results will be mined below.
J = Jenkins(jenkins_url, username, password)
print "\nCurrent jobs available at %s" % jenkins_url
print J.keys()
print "\nChecking this job:", J[jobname]
job = J[jobname]
print "\nGetting %s job config" % jobname
# NOTE(review): this prints the bound method object, not the config text;
# job.get_config() looks like the intent -- confirm before relying on it.
print job.get_config
print "\nlast good build:"
lgb = job.get_last_good_build()
print "\nlast good build revision:"
print lgb.get_revision()
from jenkinsapi.api import get_latest_complete_build
from jenkinsapi.api import get_latest_test_results
# print "************************HELLO****************************"
# print get_latest_complete_build(jenkins_url, jobname, username=username, password=password)
# print "************************HELLO****************************"
# get_latest_test_results(jenkinsurl, jobname, username=None, password=None)[source]
# search_artifact_by_regexp.py
# Dead code: `1==0` never runs; kept as a worked example of artifact search.
if 1==0:
    expr = "commands.log"
    print("testing search_artifact_by_regexp with expression %s") % expr
    from jenkinsapi.api import search_artifact_by_regexp
    artifact_regexp = re.compile(expr) # A file name I want.
    result = search_artifact_by_regexp(jenkins_url, jobname, artifact_regexp)
    print("tested search_artifact_by_regexp", (repr(result)))
# print "last_stable_buildnumber", job.get_last_stable_buildnumber()
print "last_good_buildnumber", job.get_last_good_buildnumber()
# print "last_failed_buildnumber", job.get_last_failed_buildnumber()
print "last_buildnumber", job.get_last_buildnumber()
# Choose which build's result set to analyze (see DO_LAST_GOOD flag above).
if DO_LAST_GOOD:
    print "Using last_good_buildnumber %s for result set" % job.get_last_good_buildnumber()
    build = job.get_build(job.get_last_good_buildnumber())
else:
    print "Using last_buildnumber %s for result set" % job.get_last_buildnumber()
    build = job.get_build(job.get_last_buildnumber())
af = build.get_artifacts()
dict_af = build.get_artifact_dict()
# for looking at object in json
# import h2o_util
# s = h2o_util.json_repr(dict_af, curr_depth=0, max_depth=12)
# print dump_json(s)
# Collect the build's metadata for the summary printed at the end.
buildstatus = build.get_status()
print "build get_status", buildstatus
buildname = build.name
print "build name", buildname
buildnumber = build.get_number()
print "build number", buildnumber
buildrevision = build.get_revision()
print "build revision", buildrevision
buildbranch = build.get_revision_branch()
print "build revision branch", buildbranch
buildduration = build.get_duration()
print "build duration", buildduration
buildupstream = build.get_upstream_job_name()
print "build upstream job name", buildupstream
buildgood = build.is_good()
print "build is_good", buildgood
buildtimestamp = build.get_timestamp()
print "build timestamp", buildtimestamp
# Save the whole console log into the sandbox.
consoleTxt = open(LOG_DIR + '/console.txt', "a")
print "getting build console (how to buffer this write?)"
print "probably better to figure how to save it as file"
c = build.get_console()
consoleTxt.write(c)
consoleTxt.close()
print "build has result set", build.has_resultset()
print "build get result set"
rs = build.get_resultset()
print "build result set name", rs.name
# print "build result set items", rs.items()
print #****************************************
# print dump_json(item)
# print "build result set keys", rs.keys()
aTxt = open(LOG_DIR + '/artifacts.txt', "a")
# have just a json string in the result set?
# rs.items is a generator?
#****************************************************************************
PRINTALL = False
# keep count of status counts
# 2014-03-19 07:26:15+00:00
# buildtimestampe is a datetime object
see(buildtimestamp)
t = buildtimestamp
# hour minute
hm = "%s_%s" % (t.hour, t.minute)
# hour minute second
# NOTE(review): hms is not used anywhere in the visible script.
hms = "%s_%s" % (hm, t.second)
# Per-status report files are tagged with job name, build number and time.
failName = "%s_%s_%s_%s%s" % ("fail", jobname, buildnumber, hm, ".txt")
print "failName:", failName
regressName = "%s_%s_%s_%s%s" % ("regress", jobname, buildnumber, hm, ".txt")
print "regressName:", regressName
fixedName = "%s_%s_%s_%s%s" % ("fixed", jobname, buildnumber, hm, ".txt")
print "fixedName:", fixedName
# Running count of test results per status (FAILED/REGRESSION/FIXED/...).
stats = {}
def fprint (*args):
    # Write args (space-joined, str-converted) to the currently open global
    # report file `fTxt`, and echo the same line to stdout.
    # emulate printing each as string, then join with spaces
    s = ["%s" % a for a in args]
    line = " ".join(s)
    fTxt.write(line + "\n")
    print line
def printStuff():
    # Dump details of one test result to the current report file.
    # Relies on module-level globals set in the result loop below:
    # i (index), k/v (result name/object), jobname, args, and fTxt.
    e1 = "\n******************************************************************************"
    e2 = "%s %s %s" % (i, jobname, v)
    fprint(e1)
    fprint(e2)
    # print "\n", k, "\n"
    # print "\n", v, "\n"
    # to see what you can get
    # print see(v)
    # print dir(v)
    # print vars(v)
    # .age .className .duration .errorDetails .errorStackTrace .failedSince
    # .identifier() .name .skipped .skippedMessage .status .stderr .stdout
    fprint (i, "v.duration", v.duration)
    fprint (i, "v.errorStackTrace", v.errorStackTrace)
    fprint (i, "v.failedSince", v.failedSince)
    if args.verbose:
        fprint (i, "v.stderr", v.stderr)
    # lines = v.stdout.splitlines()
    # keep newlines in the list elements
    if not v.stdout:
        fprint ("v.stdout is empty")
    else:
        fprint ("len(v.stdout):", len(v.stdout))
        # have to fix the \n and \tat in the strings
        stdout = v.stdout
        # json string has the actual '\' and 'n' or 'tat' chars
        stdout = string.replace(stdout,'\\n', '\n');
        stdout = string.replace(stdout,'\\tat', '\t');
        # don't need double newlines
        stdout = string.replace(stdout,'\n\n', '\n');
        lineList = stdout.splitlines()
        fprint ("len(lineList):", len(lineList))
        # Only show the tail (last 20 lines at most) of the test's stdout.
        num = min(20, len(lineList))
        if num!=0:
            # print i, "Last %s lineList of stdout %s" % (num, "\n".join(lineList[-num]))
            fprint (i, "Last %s lineList of stdout\n" % num)
            fprint ("\n".join(lineList[-num:]))
        else:
            fprint ("v.stdout is empty")
#******************************************************
# Walk every test result in the build: bucket by status, append everything
# to artifacts.txt, and write FAILED/REGRESSION/FIXED details to their
# per-status report files via printStuff().
for i, (k, v) in enumerate(rs.items()):
    if v.status in stats:
        stats[v.status] += 1
    else:
        stats[v.status] = 1
    # print rs.name
    e1 = "\n******************************************************************************"
    e2 = "%s %s %s" % (i, jobname, v)
    aTxt.write(e1+"\n")
    aTxt.write(e2+"\n")
    # only if not PASSED
    if v.status == 'FAILED':
        fTxt = open(LOG_DIR + "/" + failName, "a")
        printStuff()
        fTxt.close()
    if v.status == 'REGRESSION':
        fTxt = open(LOG_DIR + "/" + regressName, "a")
        printStuff()
        fTxt.close()
    if v.status == 'FIXED':
        fTxt = open(LOG_DIR + "/" + fixedName, "a")
        printStuff()
        fTxt.close()
    if PRINTALL:
        fprint (i, "k", k)
        fprint (i, "v", v)
        fprint (i, "v.errorDetails", v.errorDetails)
        fprint (i, "v.age", v.age)
        fprint (i, "v.className", v.className)
        fprint (i, "v.identifier()", v.identifier())
        fprint (i, "v.name", v.name)
        fprint (i, "v.skipped", v.age)
        fprint (i, "v.skippedMessage", v.skippedMessage)
        fprint (i, "v.status", v.status)
        fprint (i, "v.stdout", v.stdout)
#****************************************************************************
# print "dict_af", dict_af
# Record each artifact's name/url in artifacts.txt (download is commented out).
if 1==1:
    for a in af:
        # print "a.keys():", a.keys()
        # txt = a.get_data()
        e = "%s %s %s %s\n" % ("#", a.filename, a.url, "########### artifact saved ####################")
        # print e,
        aTxt.write(e+"\n")
        # get the h2o output from the runit runs
        # a.save_to_dir(LOG_DIR)
        # NOTE(review): consoleTxt was already closed above; closing it again
        # inside this loop is redundant (harmless on a closed file object).
        consoleTxt.close()
        # print txt
    # a.save_to_dir('./sandbox')
    # print txt[0]
aTxt.close()
# Final human-readable summary of the analyzed build.
print "#***********************************************"
print "Build:", buildname
print buildtimestamp
print "Status:", buildstatus
if buildgood:
    print "Build is good"
else:
    print "Build is bad"
print "Build number", buildnumber
# print buildrevision
print buildbranch
print "Duration", buildduration
print "Upstream job", buildupstream
print "Test summary"
for s in stats:
    print s, stats[s]
# rename the sandbox
# Keep the results under a per-build directory name; clobber any old copy.
dirname = "%s_%s_%s_%s" % ("sandbox", jobname, buildnumber, hm)
if os.path.exists(dirname):
    shutil.rmtree(dirname)
os.rename(LOG_DIR, dirname)
print "Results are in", dirname
print "#***********************************************"
clear_env()
# from jenkins.py, we can copy jobs?
# def jobs(self):
# def get_jobs(self):
# def get_jobs_info(self):
# def get_job(self, jobname):
# def has_job(self, jobname):
# def create_job(self, jobname, config_):
# Create a job
# :param jobname: name of new job, str
# :param config: configuration of new job, xml
# :return: new Job obj
# def copy_job(self, jobname, newjobname):
# def build_job(self, jobname, params=None):
# Invoke a build by job name
# :param jobname: name of exist job, str
# :param params: the job params, dict
# :return: none
# def delete_job(self, jobname):
# def rename_job(self, jobname, newjobname):
# load config calls get_config?
# def load_config(self):
# def get_config(self):
# '''Returns the config.xml from the job'''
# def get_config_xml_url(self):
# def update_config(self, config):
# def create(self, job_name, config):
# Create a job
# :param jobname: name of new job, str
# :param config: configuration of new job, xml
# :return: new Job obj
| apache-2.0 |
mmagnus/rna-pdb-tools | rna_tools/tools/rna_alignment/utils/rna_alignment_process_id.py | 2 | 1932 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
"""
from __future__ import print_function
import argparse
def get_parser():
    """Build the command-line parser for the alignment id processor."""
    p = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawDescriptionHelpFormatter)
    # Both flags are simple on/off switches (original help text kept as-is).
    for short_flag, long_flag in (("-v", "--verbose"), ("-d", "--debug")):
        p.add_argument(short_flag, long_flag,
                       action="store_true", help="be verbose")
    p.add_argument('--id-width', type=int, default=70)
    p.add_argument('--sep', default='|')
    p.add_argument("alignment")
    return p
if __name__ == '__main__':
    parser = get_parser()
    args = parser.parse_args()
    # Width of the left-justified id column in the reformatted alignment.
    name_width = args.id_width
    for l in open(args.alignment):
        if l:
            if args.debug:
                print(l)
            # Sequence lines: everything that is not Stockholm markup.
            if not l.startswith('#') and not l.startswith('//'):
                if args.debug: print(l.split())
                id, seq = l.split()
                # Brackets in ids become '|' separators before splitting.
                id = id.replace('[', '|').replace(']', '|')
                if args.debug: print(id)
                species, group = ('','')
                if len(id.split('|')) > 2:
                    species, group = id.split('|')[:2]
                    if args.debug: print(group, species)
                    id = group + '|' + species
                else:
                    # NOTE(review): this keeps only the FIRST CHARACTER of
                    # the id; id.split('|')[0] (the whole first field) looks
                    # like the intent -- confirm against expected output.
                    id = id[0]
                # Leishmania-major-strain-Friedlin[Euglenozoa]FR796420.1|1.0|CUCU-AUG/1-7
                line = id.ljust(name_width) + seq.strip()
                print(line)
            elif '#=GC RF_cons' in l:
                # Re-pad the consensus RF line to the same id column width.
                ss = l.replace('#=GC RF_cons', '')
                print('#=GC RF_cons'.ljust(name_width) + ss.strip())
            elif '#=GC SS_cons' in l:
                # Re-pad the consensus secondary-structure line likewise.
                ss = l.replace('#=GC SS_cons', '')
                print('#=GC SS_cons'.ljust(name_width) + ss.strip())
            else:
                print(l.strip())
| gpl-3.0 |
alexlee188/ghpsdr3-alex | trunk/src/sdr1000/test/src/cygwin/sdr1khw.py | 12 | 5048 | # This file was created automatically by SWIG 1.3.29.
# Don't modify this file, modify the SWIG interface instead.
# This file is compatible with both classic and new-style classes.
import _sdr1khw
import new
new_instancemethod = new.instancemethod
# SWIG runtime helpers: route attribute access through the generated
# __swig_setmethods__/__swig_getmethods__ tables. This file is generated
# by SWIG (see header) -- do not edit by hand.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    # Setter used by the generated proxy classes; static=1 forbids adding
    # attributes that are not declared on the wrapped C++ class.
    if (name == "thisown"): return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'PySwigObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static) or hasattr(self,name):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
    # Non-static variant: always allows setting new attributes.
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
    # Getter counterpart; consults the generated getter table.
    if (name == "thisown"): return self.this.own()
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    raise AttributeError,name
def _swig_repr(self):
    # repr() that survives a missing/dead underlying C pointer.
    try: strthis = "proxy of " + self.this.__repr__()
    except: strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
import types
try:
    _object = types.ObjectType
    _newclass = 1
except AttributeError:
    # Very old Pythons without new-style classes: fall back to classic classes.
    class _object : pass
    _newclass = 0
del types
# Constants re-exported from the compiled _sdr1khw extension module
# (SWIG-generated; do not edit by hand).
PIO_IC01 = _sdr1khw.PIO_IC01
PIO_IC03 = _sdr1khw.PIO_IC03
PIO_IC08 = _sdr1khw.PIO_IC08
PIO_IC11 = _sdr1khw.PIO_IC11
PIO_NONE = _sdr1khw.PIO_NONE
RFE_IC07 = _sdr1khw.RFE_IC07
RFE_IC09 = _sdr1khw.RFE_IC09
RFE_IC10 = _sdr1khw.RFE_IC10
RFE_IC11 = _sdr1khw.RFE_IC11
SER = _sdr1khw.SER
SCK = _sdr1khw.SCK
SCLR_NOT = _sdr1khw.SCLR_NOT
DCDR_NE = _sdr1khw.DCDR_NE
DDSWRB = _sdr1khw.DDSWRB
DDSRESET = _sdr1khw.DDSRESET
COMP_PD = _sdr1khw.COMP_PD
BYPASS_PLL = _sdr1khw.BYPASS_PLL
BYPASS_SINC = _sdr1khw.BYPASS_SINC
class SDR1000(_object):
    # SWIG proxy for the C++ SDR1000 class; every method delegates to the
    # _sdr1khw extension module. Generated code -- do not edit by hand.
    __swig_setmethods__ = {}
    __setattr__ = lambda self, name, value: _swig_setattr(self, SDR1000, name, value)
    __swig_getmethods__ = {}
    __getattr__ = lambda self, name: _swig_getattr(self, SDR1000, name)
    __repr__ = _swig_repr
    def __init__(self, *args):
        # Wrap (or re-wrap) the underlying C++ object pointer.
        this = _sdr1khw.new_SDR1000(*args)
        try: self.this.append(this)
        except: self.this = this
    __swig_destroy__ = _sdr1khw.delete_SDR1000
    __del__ = lambda self : None;
    def Latch(*args): return _sdr1khw.SDR1000_Latch(*args)
    def SRLoad(*args): return _sdr1khw.SDR1000_SRLoad(*args)
    def WriteDDS(*args): return _sdr1khw.SDR1000_WriteDDS(*args)
    def ResetDDS(*args): return _sdr1khw.SDR1000_ResetDDS(*args)
    # sdr1kusb / usb are exposed as properties on new-style classes only.
    __swig_setmethods__["sdr1kusb"] = _sdr1khw.SDR1000_sdr1kusb_set
    __swig_getmethods__["sdr1kusb"] = _sdr1khw.SDR1000_sdr1kusb_get
    if _newclass:sdr1kusb = property(_sdr1khw.SDR1000_sdr1kusb_get, _sdr1khw.SDR1000_sdr1kusb_set)
    __swig_setmethods__["usb"] = _sdr1khw.SDR1000_usb_set
    __swig_getmethods__["usb"] = _sdr1khw.SDR1000_usb_get
    if _newclass:usb = property(_sdr1khw.SDR1000_usb_get, _sdr1khw.SDR1000_usb_set)
    def StandBy(*args): return _sdr1khw.SDR1000_StandBy(*args)
    def PowerOn(*args): return _sdr1khw.SDR1000_PowerOn(*args)
    def StatusPort(*args): return _sdr1khw.SDR1000_StatusPort(*args)
    def UpdateHW(*args): return _sdr1khw.SDR1000_UpdateHW(*args)
    def SetFreq(*args): return _sdr1khw.SDR1000_SetFreq(*args)
    def SetBPF(*args): return _sdr1khw.SDR1000_SetBPF(*args)
    def SetLPF(*args): return _sdr1khw.SDR1000_SetLPF(*args)
    def SetPALPF(*args): return _sdr1khw.SDR1000_SetPALPF(*args)
    def SetMute(*args): return _sdr1khw.SDR1000_SetMute(*args)
    def SetINAOn(*args): return _sdr1khw.SDR1000_SetINAOn(*args)
    def SetATTOn(*args): return _sdr1khw.SDR1000_SetATTOn(*args)
    def SetTRX_TR(*args): return _sdr1khw.SDR1000_SetTRX_TR(*args)
    def SetRFE_TR(*args): return _sdr1khw.SDR1000_SetRFE_TR(*args)
    def SetPA_TR(*args): return _sdr1khw.SDR1000_SetPA_TR(*args)
    def SetXVTR_TR(*args): return _sdr1khw.SDR1000_SetXVTR_TR(*args)
    def SetXVTR_RF(*args): return _sdr1khw.SDR1000_SetXVTR_RF(*args)
    def SetX2(*args): return _sdr1khw.SDR1000_SetX2(*args)
    def SetImpOn(*args): return _sdr1khw.SDR1000_SetImpOn(*args)
    def SetPA_Bias(*args): return _sdr1khw.SDR1000_SetPA_Bias(*args)
    def SetClockRefFreq(*args): return _sdr1khw.SDR1000_SetClockRefFreq(*args)
    def SetFreqCalOffset(*args): return _sdr1khw.SDR1000_SetFreqCalOffset(*args)
    def SetSpurReductionMask(*args): return _sdr1khw.SDR1000_SetSpurReductionMask(*args)
    def DoImpulse(*args): return _sdr1khw.SDR1000_DoImpulse(*args)
    def PA_ReadADC(*args): return _sdr1khw.SDR1000_PA_ReadADC(*args)
    def ATU_Tune(*args): return _sdr1khw.SDR1000_ATU_Tune(*args)
    def ReadDDSReg(*args): return _sdr1khw.SDR1000_ReadDDSReg(*args)
    def WriteDDSReg(*args): return _sdr1khw.SDR1000_WriteDDSReg(*args)
SDR1000_swigregister = _sdr1khw.SDR1000_swigregister
SDR1000_swigregister(SDR1000)
cvar = _sdr1khw.cvar
| gpl-3.0 |
damdam-s/OCB | addons/account/wizard/account_report_common_journal.py | 385 | 2942 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_common_journal_report(osv.osv_memory):
    """Shared wizard for the journal reports; adds the currency option and
    resolves the selected date/period filter into concrete period ids."""
    _name = 'account.common.journal.report'
    _description = 'Account Common Journal Report'
    _inherit = "account.common.report"
    _columns = {
        'amount_currency': fields.boolean("With Currency", help="Print Report with the currency column if the currency differs from the company currency."),
    }

    def _build_contexts(self, cr, uid, ids, data, context=None):
        """Extend the parent report context with the period ids that match
        the wizard's date or period filter."""
        if context is None:
            context = {}
        result = super(account_common_journal_report, self)._build_contexts(
            cr, uid, ids, data, context=context)
        form = data['form']
        report_filter = form['filter']
        if report_filter == 'filter_date':
            # Collect every period touched by a move line in the date range.
            cr.execute(
                'SELECT period_id FROM account_move_line WHERE date >= %s AND date <= %s',
                (form['date_from'], form['date_to']))
            result['periods'] = [row[0] for row in cr.fetchall()]
        elif report_filter == 'filter_period':
            period_obj = self.pool.get('account.period')
            result['periods'] = period_obj.build_ctx_periods(
                cr, uid, form['period_from'], form['period_to'])
        return result

    def pre_print_report(self, cr, uid, ids, data, context=None):
        """Fill data['form'] with the currency flag and the journal.period
        ids matching the selected journals and periods."""
        if context is None:
            context = {}
        form = data['form']
        form.update(self.read(cr, uid, ids, ['amount_currency'], context=context)[0])
        # Explicit fiscal year if one was chosen, otherwise all draft years.
        if form['fiscalyear_id']:
            fy_ids = [form['fiscalyear_id']]
        else:
            fy_ids = self.pool.get('account.fiscalyear').search(
                cr, uid, [('state', '=', 'draft')], context=context)
        period_list = form['periods'] or self.pool.get('account.period').search(
            cr, uid, [('fiscalyear_id', 'in', fy_ids)], context=context)
        form['active_ids'] = self.pool.get('account.journal.period').search(
            cr, uid,
            [('journal_id', 'in', form['journal_ids']), ('period_id', 'in', period_list)],
            context=context)
        return data
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
EzyInsights/Diamond | src/collectors/userscripts/userscripts.py | 51 | 3524 | # coding=utf-8
"""
Runs third party scripts and collects their output.
Scripts need to be +x and should output metrics in the form of
```
metric.path.a 1
metric.path.b 2
metric.path.c 3
```
They are not passed any arguments and if they return an error code,
no metrics are collected.
#### Dependencies
* [subprocess](http://docs.python.org/library/subprocess.html)
"""
import diamond.collector
import diamond.convertor
import os
import subprocess
class UserScriptsCollector(diamond.collector.Collector):
    """Runs each executable file found in `scripts_path` and publishes every
    valid "metric.path value" line the script writes to stdout."""
    def get_default_config_help(self):
        # Extend the base collector's config help with our one option.
        config_help = super(UserScriptsCollector,
                            self).get_default_config_help()
        config_help.update({
            'scripts_path': "Path to find the scripts to run",
        })
        return config_help
    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(UserScriptsCollector, self).get_default_config()
        config.update({
            'path': '.',
            'scripts_path': '/etc/diamond/user_scripts/',
            'floatprecision': 4,
        })
        return config
    def collect(self):
        # Run every executable script and publish its metric lines.
        # Scripts that fail to launch, return no output, or print invalid
        # lines are logged and skipped -- collection never raises.
        scripts_path = self.config['scripts_path']
        # Silently do nothing when the directory is absent or unreadable.
        if not os.access(scripts_path, os.R_OK):
            return None
        for script in os.listdir(scripts_path):
            absolutescriptpath = os.path.join(scripts_path, script)
            executable = os.access(absolutescriptpath, os.X_OK)
            is_file = os.path.isfile(absolutescriptpath)
            if is_file:
                if not executable:
                    self.log.info("%s is not executable" % absolutescriptpath)
                    continue
            else:
                # Don't bother logging skipped non-file files (typically
                # directories)
                continue
            out = None
            self.log.debug("Executing %s" % absolutescriptpath)
            try:
                proc = subprocess.Popen([absolutescriptpath],
                                        stdout=subprocess.PIPE,
                                        stderr=subprocess.PIPE)
                (out, err) = proc.communicate()
            except subprocess.CalledProcessError, e:
                self.log.error("%s error launching: %s; skipping" %
                               (absolutescriptpath, e))
                continue
            # NOTE(review): a nonzero exit code is logged but the output is
            # still parsed below -- confirm whether it should `continue`.
            if proc.returncode:
                self.log.error("%s return exit value %s; skipping" %
                               (absolutescriptpath, proc.returncode))
            if not out:
                self.log.info("%s return no output" % absolutescriptpath)
                continue
            if err:
                self.log.error("%s return error output: %s" %
                               (absolutescriptpath, err))
            # Use filter to remove empty lines of output
            for line in filter(None, out.split('\n')):
                # Ignore invalid lines
                try:
                    name, value = line.split()
                    float(value)
                except ValueError:
                    self.log.error("%s returned error output: %s" %
                                   (absolutescriptpath, line))
                    continue
                name, value = line.split()
                # Integers publish with precision 0; floats use the
                # configured precision.
                floatprecision = 0
                if "." in value:
                    floatprecision = self.config['floatprecision']
                self.publish(name, value, precision=floatprecision)
| mit |
meabsence/python-for-android | python-build/python-libs/gdata/tests/gdata_tests/blogger/service_test.py | 128 | 3578 | #!/usr/bin/python
#
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests to exercise server interactions for blogger."""
__author__ = 'api.jscudder (Jeffrey Scudder)'
import unittest
import getpass
import atom
from gdata import test_data
import gdata.blogger
import gdata.blogger.service
username = ''
password = ''
test_blog_id = ''
class BloggerCrudTests(unittest.TestCase):
  """Live create/update/delete tests against the Blogger API.

  Requires the module-level username/password/test_blog_id to be filled in
  (done interactively in __main__); every test talks to the real service.
  """
  def setUp(self):
    self.client = gdata.blogger.service.BloggerService(email=username,
        password=password, source='GoogleInc-PythonBloggerUnitTests-1')
    # TODO: if the test_blog_id is not set, get the list of the user's blogs
    # and prompt for which blog to add the test posts to.
    self.client.ProgrammaticLogin()
  def testPostDraftUpdateAndDelete(self):
    # Round-trip: create a draft post, update it, then delete it.
    new_entry = gdata.blogger.BlogPostEntry(title=atom.Title(
        text='Unit Test Post'))
    new_entry.content = atom.Content('text', None, 'Hello World')
    # Make this post a draft so it will not appear publicly on the blog.
    new_entry.control = atom.Control(draft=atom.Draft(text='yes'))
    new_entry.AddLabel('test')
    posted = self.client.AddPost(new_entry, blog_id=test_blog_id)
    self.assertEquals(posted.title.text, new_entry.title.text)
    # Should be one category in the posted entry for the 'test' label.
    self.assertEquals(len(posted.category), 1)
    self.assert_(isinstance(posted, gdata.blogger.BlogPostEntry))
    # Change the title and add more labels.
    posted.title.text = 'Updated'
    posted.AddLabel('second')
    updated = self.client.UpdatePost(entry=posted)
    self.assertEquals(updated.title.text, 'Updated')
    self.assertEquals(len(updated.category), 2)
    # Cleanup and delete the draft blog post.
    self.client.DeletePost(entry=posted)
  def testAddComment(self):
    # Create a test post to add comments to.
    new_entry = gdata.blogger.BlogPostEntry(title=atom.Title(
        text='Comments Test Post'))
    new_entry.content = atom.Content('text', None, 'Hello Comments')
    target_post = self.client.AddPost(new_entry, blog_id=test_blog_id)
    blog_id = target_post.GetBlogId()
    post_id = target_post.GetPostId()
    new_comment = gdata.blogger.CommentEntry()
    new_comment.content = atom.Content(text='Test comment')
    posted = self.client.AddComment(new_comment, blog_id=blog_id,
        post_id=post_id)
    self.assertEquals(posted.content.text, new_comment.content.text)
    # Cleanup and delete the comment test blog post.
    self.client.DeletePost(entry=target_post)
class BloggerQueryTests(unittest.TestCase):
  """Placeholder query-construction tests (not yet implemented).

  Bug fix: the original defined `testConstructBlogQuery` three times, so the
  first two (empty) definitions were silently shadowed and unittest could
  only ever discover one stub. Each placeholder now has a distinct name.
  """
  def testConstructBlogQuery(self):
    pass
  def testConstructBlogPostQuery(self):
    pass
  def testConstructBlogCommentQuery(self):
    pass
if __name__ == '__main__':
  # These tests hit the live Blogger API and mutate data; prompt for
  # credentials and a target blog id before running the suite.
  print ('NOTE: Please run these tests only with a test account. ' +
         'The tests may delete or update your data.')
  username = raw_input('Please enter your username: ')
  password = getpass.getpass()
  test_blog_id = raw_input('Please enter the blog id for the test blog: ')
  unittest.main()
| apache-2.0 |
OpenUpgrade/OpenUpgrade | openerp/addons/base/res/res_font.py | 322 | 7559 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013 OpenERP SA (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from reportlab.pdfbase import ttfonts
from openerp.modules.registry import RegistryManager
from openerp.osv import fields, osv
from openerp.report.render.rml2pdf import customfonts
import logging
"""This module allows the mapping of some system-available TTF fonts to
the reportlab engine.
This file could be customized per distro (although most Linux/Unix ones)
should have the same filenames, only need the code below).
Due to an awful configuration that ships with reportlab at many Linux
and Ubuntu distros, we have to override the search path, too.
"""
_logger = logging.getLogger(__name__)
# Alternatives for the [broken] builtin PDF fonts. Default order chosen to match
# the pre-v8 mapping from openerp.report.render.rml2pdf.customfonts.CustomTTFonts.
# Format: [ (BuiltinFontFamily, mode, [AlternativeFontName, ...]), ...]
BUILTIN_ALTERNATIVES = [
('Helvetica', "normal", ["DejaVuSans", "LiberationSans"]),
('Helvetica', "bold", ["DejaVuSans-Bold", "LiberationSans-Bold"]),
('Helvetica', 'italic', ["DejaVuSans-Oblique", "LiberationSans-Italic"]),
('Helvetica', 'bolditalic', ["DejaVuSans-BoldOblique", "LiberationSans-BoldItalic"]),
('Times', 'normal', ["LiberationSerif", "DejaVuSerif"]),
('Times', 'bold', ["LiberationSerif-Bold", "DejaVuSerif-Bold"]),
('Times', 'italic', ["LiberationSerif-Italic", "DejaVuSerif-Italic"]),
('Times', 'bolditalic', ["LiberationSerif-BoldItalic", "DejaVuSerif-BoldItalic"]),
('Courier', 'normal', ["FreeMono", "DejaVuSansMono"]),
('Courier', 'bold', ["FreeMonoBold", "DejaVuSansMono-Bold"]),
('Courier', 'italic', ["FreeMonoOblique", "DejaVuSansMono-Oblique"]),
('Courier', 'bolditalic', ["FreeMonoBoldOblique", "DejaVuSansMono-BoldOblique"]),
]
class res_font(osv.Model):
    # Registry of TTF fonts found on the server's filesystem; mirrored into
    # reportlab's customfonts.CustomTTFonts list for RML report rendering.
    _name = "res.font"
    _description = 'Fonts available'
    _order = 'family,name,id'
    _rec_name = 'family'
    _columns = {
        'family': fields.char("Font family", required=True),
        'name': fields.char("Font Name", required=True),
        'path': fields.char("Path", required=True),
        'mode': fields.char("Mode", required=True),
    }
    _sql_constraints = [
        ('name_font_uniq', 'unique(family, name)', 'You can not register two fonts with the same name'),
    ]
    def font_scan(self, cr, uid, lazy=False, context=None):
        """Action of loading fonts
        In lazy mode will scan the filesystem only if there is no founts in the database and sync if no font in CustomTTFonts
        In not lazy mode will force scan filesystem and sync
        """
        if lazy:
            # lazy loading, scan only if no fonts in db
            found_fonts_ids = self.search(cr, uid, [('path', '!=', '/dev/null')], context=context)
            if not found_fonts_ids:
                # no scan yet or no font found on the system, scan the filesystem
                self._scan_disk(cr, uid, context=context)
            elif len(customfonts.CustomTTFonts) == 0:
                # CustomTTFonts list is empty
                self._sync(cr, uid, context=context)
        else:
            self._scan_disk(cr, uid, context=context)
        return True
    def _scan_disk(self, cr, uid, context=None):
        """Scan the file system and register the result in database"""
        found_fonts = []
        for font_path in customfonts.list_all_sysfonts():
            try:
                font = ttfonts.TTFontFile(font_path)
                _logger.debug("Found font %s at %s", font.name, font_path)
                found_fonts.append((font.familyName, font.name, font_path, font.styleName))
            except Exception, ex:
                # Unreadable/broken font files are logged and skipped.
                _logger.warning("Could not register Font %s: %s", font_path, ex)
        for family, name, path, mode in found_fonts:
            # Only create records for fonts not already registered.
            if not self.search(cr, uid, [('family', '=', family), ('name', '=', name)], context=context):
                self.create(cr, uid, {
                    'family': family, 'name': name,
                    'path': path, 'mode': mode,
                }, context=context)
        # remove fonts not present on the disk anymore
        existing_font_names = [name for (family, name, path, mode) in found_fonts]
        inexistant_fonts = self.search(cr, uid, [('name', 'not in', existing_font_names), ('path', '!=', '/dev/null')], context=context)
        if inexistant_fonts:
            self.unlink(cr, uid, inexistant_fonts, context=context)
        # Invalidate caches on all workers, then refresh the in-process list.
        RegistryManager.signal_caches_change(cr.dbname)
        self._sync(cr, uid, context=context)
        return True
    def _sync(self, cr, uid, context=None):
        """Set the customfonts.CustomTTFonts list to the content of the database"""
        customfonts.CustomTTFonts = []
        local_family_modes = set()
        local_font_paths = {}
        found_fonts_ids = self.search(cr, uid, [('path', '!=', '/dev/null')], context=context)
        # NOTE(review): browse is called with context=None rather than the
        # caller's context -- confirm whether that is intentional.
        for font in self.browse(cr, uid, found_fonts_ids, context=None):
            local_family_modes.add((font.family, font.mode))
            local_font_paths[font.name] = font.path
            customfonts.CustomTTFonts.append((font.family, font.name, font.path, font.mode))
        # Attempt to remap the builtin fonts (Helvetica, Times, Courier) to better alternatives
        # if available, because they only support a very small subset of unicode
        # (missing 'č' for example)
        for builtin_font_family, mode, alts in BUILTIN_ALTERNATIVES:
            if (builtin_font_family, mode) not in local_family_modes:
                # No local font exists with that name, try alternatives
                for altern_font in alts:
                    if local_font_paths.get(altern_font):
                        altern_def = (builtin_font_family, altern_font,
                                      local_font_paths[altern_font], mode)
                        customfonts.CustomTTFonts.append(altern_def)
                        _logger.debug("Builtin remapping %r", altern_def)
                        break
                else:
                    # for/else: no alternative matched for this family/mode.
                    _logger.warning("No local alternative found for builtin font `%s` (%s mode)."
                                    "Consider installing the DejaVu fonts if you have problems "
                                    "with unicode characters in RML reports",
                                    builtin_font_family, mode)
        return True
    def clear_caches(self):
        """Force worker to resync at next report loading by setting an empty font list"""
        customfonts.CustomTTFonts = []
        return super(res_font, self).clear_caches()
| agpl-3.0 |
aviciimaxwell/odoo | addons/project/res_config.py | 232 | 4551 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (C) 2004-2012 OpenERP S.A. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
class project_configuration(osv.osv_memory):
    """Project settings wizard (a ``res.config.settings`` subclass).

    Per the standard res.config.settings machinery, each boolean
    ``module_*`` field triggers (un)installation of the named module and
    each ``group_*`` field toggles membership of its ``implied_group``.
    """
    _name = 'project.config.settings'
    _inherit = 'res.config.settings'
    _columns = {
        'module_sale_service': fields.boolean('Generate tasks from sale orders',
            help='This feature automatically creates project tasks from service products in sale orders. '
                 'More precisely, tasks are created for procurement lines with product of type \'Service\', '
                 'procurement method \'Make to Order\', and supply method \'Manufacture\'.\n'
                 '-This installs the module sale_service.'),
        'module_pad': fields.boolean("Use integrated collaborative note pads on task",
            help='Lets the company customize which Pad installation should be used to link to new pads '
                 '(for example: http://ietherpad.com/).\n'
                 '-This installs the module pad.'),
        'module_project_timesheet': fields.boolean("Record timesheet lines per tasks",
            help='This allows you to transfer the entries under tasks defined for Project Management to '
                 'the timesheet line entries for particular date and user, with the effect of creating, '
                 'editing and deleting either ways.\n'
                 '-This installs the module project_timesheet.'),
        'module_project_issue': fields.boolean("Track issues and bugs",
            help='Provides management of issues/bugs in projects.\n'
                 '-This installs the module project_issue.'),
        'time_unit': fields.many2one('product.uom', 'Working time unit', required=True,
            help='This will set the unit of measure used in projects and tasks.\n'
                 'Changing the unit will only impact new entries.'),
        'module_project_issue_sheet': fields.boolean("Invoice working time on issues",
            help='Provides timesheet support for the issues/bugs management in project.\n'
                 '-This installs the module project_issue_sheet.'),
        'group_tasks_work_on_tasks': fields.boolean("Log work activities on tasks",
            implied_group='project.group_tasks_work_on_tasks',
            help="Allows you to compute work on tasks."),
        'group_time_work_estimation_tasks': fields.boolean("Manage time estimation on tasks",
            implied_group='project.group_time_work_estimation_tasks',
            help="Allows you to compute Time Estimation on tasks."),
        'group_manage_delegation_task': fields.boolean("Allow task delegation",
            implied_group='project.group_delegate_task',
            help="Allows you to delegate tasks to other users."),
    }
    def get_default_time_unit(self, cr, uid, fields, context=None):
        """Default ``time_unit`` from the current user's company setting."""
        user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
        return {'time_unit': user.company_id.project_time_mode_id.id}
    def set_time_unit(self, cr, uid, ids, context=None):
        """Persist the chosen working-time unit on the user's company."""
        config = self.browse(cr, uid, ids[0], context)
        user = self.pool.get('res.users').browse(cr, uid, uid, context)
        user.company_id.write({'project_time_mode_id': config.time_unit.id})
    def onchange_time_estimation_project_timesheet(self, cr, uid, ids, group_time_work_estimation_tasks, module_project_timesheet):
        """Enabling time estimation or timesheets requires task work logging."""
        if group_time_work_estimation_tasks or module_project_timesheet:
            return {'value': {'group_tasks_work_on_tasks': True}}
        return {}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
yannickcr/CouchPotatoServer | couchpotato/core/media/movie/providers/trailer/youtube_dl/extractor/lynda.py | 19 | 8369 | from __future__ import unicode_literals
import re
import json
from .subtitles import SubtitlesInfoExtractor
from .common import InfoExtractor
from ..utils import (
compat_urllib_parse,
compat_urllib_request,
ExtractorError,
int_or_none,
compat_str,
)
class LyndaIE(SubtitlesInfoExtractor):
    # Extractor for single lynda.com video pages; authentication is
    # performed up front because most content is members-only.
    IE_NAME = 'lynda'
    IE_DESC = 'lynda.com videos'
    _VALID_URL = r'https?://www\.lynda\.com/[^/]+/[^/]+/\d+/(\d+)-\d\.html'
    _LOGIN_URL = 'https://www.lynda.com/login/login.aspx'
    _NETRC_MACHINE = 'lynda'
    _SUCCESSFUL_LOGIN_REGEX = r'isLoggedIn: true'
    # Matches "[hh:mm:ss.fff]" (or comma decimal) at the front of a
    # transcript Timecode string.
    _TIMECODE_REGEX = r'\[(?P<timecode>\d+:\d+:\d+[\.,]\d+)\]'
    ACCOUNT_CREDENTIALS_HINT = 'Use --username and --password options to provide lynda.com account credentials.'
    _TEST = {
        'url': 'http://www.lynda.com/Bootstrap-tutorials/Using-exercise-files/110885/114408-4.html',
        'md5': 'ecfc6862da89489161fb9cd5f5a6fac1',
        'info_dict': {
            'id': '114408',
            'ext': 'mp4',
            'title': 'Using the exercise files',
            'duration': 68
        }
    }
    def _real_initialize(self):
        """Log in (when credentials are configured) before any extraction."""
        self._login()
    def _real_extract(self, url):
        """Fetch the video JSON and build the info dict (formats, subs)."""
        mobj = re.match(self._VALID_URL, url)
        video_id = mobj.group(1)
        page = self._download_webpage('http://www.lynda.com/ajax/player?videoId=%s&type=video' % video_id, video_id,
                                      'Downloading video JSON')
        video_json = json.loads(page)
        # A 'Status' key only appears on error payloads.
        if 'Status' in video_json:
            raise ExtractorError('lynda returned error: %s' % video_json['Message'], expected=True)
        if video_json['HasAccess'] is False:
            raise ExtractorError(
                'Video %s is only available for members. ' % video_id + self.ACCOUNT_CREDENTIALS_HINT, expected=True)
        video_id = compat_str(video_json['ID'])
        duration = video_json['DurationInSeconds']
        title = video_json['Title']
        formats = []
        # Older API responses carry rich 'Formats'; newer ones only
        # 'PrioritizedStreams' -- support both.
        fmts = video_json.get('Formats')
        if fmts:
            formats.extend([
                {
                    'url': fmt['Url'],
                    'ext': fmt['Extension'],
                    'width': fmt['Width'],
                    'height': fmt['Height'],
                    'filesize': fmt['FileSize'],
                    'format_id': str(fmt['Resolution'])
                } for fmt in fmts])
        prioritized_streams = video_json.get('PrioritizedStreams')
        if prioritized_streams:
            formats.extend([
                {
                    'url': video_url,
                    # format_id doubles as the pixel width when numeric
                    'width': int_or_none(format_id),
                    'format_id': format_id,
                } for format_id, video_url in prioritized_streams['0'].items()
            ])
        self._sort_formats(formats)
        if self._downloader.params.get('listsubtitles', False):
            self._list_available_subtitles(video_id, page)
            return
        subtitles = self._fix_subtitles(self.extract_subtitles(video_id, page))
        return {
            'id': video_id,
            'title': title,
            'duration': duration,
            'subtitles': subtitles,
            'formats': formats
        }
    def _login(self):
        """Authenticate against lynda.com; no-op without credentials.

        Raises ExtractorError on bad credentials or any other failure to
        reach the logged-in state.
        """
        (username, password) = self._get_login_info()
        if username is None:
            return
        login_form = {
            'username': username,
            'password': password,
            'remember': 'false',
            'stayPut': 'false'
        }
        request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(login_form))
        login_page = self._download_webpage(request, None, 'Logging in as %s' % username)
        # Not (yet) logged in
        m = re.search(r'loginResultJson = \'(?P<json>[^\']+)\';', login_page)
        if m is not None:
            response = m.group('json')
            response_json = json.loads(response)
            state = response_json['state']
            if state == 'notlogged':
                raise ExtractorError('Unable to login, incorrect username and/or password', expected=True)
            # This is when we get popup:
            # > You're already logged in to lynda.com on two devices.
            # > If you log in here, we'll log you out of another device.
            # So, we need to confirm this.
            if state == 'conflicted':
                confirm_form = {
                    'username': '',
                    'password': '',
                    'resolve': 'true',
                    'remember': 'false',
                    'stayPut': 'false',
                }
                request = compat_urllib_request.Request(self._LOGIN_URL, compat_urllib_parse.urlencode(confirm_form))
                login_page = self._download_webpage(request, None, 'Confirming log in and log out from another device')
        if re.search(self._SUCCESSFUL_LOGIN_REGEX, login_page) is None:
            raise ExtractorError('Unable to log in')
    def _fix_subtitles(self, subtitles):
        """Convert lynda's JSON transcripts to SRT-like text per language.

        Each caption's end time is taken from the start of the following
        caption.  NOTE(review): the final caption is dropped (range stops
        at len-1), numbering starts at 0, and entries are concatenated
        without the blank-line separator the SRT format expects -- confirm
        whether downstream consumers tolerate this.
        """
        if subtitles is None:
            return subtitles  # subtitles not requested
        fixed_subtitles = {}
        for k, v in subtitles.items():
            subs = json.loads(v)
            if len(subs) == 0:
                continue
            srt = ''
            for pos in range(0, len(subs) - 1):
                seq_current = subs[pos]
                m_current = re.match(self._TIMECODE_REGEX, seq_current['Timecode'])
                if m_current is None:
                    continue
                seq_next = subs[pos + 1]
                m_next = re.match(self._TIMECODE_REGEX, seq_next['Timecode'])
                if m_next is None:
                    continue
                appear_time = m_current.group('timecode')
                disappear_time = m_next.group('timecode')
                text = seq_current['Caption']
                srt += '%s\r\n%s --> %s\r\n%s' % (str(pos), appear_time, disappear_time, text)
            if srt:
                fixed_subtitles[k] = srt
        return fixed_subtitles
    def _get_available_subtitles(self, video_id, webpage):
        """Report an 'en' transcript URL if the transcript JSON is non-empty."""
        url = 'http://www.lynda.com/ajax/player?videoId=%s&type=transcript' % video_id
        sub = self._download_webpage(url, None, False)
        sub_json = json.loads(sub)
        return {'en': url} if len(sub_json) > 0 else {}
class LyndaCourseIE(InfoExtractor):
    IE_NAME = 'lynda:course'
    IE_DESC = 'lynda.com online courses'
    # Course link equals to welcome/introduction video link of same course
    # We will recognize it as course link
    _VALID_URL = r'https?://(?:www|m)\.lynda\.com/(?P<coursepath>[^/]+/[^/]+/(?P<courseid>\d+))-\d\.html'
    def _real_extract(self, url):
        """Expand a course page into a playlist of LyndaIE video URLs.

        Locked videos are skipped (with a warning) when no credentials
        are configured; with credentials every video is attempted.
        """
        mobj = re.match(self._VALID_URL, url)
        course_path = mobj.group('coursepath')
        course_id = mobj.group('courseid')
        page = self._download_webpage('http://www.lynda.com/ajax/player?courseId=%s&type=course' % course_id,
                                      course_id, 'Downloading course JSON')
        course_json = json.loads(page)
        if 'Status' in course_json and course_json['Status'] == 'NotFound':
            raise ExtractorError('Course %s does not exist' % course_id, expected=True)
        unaccessible_videos = 0
        videos = []
        (username, _) = self._get_login_info()
        # Might want to extract videos right here from video['Formats'] as it seems 'Formats' is not provided
        # by single video API anymore
        for chapter in course_json['Chapters']:
            for video in chapter['Videos']:
                if username is None and video['HasAccess'] is False:
                    unaccessible_videos += 1
                    continue
                videos.append(video['ID'])
        if unaccessible_videos > 0:
            self._downloader.report_warning('%s videos are only available for members and will not be downloaded. '
                                            % unaccessible_videos + LyndaIE.ACCOUNT_CREDENTIALS_HINT)
        entries = [
            self.url_result('http://www.lynda.com/%s/%s-4.html' %
                            (course_path, video_id),
                            'Lynda')
            for video_id in videos]
        course_title = course_json['Title']
        return self.playlist_result(entries, course_id, course_title) | gpl-3.0 |
quinox/weblate | weblate/accounts/migrations/0010_auto_20150819_1457.py | 9 | 1462 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated: refresh the ``choices`` of ``Profile.language``.

    NOTE(review): altering only ``choices`` is typically a no-op at the
    SQL level (choices are enforced in Python) -- candidate for squashing.
    """
    dependencies = [
        ('accounts', '0009_auto_20150630_1213'),
    ]
    operations = [
        migrations.AlterField(
            model_name='profile',
            name='language',
            field=models.CharField(max_length=10, verbose_name='Interface Language', choices=[(b'az', 'Az\u0259rbaycan'), (b'be', '\u0411\u0435\u043b\u0430\u0440\u0443\u0441\u043a\u0430\u044f'), (b'be@latin', 'Bie\u0142aruskaja'), (b'br', 'Brezhoneg'), (b'ca', 'Catal\xe0'), (b'cs', '\u010ce\u0161tina'), (b'da', 'Dansk'), (b'de', 'Deutsch'), (b'en', 'English'), (b'el', '\u0395\u03bb\u03bb\u03b7\u03bd\u03b9\u03ba\u03ac'), (b'es', 'Espa\xf1ol'), (b'fi', 'Suomi'), (b'fr', 'Fran\xe7ais'), (b'fy', 'Frysk'), (b'gl', 'Galego'), (b'he', '\u05e2\u05d1\u05e8\u05d9\u05ea'), (b'hu', 'Magyar'), (b'id', b'Indonesia'), (b'ja', '\u65e5\u672c\u8a9e'), (b'ko', '\ud55c\uad6d\uc5b4'), (b'ksh', 'K\xf6lsch'), (b'nl', 'Nederlands'), (b'pl', 'Polski'), (b'pt', 'Portugu\xeas'), (b'pt_BR', 'Portugu\xeas brasileiro'), (b'ru', '\u0420\u0443\u0441\u0441\u043a\u0438\u0439'), (b'sk', 'Sloven\u010dina'), (b'sl', 'Sloven\u0161\u010dina'), (b'sv', 'Svenska'), (b'tr', 'T\xfcrk\xe7e'), (b'uk', '\u0423\u043a\u0440\u0430\u0457\u043d\u0441\u044c\u043a\u0430'), (b'zh_CN', '\u7b80\u4f53\u5b57'), (b'zh_TW', '\u6b63\u9ad4\u5b57')]),
        ),
    ]
| gpl-3.0 |
abhishekgahlot/or-tools | examples/python/ski_assignment.py | 34 | 3787 | # Copyright 2010 Hakan Kjellerstrand hakank@bonetmail.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Ski assignment in Google CP Solver.
From Jeffrey Lee Hellrung, Jr.:
PIC 60, Fall 2008 Final Review, December 12, 2008
http://www.math.ucla.edu/~jhellrun/course_files/Fall%25202008/PIC%252060%2520-%2520Data%2520Structures%2520and%2520Algorithms/final_review.pdf
'''
5. Ski Optimization! Your job at Snapple is pleasant but in the winter
you've decided to become a ski bum. You've hooked up with the Mount
Baldy Ski Resort. They'll let you ski all winter for free in exchange
for helping their ski rental shop with an algorithm to assign skis to
skiers. Ideally, each skier should obtain a pair of skis whose height
matches his or her own height exactly. Unfortunately, this is generally
not possible. We define the disparity between a skier and his or her
skis to be the absolute value of the difference between the height of
the skier and the pair of skis. Our objective is to find an assignment
of skis to skiers that minimizes the sum of the disparities.
...
Illustrate your algorithm by explicitly filling out the A[i, j] table
for the following sample data:
* Ski heights: 1, 2, 5, 7, 13, 21.
* Skier heights: 3, 4, 7, 11, 18.
'''
Compare with the following models:
* Comet : http://www.hakank.org/comet/ski_assignment.co
* MiniZinc: http://hakank.org/minizinc/ski_assignment.mzn
* ECLiPSe : http://www.hakank.org/eclipse/ski_assignment.ecl
* SICStus: http://hakank.org/sicstus/ski_assignment.pl
* Gecode: http://hakank.org/gecode/ski_assignment.cpp
This model was created by Hakan Kjellerstrand (hakank@bonetmail.com)
Also see my other Google CP Solver models:
http://www.hakank.org/google_or_tools/
"""
import sys
from ortools.constraint_solver import pywrapcp
def main():
# Create the solver.
solver = pywrapcp.Solver('Ski assignment')
#
# data
#
num_skis = 6
num_skiers = 5
ski_heights = [1, 2, 5, 7, 13, 21]
skier_heights = [3, 4, 7, 11, 18]
#
# variables
#
# which ski to choose for each skier
x = [solver.IntVar(0, num_skis - 1, 'x[%i]' % i)
for i in range(num_skiers)]
z = solver.IntVar(0, sum(ski_heights), 'z')
#
# constraints
#
solver.Add(solver.AllDifferent(x))
z_tmp = [abs(solver.Element(ski_heights, x[i]) - skier_heights[i])
for i in range(num_skiers)]
solver.Add(z == sum(z_tmp))
# objective
objective = solver.Minimize(z, 1)
#
# search and result
#
db = solver.Phase(x,
solver.INT_VAR_DEFAULT,
solver.INT_VALUE_DEFAULT)
solver.NewSearch(db, [objective])
num_solutions = 0
while solver.NextSolution():
num_solutions += 1
print 'total differences:', z.Value()
for i in range(num_skiers):
x_val = x[i].Value()
ski_height = ski_heights[x[i].Value()]
diff = ski_height - skier_heights[i]
print 'Skier %i: Ski %i with length %2i (diff: %2i)' %\
(i, x_val, ski_height, diff)
print
solver.EndSearch()
print
print 'num_solutions:', num_solutions
print 'failures:', solver.Failures()
print 'branches:', solver.Branches()
print 'WallTime:', solver.WallTime()
if __name__ == '__main__':
  # Script entry point: solve the model and print all improving solutions.
  main()
| apache-2.0 |
takis/django | tests/redirects_tests/tests.py | 336 | 3396 | from django import http
from django.conf import settings
from django.contrib.redirects.middleware import RedirectFallbackMiddleware
from django.contrib.redirects.models import Redirect
from django.contrib.sites.models import Site
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase, modify_settings, override_settings
from django.utils import six
@modify_settings(MIDDLEWARE_CLASSES={'append':
'django.contrib.redirects.middleware.RedirectFallbackMiddleware'})
@override_settings(APPEND_SLASH=False, SITE_ID=1)
class RedirectTests(TestCase):
def setUp(self):
self.site = Site.objects.get(pk=settings.SITE_ID)
def test_model(self):
r1 = Redirect.objects.create(
site=self.site, old_path='/initial', new_path='/new_target')
self.assertEqual(six.text_type(r1), "/initial ---> /new_target")
def test_redirect(self):
Redirect.objects.create(
site=self.site, old_path='/initial', new_path='/new_target')
response = self.client.get('/initial')
self.assertRedirects(response,
'/new_target', status_code=301, target_status_code=404)
@override_settings(APPEND_SLASH=True)
def test_redirect_with_append_slash(self):
Redirect.objects.create(
site=self.site, old_path='/initial/', new_path='/new_target/')
response = self.client.get('/initial')
self.assertRedirects(response,
'/new_target/', status_code=301, target_status_code=404)
@override_settings(APPEND_SLASH=True)
def test_redirect_with_append_slash_and_query_string(self):
Redirect.objects.create(
site=self.site, old_path='/initial/?foo', new_path='/new_target/')
response = self.client.get('/initial?foo')
self.assertRedirects(response,
'/new_target/', status_code=301, target_status_code=404)
def test_response_gone(self):
"""When the redirect target is '', return a 410"""
Redirect.objects.create(
site=self.site, old_path='/initial', new_path='')
response = self.client.get('/initial')
self.assertEqual(response.status_code, 410)
@modify_settings(INSTALLED_APPS={'remove': 'django.contrib.sites'})
def test_sites_not_installed(self):
with self.assertRaises(ImproperlyConfigured):
RedirectFallbackMiddleware()
class OverriddenRedirectFallbackMiddleware(RedirectFallbackMiddleware):
# Use HTTP responses different from the defaults
response_gone_class = http.HttpResponseForbidden
response_redirect_class = http.HttpResponseRedirect
@modify_settings(MIDDLEWARE_CLASSES={'append':
'redirects_tests.tests.OverriddenRedirectFallbackMiddleware'})
@override_settings(SITE_ID=1)
class OverriddenRedirectMiddlewareTests(TestCase):
def setUp(self):
self.site = Site.objects.get(pk=settings.SITE_ID)
def test_response_gone_class(self):
Redirect.objects.create(
site=self.site, old_path='/initial/', new_path='')
response = self.client.get('/initial/')
self.assertEqual(response.status_code, 403)
def test_response_redirect_class(self):
Redirect.objects.create(
site=self.site, old_path='/initial/', new_path='/new_target/')
response = self.client.get('/initial/')
self.assertEqual(response.status_code, 302)
| bsd-3-clause |
jjs0sbw/CSPLN | scripts/create_web_apps_win.py | 1 | 6147 | '''
<license>
CSPLN_MaryKeelerEdition; Manages images to which notes can be added.
Copyright (C) 2015, Thomas Kercheval
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
___________________________________________________________</license>
Description:
For creating CSPLN webapps for WINDOWS, from scaffolding.
Inputs:
Version number, of MKE_vxx_xx_xx scaffolding file.
Where each x corresponds to a current version number.
Input as "xx_xx_xx"
Number of web applications
Outputs:
Web applications, number depends on Input.
Puts web2py.py in each web_app (not included in windows version).
Puts scaffolding (current app version) into each web2py frame.
Renames scaffolding application to 'MKE_Static_Name'.
Renames 'MKE_PT_.bat' shortcut to match application number.
Currently:
To do:
Done:
Delete examples and welcome apps in web2py framework.
'''
import os, sys, shutil
from the_decider import resolve_relative_path as resolve_path
def check_file_exist(path):
    """Abort the script if `path` does not exist.

    Args:
        path: Filesystem path (file or directory) that must be present.

    Returns:
        None when the path exists; otherwise the process exits via
        sys.exit() with an explanatory message.
    """
    # Inverted condition replaces the original `if ...: pass / else:`
    # anti-pattern; only the missing-path branch does any work.
    if not os.path.exists(path):
        sys.exit('File {} doesn\'t exist'.format(path))
    return None
def grab_out_paths(num_apps):
"""
From the number of applications necessary, create a list
of pathnames where we will create windows applications.
"""
out_dir = resolve_path(__file__,'../apps/web_apps/win/{pat}')
project_part = 'P{}'
out_paths = []
for num in range(1, num_apps + 1):
strin = project_part.format(str(num))
print "{part}, preparing for generation.".format(part=strin)
out_paths.append(out_dir.format(pat=strin))
return out_paths
def grab_web2py_frame():
    """Resolve and validate the web2py framework dir and launcher script.

    Returns a (framework_path, launcher_path) tuple; exits via
    check_file_exist() if either is missing.
    """
    framework_dir = resolve_path(__file__,'../apps/scaffolding/win/web2py')
    launcher = resolve_path(__file__,'../apps/scaffolding/common/web2py.py')
    # Validate in the original order: launcher first, then framework dir.
    check_file_exist(launcher)
    check_file_exist(framework_dir)
    return framework_dir, launcher
def grab_scaffold_app(version):
    """Return the validated path of the scaffolding app for `version`.

    `version` is the 'xx_xx_xx' suffix of an MKE_vxx_xx_xx directory.
    """
    relative = '../apps/scaffolding/version/MKE_v{}'.format(version)
    scaffold_path = resolve_path(__file__, relative)
    check_file_exist(scaffold_path)
    return scaffold_path
def copy_webframez(num_apps):
"""
For each path where we intend to create a linux application,
create a copy of the web2py framework and a modified copy
of web2py.py.
"""
webframe, webdotpy = grab_web2py_frame()
out_paths = grab_out_paths(num_apps)
for path in out_paths:
shutil.copytree(webframe, os.path.join(path, 'web2py'))
next_path = os.path.join(path, 'web2py')
shutil.copy(webdotpy, next_path)
print ' web2py frame copied to: {}'.format(path)
print ' web2py.py copied to: {}'.format(next_path)
return out_paths
def modify_out_paths(int_paths):
    """Map web2py-frame directories to their applications directories.

    Args:
        int_paths: Directories that each contain a copied web2py frame.

    Returns:
        A new list with 'web2py/applications' joined onto each path;
        the input list is left untouched.
    """
    # List comprehension replaces the manual append loop (same order,
    # same os.path.join semantics).
    return [os.path.join(path, 'web2py/applications') for path in int_paths]
def grab_filename_from_path(in_path):
    """Return the last component of `in_path`.

    Uses ntpath so both Windows and POSIX separators work, and tolerates
    a trailing separator ('a/b/' -> 'b').
    """
    import ntpath
    parent, leaf = ntpath.split(in_path)
    if leaf:
        return leaf
    return ntpath.basename(parent)
def create_bat(out_path, num):
"""Creates a bat file to start the web2py server."""
mkebat = resolve_path(__file__, '../apps/scaffolding/common/MKE_PT_.bat')
check_file_exist(mkebat)
shutil.copy(mkebat, out_path)
old_name = os.path.join(out_path, 'MKE_PT_.bat')
exe_name = 'MKE_PT_{}.bat'.format(num)
new_name = os.path.join(out_path, exe_name)
print " ...P{} bat file created...".format(num)
os.rename(old_name, new_name)
return None
def rename_exe(path, num):
"""Renames exe files."""
old_name = os.path.join(path, 'web2py.exe')
exe_name = 'MKE_PT_{}.exe'.format(num)
new_name = os.path.join(path, exe_name)
print old_name, new_name
os.rename(old_name, new_name)
return None
def modify_webframez(out_paths, num_apps):
"""For every webframe, create a bat file."""
assert len(out_paths) == int(num_apps)
num = 1
print " Creating *.bat shortcuts..."
for path in out_paths:
new_path = os.path.join(path, 'web2py')
create_bat(new_path, num)
num += 1
return None
def copy_app(version, out_paths):
    """Copy the versioned scaffolding app into every applications dir.

    Each copy is renamed to the fixed 'MKE_Static_Name' application name.
    """
    scaffold = grab_scaffold_app(version)
    scaffold_name = grab_filename_from_path(scaffold)
    for apps_dir in out_paths:
        staged = os.path.join(apps_dir, scaffold_name)
        shutil.copytree(scaffold, staged)
        os.rename(staged, os.path.join(apps_dir, 'MKE_Static_Name'))
    return None
def deploy_scaffolding(version, num_apps):
"""
Deploys the web2py framework and the current version of our
scaffolding, as many times as is necessary.
"""
print "\n Creating Windows applications...\n" + "_"*79
out_paths = copy_webframez(num_apps)
modify_webframez(out_paths, num_apps)
new_paths = modify_out_paths(out_paths)
copy_app(version, new_paths)
print "_"*79
return None
if __name__ == "__main__":
NUM_APPS = 10
VERSION = '00_01_02'
deploy_scaffolding(VERSION, NUM_APPS)
| gpl-3.0 |
tipabu/swift | test/unit/container/test_reconciler.py | 2 | 80935 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import numbers
import mock
import operator
import time
import unittest
import socket
import os
import errno
import itertools
import random
from collections import defaultdict
from datetime import datetime
import six
from six.moves import urllib
from swift.container import reconciler
from swift.container.server import gen_resp_headers
from swift.common.direct_client import ClientException
from swift.common import swob
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.utils import split_path, Timestamp, encode_timestamps
from test.unit import debug_logger, FakeRing, fake_http_connect
from test.unit.common.middleware import helpers
def timestamp_to_last_modified(timestamp):
    """Render a Swift timestamp like a container listing 'last_modified'
    value (ISO-8601 with microseconds, no timezone suffix)."""
    seconds = float(Timestamp(timestamp))
    as_datetime = datetime.utcfromtimestamp(seconds)
    return as_datetime.strftime('%Y-%m-%dT%H:%M:%S.%f')
def container_resp_headers(**kwargs):
    """Build backend container response headers as a HeaderKeyDict."""
    raw_headers = gen_resp_headers(kwargs)
    return HeaderKeyDict(raw_headers)
class FakeStoragePolicySwift(object):
    """Dispatch WSGI requests to one FakeSwift per storage policy index.

    Object requests are routed by the policy index recorded for their
    container in ``_mock_oldest_spi_map`` (default 0) unless the request
    carries an explicit X-Backend-Storage-Policy-Index header; account
    and container requests use the ``None`` policy app.
    """
    def __init__(self):
        # One FakeSwift app per policy index, created lazily on first use.
        self.storage_policy = defaultdict(helpers.FakeSwift)
        # container name -> policy index used for object requests
        self._mock_oldest_spi_map = {}
    def __getattribute__(self, name):
        # Any attribute this wrapper does not define itself falls through
        # to the policy-None FakeSwift (e.g. register(), call bookkeeping).
        try:
            return object.__getattribute__(self, name)
        except AttributeError:
            return getattr(self.storage_policy[None], name)
    def __call__(self, env, start_response):
        method = env['REQUEST_METHOD']
        path = env['PATH_INFO']
        _, acc, cont, obj = split_path(env['PATH_INFO'], 0, 4,
                                       rest_with_last=True)
        if not obj:
            # account/container request: handled by the policy-None app
            policy_index = None
        else:
            policy_index = self._mock_oldest_spi_map.get(cont, 0)
        # allow backend policy override
        if 'HTTP_X_BACKEND_STORAGE_POLICY_INDEX' in env:
            policy_index = int(env['HTTP_X_BACKEND_STORAGE_POLICY_INDEX'])
        try:
            return self.storage_policy[policy_index].__call__(
                env, start_response)
        except KeyError:
            # KeyError here is presumably FakeSwift signalling an
            # unregistered path (the defaultdict itself cannot raise it);
            # fall through to register a canned response and retry.
            pass
        if method == 'PUT':
            resp_class = swob.HTTPCreated
        else:
            resp_class = swob.HTTPNotFound
        self.storage_policy[policy_index].register(
            method, path, resp_class, {}, '')
        return self.storage_policy[policy_index].__call__(
            env, start_response)
class FakeInternalClient(reconciler.InternalClient):
    """InternalClient whose backend is built from a listings mapping.

    ``listings`` maps ``(storage_policy_index, '/a/c/o')`` to a timestamp
    (or a ``(timestamp, content_type)`` tuple); parse() pre-registers the
    matching object, container-listing and account-listing responses on a
    FakeStoragePolicySwift app.
    """
    def __init__(self, listings):
        self.app = FakeStoragePolicySwift()
        self.user_agent = 'fake-internal-client'
        self.request_tries = 1
        self.use_replication_network = True
        self.parse(listings)
    def parse(self, listings):
        """Populate ``self.accounts`` and register all canned responses."""
        self.accounts = defaultdict(lambda: defaultdict(list))
        for item, timestamp in listings.items():
            # XXX this interface is stupid
            if isinstance(timestamp, tuple):
                timestamp, content_type = timestamp
            else:
                timestamp, content_type = timestamp, 'application/x-put'
            storage_policy_index, path = item
            if six.PY2 and isinstance(path, six.text_type):
                path = path.encode('utf-8')
            account, container_name, obj_name = split_path(
                path, 0, 3, rest_with_last=True)
            self.accounts[account][container_name].append(
                (obj_name, storage_policy_index, timestamp, content_type))
        # Listings are served in object-name order, like a real container.
        for account_name, containers in self.accounts.items():
            for con in containers:
                self.accounts[account_name][con].sort(key=lambda t: t[0])
        for account, containers in self.accounts.items():
            account_listing_data = []
            account_path = '/v1/%s' % account
            for container, objects in containers.items():
                container_path = account_path + '/' + container
                container_listing_data = []
                for entry in objects:
                    (obj_name, storage_policy_index,
                     timestamp, content_type) = entry
                    if storage_policy_index is None and not obj_name:
                        # empty container
                        continue
                    obj_path = swob.str_to_wsgi(
                        container_path + '/' + obj_name)
                    ts = Timestamp(timestamp)
                    headers = {'X-Timestamp': ts.normal,
                               'X-Backend-Timestamp': ts.internal}
                    # register object response
                    self.app.storage_policy[storage_policy_index].register(
                        'GET', obj_path, swob.HTTPOk, headers)
                    self.app.storage_policy[storage_policy_index].register(
                        'DELETE', obj_path, swob.HTTPNoContent, {})
                    # container listing entry
                    last_modified = timestamp_to_last_modified(timestamp)
                    # some tests setup mock listings using floats, some use
                    # strings, so normalize here
                    if isinstance(timestamp, numbers.Number):
                        timestamp = '%f' % timestamp
                    if six.PY2:
                        obj_name = obj_name.decode('utf-8')
                        timestamp = timestamp.decode('utf-8')
                    obj_data = {
                        'bytes': 0,
                        # listing data is unicode
                        'name': obj_name,
                        'last_modified': last_modified,
                        'hash': timestamp,
                        'content_type': content_type,
                    }
                    container_listing_data.append(obj_data)
                container_listing_data.sort(key=operator.itemgetter('name'))
                # register container listing response
                container_headers = {}
                container_qry_string = helpers.normalize_query_string(
                    '?format=json&marker=&end_marker=&prefix=')
                self.app.register('GET', container_path + container_qry_string,
                                  swob.HTTPOk, container_headers,
                                  json.dumps(container_listing_data))
                if container_listing_data:
                    obj_name = container_listing_data[-1]['name']
                    # client should quote and encode marker
                    end_qry_string = helpers.normalize_query_string(
                        '?format=json&marker=%s&end_marker=&prefix=' % (
                            urllib.parse.quote(obj_name.encode('utf-8'))))
                    self.app.register('GET', container_path + end_qry_string,
                                      swob.HTTPOk, container_headers,
                                      json.dumps([]))
                # container DELETE always conflicts (container not empty)
                self.app.register('DELETE', container_path,
                                  swob.HTTPConflict, {}, '')
                # simple account listing entry
                container_data = {'name': container}
                account_listing_data.append(container_data)
            # register account response
            account_listing_data.sort(key=operator.itemgetter('name'))
            account_headers = {}
            account_qry_string = '?format=json&marker=&end_marker=&prefix='
            self.app.register('GET', account_path + account_qry_string,
                              swob.HTTPOk, account_headers,
                              json.dumps(account_listing_data))
            end_qry_string = '?format=json&marker=%s&end_marker=&prefix=' % (
                urllib.parse.quote(account_listing_data[-1]['name']))
            self.app.register('GET', account_path + end_qry_string,
                              swob.HTTPOk, account_headers,
                              json.dumps([]))
class TestReconcilerUtils(unittest.TestCase):
    def setUp(self):
        # Fresh ring and a cleared policy-index memo before every test.
        self.fake_ring = FakeRing()
        reconciler.direct_get_container_policy_index.reset()
    def test_parse_raw_obj(self):
        """parse_raw_obj decodes misplaced-object queue listing entries.

        Covers DELETE and PUT content types, composite 'created_at'
        timestamps, and rejection of malformed names/content types.
        """
        got = reconciler.parse_raw_obj({
            'name': "2:/AUTH_bob/con/obj",
            'hash': Timestamp(2017551.49350).internal,
            'last_modified': timestamp_to_last_modified(2017551.49352),
            'content_type': 'application/x-delete',
        })
        self.assertEqual(got['q_policy_index'], 2)
        self.assertEqual(got['account'], 'AUTH_bob')
        self.assertEqual(got['container'], 'con')
        self.assertEqual(got['obj'], 'obj')
        self.assertEqual(got['q_ts'], 2017551.49350)
        self.assertEqual(got['q_record'], 2017551.49352)
        self.assertEqual(got['q_op'], 'DELETE')
        got = reconciler.parse_raw_obj({
            'name': "1:/AUTH_bob/con/obj",
            'hash': Timestamp(1234.20190).internal,
            'last_modified': timestamp_to_last_modified(1234.20192),
            'content_type': 'application/x-put',
        })
        self.assertEqual(got['q_policy_index'], 1)
        self.assertEqual(got['account'], 'AUTH_bob')
        self.assertEqual(got['container'], 'con')
        self.assertEqual(got['obj'], 'obj')
        self.assertEqual(got['q_ts'], 1234.20190)
        self.assertEqual(got['q_record'], 1234.20192)
        self.assertEqual(got['q_op'], 'PUT')
        # the 'hash' field in object listing has the raw 'created_at' value
        # which could be a composite of timestamps
        timestamp_str = encode_timestamps(Timestamp(1234.20190),
                                          Timestamp(1245.20190),
                                          Timestamp(1256.20190),
                                          explicit=True)
        got = reconciler.parse_raw_obj({
            'name': "1:/AUTH_bob/con/obj",
            'hash': timestamp_str,
            'last_modified': timestamp_to_last_modified(1234.20192),
            'content_type': 'application/x-put',
        })
        self.assertEqual(got['q_policy_index'], 1)
        self.assertEqual(got['account'], 'AUTH_bob')
        self.assertEqual(got['container'], 'con')
        self.assertEqual(got['obj'], 'obj')
        # only the data timestamp of the composite is significant
        self.assertEqual(got['q_ts'], 1234.20190)
        self.assertEqual(got['q_record'], 1234.20192)
        self.assertEqual(got['q_op'], 'PUT')
        # negative test
        obj_info = {
            'name': "1:/AUTH_bob/con/obj",
            'hash': Timestamp(1234.20190).internal,
            'last_modified': timestamp_to_last_modified(1234.20192),
        }
        self.assertRaises(ValueError, reconciler.parse_raw_obj, obj_info)
        obj_info['content_type'] = 'foo'
        self.assertRaises(ValueError, reconciler.parse_raw_obj, obj_info)
        # deliberately not a queue op; rejected either way
        obj_info['content_type'] = 'appliation/x-post'
        self.assertRaises(ValueError, reconciler.parse_raw_obj, obj_info)
        self.assertRaises(ValueError, reconciler.parse_raw_obj,
                          {'name': 'bogus'})
        self.assertRaises(ValueError, reconciler.parse_raw_obj,
                          {'name': '-1:/AUTH_test/container'})
        self.assertRaises(ValueError, reconciler.parse_raw_obj,
                          {'name': 'asdf:/AUTH_test/c/obj'})
        self.assertRaises(KeyError, reconciler.parse_raw_obj,
                          {'name': '0:/AUTH_test/c/obj',
                           'content_type': 'application/x-put'})
def test_get_container_policy_index(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=1,
),
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
]
for permutation in itertools.permutations((0, 1, 2)):
reconciler.direct_get_container_policy_index.reset()
resp_headers = [stub_resp_headers[i] for i in permutation]
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
test_values = [(info['x-storage-policy-index'],
info['x-backend-status-changed-at']) for
info in resp_headers]
self.assertEqual(oldest_spi, 0,
"oldest policy index wrong "
"for permutation %r" % test_values)
def test_get_container_policy_index_with_error(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
container_resp_headers(
status_change_at=next(ts),
storage_policy_index=2,
),
container_resp_headers(
status_changed_at=next(ts),
storage_policy_index=1,
),
# old timestamp, but 500 should be ignored...
ClientException(
'Container Server blew up',
http_status=500, http_reason='Server Error',
http_headers=container_resp_headers(
status_changed_at=Timestamp(0).internal,
storage_policy_index=0,
),
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 2)
def test_get_container_policy_index_with_socket_error(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=1,
),
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 1)
def test_get_container_policy_index_with_too_many_errors(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
ClientException(
'Container Server blew up',
http_status=500, http_reason='Server Error',
http_headers=container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=1,
),
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertIsNone(oldest_spi)
def test_get_container_policy_index_for_deleted(self):
mock_path = 'swift.container.reconciler.direct_head_container'
headers = container_resp_headers(
status_changed_at=Timestamp.now().internal,
storage_policy_index=1,
)
stub_resp_headers = [
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=headers,
),
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=headers,
),
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=headers,
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 1)
def test_get_container_policy_index_for_recently_deleted(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=container_resp_headers(
put_timestamp=next(ts),
delete_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=0,
),
),
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=container_resp_headers(
put_timestamp=next(ts),
delete_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=1,
),
),
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=container_resp_headers(
put_timestamp=next(ts),
delete_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=2,
),
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 2)
def test_get_container_policy_index_for_recently_recreated(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
# old put, no recreate
container_resp_headers(
delete_timestamp=0,
put_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=0,
),
# recently deleted
ClientException(
'Container Not Found',
http_status=404, http_reason='Not Found',
http_headers=container_resp_headers(
put_timestamp=next(ts),
delete_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=1,
),
),
# recently recreated
container_resp_headers(
delete_timestamp=next(ts),
put_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=2,
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 2)
def test_get_container_policy_index_for_recently_split_brain(self):
ts = itertools.count(int(time.time()))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
# oldest put
container_resp_headers(
delete_timestamp=0,
put_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=0,
),
# old recreate
container_resp_headers(
delete_timestamp=next(ts),
put_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=1,
),
# recently put
container_resp_headers(
delete_timestamp=0,
put_timestamp=next(ts),
status_changed_at=next(ts),
storage_policy_index=2,
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 1)
def test_get_container_policy_index_cache(self):
now = time.time()
ts = itertools.count(int(now))
mock_path = 'swift.container.reconciler.direct_head_container'
stub_resp_headers = [
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=1,
),
container_resp_headers(
status_changed_at=Timestamp(next(ts)).internal,
storage_policy_index=0,
),
]
random.shuffle(stub_resp_headers)
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
self.assertEqual(oldest_spi, 0)
# re-mock with errors
stub_resp_headers = [
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
]
with mock.patch('time.time', new=lambda: now):
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
# still cached
self.assertEqual(oldest_spi, 0)
# propel time forward
the_future = now + 31
with mock.patch('time.time', new=lambda: the_future):
with mock.patch(mock_path) as direct_head:
direct_head.side_effect = stub_resp_headers
oldest_spi = reconciler.direct_get_container_policy_index(
self.fake_ring, 'a', 'con')
# expired
self.assertIsNone(oldest_spi)
def test_direct_delete_container_entry(self):
mock_path = 'swift.common.direct_client.http_connect'
connect_args = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
connect_args.append({
'ipaddr': ipaddr, 'port': port, 'device': device,
'partition': partition, 'method': method, 'path': path,
'headers': headers, 'query_string': query_string})
x_timestamp = Timestamp.now()
headers = {'x-timestamp': x_timestamp.internal}
fake_hc = fake_http_connect(200, 200, 200, give_connect=test_connect)
with mock.patch(mock_path, fake_hc):
reconciler.direct_delete_container_entry(
self.fake_ring, 'a', 'c', 'o', headers=headers)
self.assertEqual(len(connect_args), 3)
for args in connect_args:
self.assertEqual(args['method'], 'DELETE')
self.assertEqual(args['path'], '/a/c/o')
self.assertEqual(args['headers'].get('x-timestamp'),
headers['x-timestamp'])
def test_direct_delete_container_entry_with_errors(self):
# setup mock direct_delete
mock_path = \
'swift.container.reconciler.direct_delete_container_object'
stub_resp = [
None,
socket.error(errno.ECONNREFUSED, os.strerror(errno.ECONNREFUSED)),
ClientException(
'Container Server blew up',
'10.0.0.12', 6201, 'sdj', 404, 'Not Found'
),
]
mock_direct_delete = mock.MagicMock()
mock_direct_delete.side_effect = stub_resp
with mock.patch(mock_path, mock_direct_delete), \
mock.patch('eventlet.greenpool.DEBUG', False):
rv = reconciler.direct_delete_container_entry(
self.fake_ring, 'a', 'c', 'o')
self.assertIsNone(rv)
self.assertEqual(len(mock_direct_delete.mock_calls), 3)
def test_add_to_reconciler_queue(self):
mock_path = 'swift.common.direct_client.http_connect'
connect_args = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
connect_args.append({
'ipaddr': ipaddr, 'port': port, 'device': device,
'partition': partition, 'method': method, 'path': path,
'headers': headers, 'query_string': query_string})
fake_hc = fake_http_connect(200, 200, 200, give_connect=test_connect)
with mock.patch(mock_path, fake_hc):
ret = reconciler.add_to_reconciler_queue(
self.fake_ring, 'a', 'c', 'o', 17, 5948918.63946, 'DELETE')
self.assertTrue(ret)
self.assertEqual(ret, str(int(5948918.63946 // 3600 * 3600)))
self.assertEqual(len(connect_args), 3)
required_headers = ('x-content-type', 'x-etag')
for args in connect_args:
self.assertEqual(args['headers']['X-Timestamp'], '5948918.63946')
self.assertEqual(args['path'],
'/.misplaced_objects/5947200/17:/a/c/o')
self.assertEqual(args['headers']['X-Content-Type'],
'application/x-delete')
for header in required_headers:
self.assertTrue(header in args['headers'],
'%r was missing request headers %r' % (
header, args['headers']))
def test_add_to_reconciler_queue_force(self):
mock_path = 'swift.common.direct_client.http_connect'
connect_args = []
def test_connect(ipaddr, port, device, partition, method, path,
headers=None, query_string=None):
connect_args.append({
'ipaddr': ipaddr, 'port': port, 'device': device,
'partition': partition, 'method': method, 'path': path,
'headers': headers, 'query_string': query_string})
fake_hc = fake_http_connect(200, 200, 200, give_connect=test_connect)
now = time.time()
with mock.patch(mock_path, fake_hc), \
mock.patch('swift.container.reconciler.time.time',
lambda: now):
ret = reconciler.add_to_reconciler_queue(
self.fake_ring, 'a', 'c', 'o', 17, 5948918.63946, 'PUT',
force=True)
self.assertTrue(ret)
self.assertEqual(ret, str(int(5948918.63946 // 3600 * 3600)))
self.assertEqual(len(connect_args), 3)
required_headers = ('x-size', 'x-content-type')
for args in connect_args:
self.assertEqual(args['headers']['X-Timestamp'],
Timestamp(now).internal)
self.assertEqual(args['headers']['X-Etag'], '5948918.63946')
self.assertEqual(args['path'],
'/.misplaced_objects/5947200/17:/a/c/o')
for header in required_headers:
self.assertTrue(header in args['headers'],
'%r was missing request headers %r' % (
header, args['headers']))
def test_add_to_reconciler_queue_fails(self):
mock_path = 'swift.common.direct_client.http_connect'
fake_connects = [fake_http_connect(200),
fake_http_connect(200, raise_timeout_exc=True),
fake_http_connect(507)]
def fake_hc(*a, **kw):
return fake_connects.pop()(*a, **kw)
with mock.patch(mock_path, fake_hc):
ret = reconciler.add_to_reconciler_queue(
self.fake_ring, 'a', 'c', 'o', 17, 5948918.63946, 'PUT')
self.assertFalse(ret)
def test_add_to_reconciler_queue_socket_error(self):
mock_path = 'swift.common.direct_client.http_connect'
exc = socket.error(errno.ECONNREFUSED,
os.strerror(errno.ECONNREFUSED))
fake_connects = [fake_http_connect(200),
fake_http_connect(200, raise_timeout_exc=True),
fake_http_connect(500, raise_exc=exc)]
def fake_hc(*a, **kw):
return fake_connects.pop()(*a, **kw)
with mock.patch(mock_path, fake_hc):
ret = reconciler.add_to_reconciler_queue(
self.fake_ring, 'a', 'c', 'o', 17, 5948918.63946, 'DELETE')
self.assertFalse(ret)
def listing_qs(marker):
    """Return the normalized container-listing query string for *marker*."""
    quoted_marker = urllib.parse.quote(marker.encode('utf-8'))
    return helpers.normalize_query_string(
        "?format=json&marker=%s&end_marker=&prefix=" % quoted_marker)
class TestReconciler(unittest.TestCase):
maxDiff = None
def setUp(self):
self.logger = debug_logger()
conf = {}
with mock.patch('swift.container.reconciler.InternalClient'):
self.reconciler = reconciler.ContainerReconciler(conf)
self.reconciler.logger = self.logger
self.start_interval = int(time.time() // 3600 * 3600)
self.current_container_path = '/v1/.misplaced_objects/%d' % (
self.start_interval) + listing_qs('')
def _mock_listing(self, objects):
self.reconciler.swift = FakeInternalClient(objects)
self.fake_swift = self.reconciler.swift.app
    def _mock_oldest_spi(self, container_oldest_spi_map):
        # Map of container name -> storage policy index to be returned by
        # the stubbed direct_get_container_policy_index (see _run_once).
        self.fake_swift._mock_oldest_spi_map = container_oldest_spi_map
def _run_once(self):
"""
Helper method to run the reconciler once with appropriate direct-client
mocks in place.
Returns the list of direct-deleted container entries in the format
[(acc1, con1, obj1), ...]
"""
def mock_oldest_spi(ring, account, container_name):
return self.fake_swift._mock_oldest_spi_map.get(container_name, 0)
items = {
'direct_get_container_policy_index': mock_oldest_spi,
'direct_delete_container_entry': mock.DEFAULT,
}
mock_time_iter = itertools.count(self.start_interval)
with mock.patch.multiple(reconciler, **items) as mocks:
self.mock_delete_container_entry = \
mocks['direct_delete_container_entry']
with mock.patch('time.time', lambda: next(mock_time_iter)):
self.reconciler.run_once()
return [c[1][1:4] for c in
mocks['direct_delete_container_entry'].mock_calls]
def test_invalid_queue_name(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/bogus"): 3618.84187,
})
deleted_container_entries = self._run_once()
# we try to find something useful
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('bogus'))])
# but only get the bogus record
self.assertEqual(self.reconciler.stats['invalid_record'], 1)
# and just leave it on the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertFalse(deleted_container_entries)
def test_invalid_queue_name_marches_onward(self):
# there's something useful there on the queue
self._mock_listing({
(None, "/.misplaced_objects/3600/00000bogus"): 3600.0000,
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
(1, "/AUTH_bob/c/o1"): 3618.84187,
})
self._mock_oldest_spi({'c': 1}) # already in the right spot!
deleted_container_entries = self._run_once()
# we get all the queue entries we can
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# and one is garbage
self.assertEqual(self.reconciler.stats['invalid_record'], 1)
# but the other is workable
self.assertEqual(self.reconciler.stats['noop_object'], 1)
# so pop the queue for that one
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_queue_name_with_policy_index_delimiter_in_name(self):
q_path = '.misplaced_objects/3600'
obj_path = "AUTH_bob/c:sneaky/o1:sneaky"
# there's something useful there on the queue
self._mock_listing({
(None, "/%s/1:/%s" % (q_path, obj_path)): 3618.84187,
(1, '/%s' % obj_path): 3618.84187,
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# we find the misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/%s' % obj_path))])
# move it
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/%s' % obj_path),
('DELETE', '/v1/%s' % obj_path)])
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/%s' % obj_path),
('PUT', '/v1/%s' % obj_path)])
# clean up the source
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# we DELETE the object from the wrong place with source_ts + offset 1
# timestamp to make sure the change takes effect
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=1).internal)
# and pop the queue for that one
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries, [(
'.misplaced_objects', '3600', '1:/%s' % obj_path)])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_unable_to_direct_get_oldest_storage_policy(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
})
# the reconciler gets "None" if we can't quorum the container
self._mock_oldest_spi({'c': None})
deleted_container_entries = self._run_once()
# we look for misplaced objects
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# but can't really say where to go looking
self.assertEqual(self.reconciler.stats['unavailable_container'], 1)
# we don't clean up anything
self.assertEqual(self.reconciler.stats['cleanup_object'], 0)
# and we definitely should not pop_queue
self.assertFalse(deleted_container_entries)
self.assertEqual(self.reconciler.stats['retry'], 1)
def test_object_move(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
(1, "/AUTH_bob/c/o1"): 3618.84187,
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# moves it
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1'),
('DELETE', '/v1/AUTH_bob/c/o1')])
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'),
('PUT', '/v1/AUTH_bob/c/o1')])
put_headers = self.fake_swift.storage_policy[0].headers[1]
# we PUT the object in the right place with q_ts + offset 2
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=2))
# cleans up the old
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# we DELETE the object from the wrong place with source_ts + offset 1
# timestamp to make sure the change takes effect
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=1))
# and when we're done, we pop the entry from the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_the_other_direction(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/0:/AUTH_bob/c/o1"): 3618.84187,
(0, "/AUTH_bob/c/o1"): 3618.84187,
})
self._mock_oldest_spi({'c': 1})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('0:/AUTH_bob/c/o1'))])
# moves it
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('GET', '/v1/AUTH_bob/c/o1'), # 2
('DELETE', '/v1/AUTH_bob/c/o1')]) # 4
delete_headers = self.fake_swift.storage_policy[0].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
put_headers = self.fake_swift.storage_policy[1].headers[1]
# we PUT the object in the right place with q_ts + offset 2
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=2).internal)
# cleans up the old
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# we DELETE the object from the wrong place with source_ts + offset 1
# timestamp to make sure the change takes effect
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=1).internal)
# and when we're done, we pop the entry from the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '0:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_with_unicode_and_spaces(self):
# the "name" in listings and the unicode string passed to all
# functions where we call them with (account, container, obj)
obj_name = u"AUTH_bob/c \u062a/o1 \u062a"
# anytime we talk about a call made to swift for a path
if six.PY2:
obj_path = obj_name.encode('utf-8')
else:
obj_path = obj_name.encode('utf-8').decode('latin-1')
# this mock expects unquoted unicode because it handles container
# listings as well as paths
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/%s" % obj_name): 3618.84187,
(1, "/%s" % obj_name): 3618.84187,
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
# listing_qs encodes and quotes - so give it name
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/%s' % obj_name))])
# moves it
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
# these calls are to the real path
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/%s' % obj_path), # 2
('DELETE', '/v1/%s' % obj_path)]) # 4
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/%s' % obj_path), # 1
('PUT', '/v1/%s' % obj_path)]) # 3
put_headers = self.fake_swift.storage_policy[0].headers[1]
# we PUT the object in the right place with q_ts + offset 2
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=2).internal)
# cleans up the old
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# we DELETE the object from the wrong place with source_ts + offset 1
# timestamp to make sure the change takes effect
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3618.84187, offset=1).internal)
self.assertEqual(
delete_headers.get('X-Backend-Storage-Policy-Index'), '1')
# and when we're done, we pop the entry from the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
# this mock received the name, it's encoded down in buffered_http
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/%s' % obj_name)])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_delete(self):
q_ts = time.time()
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): (
Timestamp(q_ts).internal, 'application/x-delete'),
# object exists in "correct" storage policy - slightly older
(0, "/AUTH_bob/c/o1"): Timestamp(q_ts - 1).internal,
})
self._mock_oldest_spi({'c': 0})
# the tombstone exists in the enqueued storage policy
self.fake_swift.storage_policy[1].register(
'GET', '/v1/AUTH_bob/c/o1', swob.HTTPNotFound,
{'X-Backend-Timestamp': Timestamp(q_ts).internal})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# delete it
self.assertEqual(self.reconciler.stats['delete_attempt'], 1)
self.assertEqual(self.reconciler.stats['delete_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1'),
('DELETE', '/v1/AUTH_bob/c/o1')])
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'),
('DELETE', '/v1/AUTH_bob/c/o1')])
reconcile_headers = self.fake_swift.storage_policy[0].headers[1]
# we DELETE the object in the right place with q_ts + offset 2
self.assertEqual(reconcile_headers.get('X-Timestamp'),
Timestamp(q_ts, offset=2).internal)
# cleans up the old
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# we DELETE the object from the wrong place with source_ts + offset 1
# timestamp to make sure the change takes effect
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(q_ts, offset=1))
# and when we're done, we pop the entry from the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_enqueued_for_the_correct_dest_noop(self):
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3618.84187,
(1, "/AUTH_bob/c/o1"): 3618.84187,
})
self._mock_oldest_spi({'c': 1}) # already in the right spot!
deleted_container_entries = self._run_once()
# nothing to see here
self.assertEqual(self.reconciler.stats['noop_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# so we just pop the queue
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_src_object_newer_than_queue_entry(self):
# setup the cluster
self._mock_listing({
(None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3600.123456,
(1, '/AUTH_bob/c/o1'): 3600.234567, # slightly newer
})
self._mock_oldest_spi({'c': 0}) # destination
# turn the crank
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', self.current_container_path),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
('GET', '/v1/.misplaced_objects/3600' +
listing_qs('1:/AUTH_bob/c/o1'))])
# proceed with the move
self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
self.assertEqual(self.reconciler.stats['copy_success'], 1)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1'), # 2
('DELETE', '/v1/AUTH_bob/c/o1')]) # 4
delete_headers = self.fake_swift.storage_policy[1].headers[1]
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1'), # 1
('PUT', '/v1/AUTH_bob/c/o1')]) # 3
# .. with source timestamp + offset 2
put_headers = self.fake_swift.storage_policy[0].headers[1]
self.assertEqual(put_headers.get('X-Timestamp'),
Timestamp(3600.234567, offset=2))
# src object is cleaned up
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
# ... with q_ts + offset 1
self.assertEqual(delete_headers.get('X-Timestamp'),
Timestamp(3600.123456, offset=1))
# and queue is popped
self.assertEqual(self.reconciler.stats['pop_queue'], 1)
self.assertEqual(deleted_container_entries,
[('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
self.assertEqual(self.reconciler.stats['success'], 1)
def test_object_move_src_object_older_than_queue_entry(self):
# should be some sort of retry case
q_ts = time.time()
container = str(int(q_ts // 3600 * 3600))
q_path = '.misplaced_objects/%s' % container
self._mock_listing({
(None, "/%s/1:/AUTH_bob/c/o1" % q_path): q_ts,
(1, '/AUTH_bob/c/o1'): q_ts - 1, # slightly older
})
self._mock_oldest_spi({'c': 0})
deleted_container_entries = self._run_once()
# found a misplaced object
self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
self.assertEqual(
self.fake_swift.calls,
[('GET', '/v1/%s' % q_path + listing_qs('')),
('GET', '/v1/%s' % q_path +
listing_qs('1:/AUTH_bob/c/o1')),
('GET', '/v1/.misplaced_objects' + listing_qs('')),
('GET', '/v1/.misplaced_objects' + listing_qs(container))])
self.assertEqual(
self.fake_swift.storage_policy[0].calls,
[('HEAD', '/v1/AUTH_bob/c/o1')])
# but no object copy is attempted
self.assertEqual(self.reconciler.stats['unavailable_source'], 1)
self.assertEqual(self.reconciler.stats['copy_attempt'], 0)
self.assertEqual(
self.fake_swift.storage_policy[1].calls,
[('GET', '/v1/AUTH_bob/c/o1')])
# src object is un-modified
self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
# queue is un-changed, we'll have to retry
self.assertEqual(self.reconciler.stats['pop_queue'], 0)
self.assertEqual(deleted_container_entries, [])
self.assertEqual(self.reconciler.stats['retry'], 1)
    def test_src_object_unavailable_with_slightly_newer_tombstone(self):
        """A source GET that 404s with a tombstone newer than the queue
        entry is still treated as an unavailable source: no copy, entry
        stays queued for retry.
        """
        # should be some sort of retry case
        q_ts = float(Timestamp.now())
        container = str(int(q_ts // 3600 * 3600))
        q_path = '.misplaced_objects/%s' % container
        self._mock_listing({
            (None, "/%s/1:/AUTH_bob/c/o1" % q_path): q_ts,
        })
        self._mock_oldest_spi({'c': 0})
        self.fake_swift.storage_policy[1].register(
            'GET', '/v1/AUTH_bob/c/o1', swob.HTTPNotFound,
            {'X-Backend-Timestamp': Timestamp(q_ts, offset=2).internal})
        deleted_container_entries = self._run_once()
        # found a misplaced object
        self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
        self.assertEqual(
            self.fake_swift.calls,
            [('GET', '/v1/%s' % q_path + listing_qs('')),
             ('GET', '/v1/%s' % q_path +
              listing_qs('1:/AUTH_bob/c/o1')),
             ('GET', '/v1/.misplaced_objects' + listing_qs('')),
             ('GET', '/v1/.misplaced_objects' + listing_qs(container))])
        self.assertEqual(
            self.fake_swift.storage_policy[0].calls,
            [('HEAD', '/v1/AUTH_bob/c/o1')])
        # but no object copy is attempted
        self.assertEqual(self.reconciler.stats['unavailable_source'], 1)
        self.assertEqual(self.reconciler.stats['copy_attempt'], 0)
        self.assertEqual(
            self.fake_swift.storage_policy[1].calls,
            [('GET', '/v1/AUTH_bob/c/o1')])
        # src object is un-modified
        self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
        # queue is un-changed, we'll have to retry
        self.assertEqual(self.reconciler.stats['pop_queue'], 0)
        self.assertEqual(deleted_container_entries, [])
        self.assertEqual(self.reconciler.stats['retry'], 1)
    def test_src_object_unavailable_server_error(self):
        """A 503 from the source GET is an unavailable source: no copy,
        entry stays queued for retry.
        """
        # should be some sort of retry case
        q_ts = float(Timestamp.now())
        container = str(int(q_ts // 3600 * 3600))
        q_path = '.misplaced_objects/%s' % container
        self._mock_listing({
            (None, "/%s/1:/AUTH_bob/c/o1" % q_path): q_ts,
        })
        self._mock_oldest_spi({'c': 0})
        self.fake_swift.storage_policy[1].register(
            'GET', '/v1/AUTH_bob/c/o1', swob.HTTPServiceUnavailable, {})
        deleted_container_entries = self._run_once()
        # found a misplaced object
        self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
        self.assertEqual(
            self.fake_swift.calls,
            [('GET', '/v1/%s' % q_path + listing_qs('')),
             ('GET', '/v1/%s' % q_path +
              listing_qs('1:/AUTH_bob/c/o1')),
             ('GET', '/v1/.misplaced_objects' + listing_qs('')),
             ('GET', '/v1/.misplaced_objects' + listing_qs(container))])
        self.assertEqual(
            self.fake_swift.storage_policy[0].calls,
            [('HEAD', '/v1/AUTH_bob/c/o1')])
        # but no object copy is attempted
        self.assertEqual(self.reconciler.stats['unavailable_source'], 1)
        self.assertEqual(self.reconciler.stats['copy_attempt'], 0)
        self.assertEqual(
            self.fake_swift.storage_policy[1].calls,
            [('GET', '/v1/AUTH_bob/c/o1')])
        # src object is un-modified
        self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
        # queue is un-changed, we'll have to retry
        self.assertEqual(self.reconciler.stats['pop_queue'], 0)
        self.assertEqual(deleted_container_entries, [])
        self.assertEqual(self.reconciler.stats['retry'], 1)
    def test_object_move_fails_cleanup(self):
        """The copy to the destination succeeds but the source DELETE
        503s: the queue entry is not popped so the cleanup is retried.
        """
        # setup the cluster
        self._mock_listing({
            (None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3600.123456,
            (1, '/AUTH_bob/c/o1'): 3600.123457,  # slightly newer
        })
        self._mock_oldest_spi({'c': 0})  # destination
        # make the DELETE blow up
        self.fake_swift.storage_policy[1].register(
            'DELETE', '/v1/AUTH_bob/c/o1', swob.HTTPServiceUnavailable, {})
        # turn the crank
        deleted_container_entries = self._run_once()
        # found a misplaced object
        self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
        self.assertEqual(
            self.fake_swift.calls,
            [('GET', self.current_container_path),
             ('GET', '/v1/.misplaced_objects' + listing_qs('')),
             ('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
             ('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
             ('GET', '/v1/.misplaced_objects/3600' +
              listing_qs('1:/AUTH_bob/c/o1'))])
        # proceed with the move
        self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
        self.assertEqual(self.reconciler.stats['copy_success'], 1)
        self.assertEqual(
            self.fake_swift.storage_policy[1].calls,
            [('GET', '/v1/AUTH_bob/c/o1'),  # 2
             ('DELETE', '/v1/AUTH_bob/c/o1')])  # 4
        delete_headers = self.fake_swift.storage_policy[1].headers[1]
        self.assertEqual(
            self.fake_swift.storage_policy[0].calls,
            [('HEAD', '/v1/AUTH_bob/c/o1'),  # 1
             ('PUT', '/v1/AUTH_bob/c/o1')])  # 3
        # .. with source timestamp + offset 2
        put_headers = self.fake_swift.storage_policy[0].headers[1]
        self.assertEqual(put_headers.get('X-Timestamp'),
                         Timestamp(3600.123457, offset=2))
        # we try to cleanup
        self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
        # ... with q_ts + offset 1
        self.assertEqual(delete_headers.get('X-Timestamp'),
                         Timestamp(3600.12346, offset=1))
        # but cleanup fails!
        self.assertEqual(self.reconciler.stats['cleanup_failed'], 1)
        # so the queue is not popped
        self.assertEqual(self.reconciler.stats['pop_queue'], 0)
        self.assertEqual(deleted_container_entries, [])
        # and we'll have to retry
        self.assertEqual(self.reconciler.stats['retry'], 1)
    def test_object_move_src_object_is_forever_gone(self):
        """A queue entry older than reclaim_age whose source object no
        longer exists anywhere is given up on: the entry is popped with
        no copy and no cleanup.
        """
        # oh boy, hate to be here - this is an oldy
        q_ts = self.start_interval - self.reconciler.reclaim_age - 1
        self._mock_listing({
            (None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): q_ts,
        })
        self._mock_oldest_spi({'c': 0})
        deleted_container_entries = self._run_once()
        # found a misplaced object
        self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
        self.assertEqual(
            self.fake_swift.calls,
            [('GET', self.current_container_path),
             ('GET', '/v1/.misplaced_objects' + listing_qs('')),
             ('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
             ('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
             ('GET', '/v1/.misplaced_objects/3600' +
              listing_qs('1:/AUTH_bob/c/o1'))])
        self.assertEqual(
            self.fake_swift.storage_policy[0].calls,
            [('HEAD', '/v1/AUTH_bob/c/o1')])
        # but it's gone :\
        self.assertEqual(self.reconciler.stats['lost_source'], 1)
        self.assertEqual(
            self.fake_swift.storage_policy[1].calls,
            [('GET', '/v1/AUTH_bob/c/o1')])
        # gah, look, even if it was out there somewhere - we've been at this
        # two weeks and haven't found it.  We can't just keep looking forever,
        # so... we're done
        self.assertEqual(self.reconciler.stats['pop_queue'], 1)
        self.assertEqual(deleted_container_entries,
                         [('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
        # dunno if this is helpful, but FWIW we don't throw tombstones?
        self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
        self.assertEqual(self.reconciler.stats['success'], 1)  # lol
    def test_object_move_dest_already_moved(self):
        """If the object already exists in the correct policy with the
        same timestamp, skip the copy, clean up the misplaced copy, and
        pop the queue entry.
        """
        self._mock_listing({
            (None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3679.2019,
            (1, "/AUTH_bob/c/o1"): 3679.2019,
            (0, "/AUTH_bob/c/o1"): 3679.2019,
        })
        self._mock_oldest_spi({'c': 0})
        deleted_container_entries = self._run_once()
        # we look for misplaced objects
        self.assertEqual(
            self.fake_swift.calls,
            [('GET', self.current_container_path),
             ('GET', '/v1/.misplaced_objects' + listing_qs('')),
             ('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
             ('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
             ('GET', '/v1/.misplaced_objects/3600' +
              listing_qs('1:/AUTH_bob/c/o1'))])
        # but we found it already in the right place!
        self.assertEqual(self.reconciler.stats['found_object'], 1)
        self.assertEqual(
            self.fake_swift.storage_policy[0].calls,
            [('HEAD', '/v1/AUTH_bob/c/o1')])
        # so no attempt to read the source is made, but we do cleanup
        self.assertEqual(
            self.fake_swift.storage_policy[1].calls,
            [('DELETE', '/v1/AUTH_bob/c/o1')])
        delete_headers = self.fake_swift.storage_policy[1].headers[0]
        # rather we just clean up the dark matter
        self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
        self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
        self.assertEqual(delete_headers.get('X-Timestamp'),
                         Timestamp(3679.2019, offset=1))
        # and wipe our hands of it
        self.assertEqual(self.reconciler.stats['pop_queue'], 1)
        self.assertEqual(deleted_container_entries,
                         [('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
        self.assertEqual(self.reconciler.stats['success'], 1)
    def test_object_move_dest_object_newer_than_queue_entry(self):
        """If the destination already holds a newer object, skip the
        copy, clean up the misplaced copy, and pop the queue entry.
        """
        self._mock_listing({
            (None, "/.misplaced_objects/3600/1:/AUTH_bob/c/o1"): 3679.2019,
            (1, "/AUTH_bob/c/o1"): 3679.2019,
            (0, "/AUTH_bob/c/o1"): 3679.2019 + 1,  # slightly newer
        })
        self._mock_oldest_spi({'c': 0})
        deleted_container_entries = self._run_once()
        # we look for misplaced objects...
        self.assertEqual(
            self.fake_swift.calls,
            [('GET', self.current_container_path),
             ('GET', '/v1/.misplaced_objects' + listing_qs('')),
             ('GET', '/v1/.misplaced_objects' + listing_qs('3600')),
             ('GET', '/v1/.misplaced_objects/3600' + listing_qs('')),
             ('GET', '/v1/.misplaced_objects/3600' +
              listing_qs('1:/AUTH_bob/c/o1'))])
        # but we found it already in the right place!
        self.assertEqual(self.reconciler.stats['found_object'], 1)
        self.assertEqual(
            self.fake_swift.storage_policy[0].calls,
            [('HEAD', '/v1/AUTH_bob/c/o1')])
        # so no attempt to read is made, but we do cleanup
        self.assertEqual(self.reconciler.stats['copy_attempt'], 0)
        self.assertEqual(
            self.fake_swift.storage_policy[1].calls,
            [('DELETE', '/v1/AUTH_bob/c/o1')])
        delete_headers = self.fake_swift.storage_policy[1].headers[0]
        # rather we just clean up the dark matter
        self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
        self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
        self.assertEqual(delete_headers.get('X-Timestamp'),
                         Timestamp(3679.2019, offset=1))
        # and since we cleaned up the old object, so this counts as done
        self.assertEqual(self.reconciler.stats['pop_queue'], 1)
        self.assertEqual(deleted_container_entries,
                         [('.misplaced_objects', '3600', '1:/AUTH_bob/c/o1')])
        self.assertEqual(self.reconciler.stats['success'], 1)
    def test_object_move_dest_object_older_than_queue_entry(self):
        """If the destination holds an older object, overwrite it with
        the misplaced (newer) copy, clean up the source, pop the queue.
        """
        self._mock_listing({
            (None, "/.misplaced_objects/36000/1:/AUTH_bob/c/o1"): 36123.38393,
            (1, "/AUTH_bob/c/o1"): 36123.38393,
            (0, "/AUTH_bob/c/o1"): 36123.38393 - 1,  # slightly older
        })
        self._mock_oldest_spi({'c': 0})
        deleted_container_entries = self._run_once()
        # we found a misplaced object
        self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
        self.assertEqual(
            self.fake_swift.calls,
            [('GET', self.current_container_path),
             ('GET', '/v1/.misplaced_objects' + listing_qs('')),
             ('GET', '/v1/.misplaced_objects' + listing_qs('36000')),
             ('GET', '/v1/.misplaced_objects/36000' + listing_qs('')),
             ('GET', '/v1/.misplaced_objects/36000' +
              listing_qs('1:/AUTH_bob/c/o1'))])
        # and since our version is *newer*, we overwrite
        self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
        self.assertEqual(self.reconciler.stats['copy_success'], 1)
        self.assertEqual(
            self.fake_swift.storage_policy[1].calls,
            [('GET', '/v1/AUTH_bob/c/o1'),  # 2
             ('DELETE', '/v1/AUTH_bob/c/o1')])  # 4
        delete_headers = self.fake_swift.storage_policy[1].headers[1]
        self.assertEqual(
            self.fake_swift.storage_policy[0].calls,
            [('HEAD', '/v1/AUTH_bob/c/o1'),  # 1
             ('PUT', '/v1/AUTH_bob/c/o1')])  # 3
        # ... with a q_ts + offset 2
        put_headers = self.fake_swift.storage_policy[0].headers[1]
        self.assertEqual(put_headers.get('X-Timestamp'),
                         Timestamp(36123.38393, offset=2))
        # then clean the dark matter
        self.assertEqual(self.reconciler.stats['cleanup_attempt'], 1)
        self.assertEqual(self.reconciler.stats['cleanup_success'], 1)
        # ... with a q_ts + offset 1
        self.assertEqual(delete_headers.get('X-Timestamp'),
                         Timestamp(36123.38393, offset=1))
        # and pop the queue
        self.assertEqual(self.reconciler.stats['pop_queue'], 1)
        self.assertEqual(deleted_container_entries,
                         [('.misplaced_objects', '36000', '1:/AUTH_bob/c/o1')])
        self.assertEqual(self.reconciler.stats['success'], 1)
    def test_object_move_put_fails(self):
        """A 503 from the destination PUT: copy fails cleanly, source is
        untouched and the queue entry stays for retry.
        """
        # setup the cluster
        self._mock_listing({
            (None, "/.misplaced_objects/36000/1:/AUTH_bob/c/o1"): 36123.383925,
            (1, "/AUTH_bob/c/o1"): 36123.383925,
        })
        self._mock_oldest_spi({'c': 0})
        # make the put to dest fail!
        self.fake_swift.storage_policy[0].register(
            'PUT', '/v1/AUTH_bob/c/o1', swob.HTTPServiceUnavailable, {})
        # turn the crank
        deleted_container_entries = self._run_once()
        # we find a misplaced object
        self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
        self.assertEqual(
            self.fake_swift.calls,
            [('GET', self.current_container_path),
             ('GET', '/v1/.misplaced_objects' + listing_qs('')),
             ('GET', '/v1/.misplaced_objects' + listing_qs('36000')),
             ('GET', '/v1/.misplaced_objects/36000' + listing_qs('')),
             ('GET', '/v1/.misplaced_objects/36000' +
              listing_qs('1:/AUTH_bob/c/o1'))])
        # and try to move it, but it fails
        self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
        self.assertEqual(
            self.fake_swift.storage_policy[1].calls,
            [('GET', '/v1/AUTH_bob/c/o1')])  # 2
        self.assertEqual(
            self.fake_swift.storage_policy[0].calls,
            [('HEAD', '/v1/AUTH_bob/c/o1'),  # 1
             ('PUT', '/v1/AUTH_bob/c/o1')])  # 3
        put_headers = self.fake_swift.storage_policy[0].headers[1]
        # ...with q_ts + offset 2 (20-microseconds)
        self.assertEqual(put_headers.get('X-Timestamp'),
                         Timestamp(36123.383925, offset=2))
        # but it failed
        self.assertEqual(self.reconciler.stats['copy_success'], 0)
        self.assertEqual(self.reconciler.stats['copy_failed'], 1)
        # ... so we don't clean up the source
        self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
        # and we don't pop the queue
        self.assertEqual(deleted_container_entries, [])
        # NOTE(review): this key is 'unhandled_errors' (plural) while
        # test_object_move_put_blows_up_crazy_town asserts
        # 'unhandled_error' (singular).  If stats is a defaultdict, a
        # typo'd key would still read 0 here — confirm the real stat name.
        self.assertEqual(self.reconciler.stats['unhandled_errors'], 0)
        self.assertEqual(self.reconciler.stats['retry'], 1)
    def test_object_move_put_blows_up_crazy_town(self):
        """An unhandled exception from the destination PUT is counted
        as an unhandled error; no cleanup, entry stays queued for retry.
        """
        # setup the cluster
        self._mock_listing({
            (None, "/.misplaced_objects/36000/1:/AUTH_bob/c/o1"): 36123.383925,
            (1, "/AUTH_bob/c/o1"): 36123.383925,
        })
        self._mock_oldest_spi({'c': 0})

        # make the put to dest blow up crazy town
        def blow_up(*args, **kwargs):
            raise Exception('kaboom!')

        self.fake_swift.storage_policy[0].register(
            'PUT', '/v1/AUTH_bob/c/o1', blow_up, {})
        # turn the crank
        deleted_container_entries = self._run_once()
        # we find a misplaced object
        self.assertEqual(self.reconciler.stats['misplaced_object'], 1)
        self.assertEqual(
            self.fake_swift.calls,
            [('GET', self.current_container_path),
             ('GET', '/v1/.misplaced_objects' + listing_qs('')),
             ('GET', '/v1/.misplaced_objects' + listing_qs('36000')),
             ('GET', '/v1/.misplaced_objects/36000' + listing_qs('')),
             ('GET', '/v1/.misplaced_objects/36000' +
              listing_qs('1:/AUTH_bob/c/o1'))])
        # and attempt to move it
        self.assertEqual(self.reconciler.stats['copy_attempt'], 1)
        self.assertEqual(
            self.fake_swift.storage_policy[1].calls,
            [('GET', '/v1/AUTH_bob/c/o1')])  # 2
        self.assertEqual(
            self.fake_swift.storage_policy[0].calls,
            [('HEAD', '/v1/AUTH_bob/c/o1'),  # 1
             ('PUT', '/v1/AUTH_bob/c/o1')])  # 3
        put_headers = self.fake_swift.storage_policy[0].headers[1]
        # ...with q_ts + offset 2 (20-microseconds)
        self.assertEqual(put_headers.get('X-Timestamp'),
                         Timestamp(36123.383925, offset=2))
        # but it blows up hard
        # NOTE(review): singular 'unhandled_error' here vs plural
        # 'unhandled_errors' in test_object_move_put_fails — confirm
        # which key the reconciler actually increments.
        self.assertEqual(self.reconciler.stats['unhandled_error'], 1)
        # so we don't cleanup
        self.assertEqual(self.reconciler.stats['cleanup_attempt'], 0)
        # and we don't pop the queue
        self.assertEqual(self.reconciler.stats['pop_queue'], 0)
        self.assertEqual(deleted_container_entries, [])
        self.assertEqual(self.reconciler.stats['retry'], 1)
    def test_object_move_no_such_object_no_tombstone_recent(self):
        """Source 404s with no tombstone but the entry is recent: leave
        it queued, since tombstones may exist on offline nodes.
        """
        q_ts = float(Timestamp.now())
        container = str(int(q_ts // 3600 * 3600))
        q_path = '.misplaced_objects/%s' % container

        self._mock_listing({
            (None, "/%s/1:/AUTH_jeb/c/o1" % q_path): q_ts
        })
        self._mock_oldest_spi({'c': 0})

        deleted_container_entries = self._run_once()

        self.assertEqual(
            self.fake_swift.calls,
            [('GET', '/v1/.misplaced_objects/%s' % container + listing_qs('')),
             ('GET', '/v1/.misplaced_objects/%s' % container +
              listing_qs('1:/AUTH_jeb/c/o1')),
             ('GET', '/v1/.misplaced_objects' + listing_qs('')),
             ('GET', '/v1/.misplaced_objects' + listing_qs(container))])
        self.assertEqual(
            self.fake_swift.storage_policy[0].calls,
            [('HEAD', '/v1/AUTH_jeb/c/o1')],
        )
        self.assertEqual(
            self.fake_swift.storage_policy[1].calls,
            [('GET', '/v1/AUTH_jeb/c/o1')],
        )
        # the queue entry is recent enough that there could easily be
        # tombstones on offline nodes or something, so we'll just leave it
        # here and try again later
        self.assertEqual(deleted_container_entries, [])
    def test_object_move_no_such_object_no_tombstone_ancient(self):
        """Source 404s with no tombstone and the entry is older than
        reclaim_age: give up and pop the queue entry.
        """
        queue_ts = float(Timestamp.now()) - \
            self.reconciler.reclaim_age * 1.1
        container = str(int(queue_ts // 3600 * 3600))

        self._mock_listing({
            (
                None, "/.misplaced_objects/%s/1:/AUTH_jeb/c/o1" % container
            ): queue_ts
        })
        self._mock_oldest_spi({'c': 0})

        deleted_container_entries = self._run_once()

        self.assertEqual(
            self.fake_swift.calls,
            [('GET', self.current_container_path),
             ('GET', '/v1/.misplaced_objects' + listing_qs('')),
             ('GET', '/v1/.misplaced_objects' + listing_qs(container)),
             ('GET', '/v1/.misplaced_objects/%s' % container + listing_qs('')),
             ('GET', '/v1/.misplaced_objects/%s' % container +
              listing_qs('1:/AUTH_jeb/c/o1'))])
        self.assertEqual(
            self.fake_swift.storage_policy[0].calls,
            [('HEAD', '/v1/AUTH_jeb/c/o1')],
        )
        self.assertEqual(
            self.fake_swift.storage_policy[1].calls,
            [('GET', '/v1/AUTH_jeb/c/o1')],
        )
        # the queue entry is old enough that the tombstones, if any, have
        # probably been reaped, so we'll just give up
        self.assertEqual(
            deleted_container_entries,
            [('.misplaced_objects', container, '1:/AUTH_jeb/c/o1')])
    def test_delete_old_empty_queue_containers(self):
        """Empty queue containers older than reclaim_age are deleted;
        a bogus (zero-timestamp) entry in an older container is counted
        as an invalid record rather than processed.
        """
        ts = time.time() - self.reconciler.reclaim_age * 1.1
        container = str(int(ts // 3600 * 3600))
        older_ts = ts - 3600
        older_container = str(int(older_ts // 3600 * 3600))
        self._mock_listing({
            (None, "/.misplaced_objects/%s/" % container): 0,
            (None, "/.misplaced_objects/%s/something" % older_container): 0,
        })
        deleted_container_entries = self._run_once()
        self.assertEqual(deleted_container_entries, [])
        self.assertEqual(
            self.fake_swift.calls,
            [('GET', self.current_container_path),
             ('GET', '/v1/.misplaced_objects' + listing_qs('')),
             ('GET', '/v1/.misplaced_objects' + listing_qs(container)),
             ('GET', '/v1/.misplaced_objects/%s' % container + listing_qs('')),
             ('DELETE', '/v1/.misplaced_objects/%s' % container),
             ('GET', '/v1/.misplaced_objects/%s' % older_container +
              listing_qs('')),
             ('GET', '/v1/.misplaced_objects/%s' % older_container +
              listing_qs('something'))])
        self.assertEqual(self.reconciler.stats['invalid_record'], 1)
    def test_iter_over_old_containers_in_reverse(self):
        """Queue containers are visited newest-first, and ones past
        reclaim_age are deleted immediately after being listed.
        """
        step = reconciler.MISPLACED_OBJECTS_CONTAINER_DIVISOR
        now = self.start_interval
        containers = []
        for i in range(10):
            container_ts = int(now - step * i)
            container_name = str(container_ts // 3600 * 3600)
            containers.append(container_name)
        # add some old containers too
        now -= self.reconciler.reclaim_age
        old_containers = []
        for i in range(10):
            container_ts = int(now - step * i)
            container_name = str(container_ts // 3600 * 3600)
            old_containers.append(container_name)
        containers.sort()
        old_containers.sort()
        all_containers = old_containers + containers
        self._mock_listing(dict((
            (None, "/.misplaced_objects/%s/" % container), 0
        ) for container in all_containers))
        deleted_container_entries = self._run_once()
        self.assertEqual(deleted_container_entries, [])
        last_container = all_containers[-1]
        account_listing_calls = [
            ('GET', '/v1/.misplaced_objects' + listing_qs('')),
            ('GET', '/v1/.misplaced_objects' + listing_qs(last_container)),
        ]
        new_container_calls = [
            ('GET', '/v1/.misplaced_objects/%s' % container +
             listing_qs('')) for container in reversed(containers)
        ][1:]  # current_container gets skipped the second time around...
        old_container_listings = [
            ('GET', '/v1/.misplaced_objects/%s' % container +
             listing_qs('')) for container in reversed(old_containers)
        ]
        old_container_deletes = [
            ('DELETE', '/v1/.misplaced_objects/%s' % container)
            for container in reversed(old_containers)
        ]
        # interleave each old-container listing with its delete
        old_container_calls = list(itertools.chain(*zip(
            old_container_listings, old_container_deletes)))
        self.assertEqual(self.fake_swift.calls,
                         [('GET', self.current_container_path)] +
                         account_listing_calls + new_container_calls +
                         old_container_calls)
    def test_error_in_iter_containers(self):
        """A 503 from the account listing is logged as an error and the
        run ends with no stats recorded.
        """
        self._mock_listing({})

        # make the listing return an error
        self.fake_swift.storage_policy[None].register(
            'GET', '/v1/.misplaced_objects' + listing_qs(''),
            swob.HTTPServiceUnavailable, {})

        self._run_once()

        self.assertEqual(
            self.fake_swift.calls,
            [('GET', self.current_container_path),
             ('GET', '/v1/.misplaced_objects' + listing_qs(''))])
        self.assertEqual(self.reconciler.stats, {})
        errors = self.reconciler.logger.get_lines_for_level('error')
        self.assertEqual(errors, [
            'Error listing containers in account '
            '.misplaced_objects (Unexpected response: '
            '503 Service Unavailable)'])
    def test_unhandled_exception_in_reconcile(self):
        """An exception raised during the account listing is caught and
        logged; the run ends with no stats recorded.
        """
        self._mock_listing({})

        # make the listing blow up
        def blow_up(*args, **kwargs):
            raise Exception('kaboom!')

        self.fake_swift.storage_policy[None].register(
            'GET', '/v1/.misplaced_objects' + listing_qs(''),
            blow_up, {})
        self._run_once()
        self.assertEqual(
            self.fake_swift.calls,
            [('GET', self.current_container_path),
             ('GET', '/v1/.misplaced_objects' + listing_qs(''))])
        self.assertEqual(self.reconciler.stats, {})
        errors = self.reconciler.logger.get_lines_for_level('error')
        self.assertEqual(errors,
                         ['Unhandled Exception trying to reconcile: '])
# Allow running this test module directly with `python <module>`.
if __name__ == '__main__':
    unittest.main()
| apache-2.0 |
harisibrahimkv/django | tests/file_uploads/tests.py | 45 | 23742 | import base64
import hashlib
import os
import shutil
import sys
import tempfile as sys_tempfile
import unittest
from io import BytesIO, StringIO
from urllib.parse import quote
from django.core.files import temp as tempfile
from django.core.files.uploadedfile import SimpleUploadedFile
from django.http.multipartparser import MultiPartParser, parse_header
from django.test import SimpleTestCase, TestCase, client, override_settings
from django.utils.encoding import force_bytes
from . import uploadhandler
from .models import FileModel
UNICODE_FILENAME = 'test-0123456789_中文_Orléans.jpg'
MEDIA_ROOT = sys_tempfile.mkdtemp()
UPLOAD_TO = os.path.join(MEDIA_ROOT, 'test_upload')
@override_settings(MEDIA_ROOT=MEDIA_ROOT, ROOT_URLCONF='file_uploads.urls', MIDDLEWARE=[])
class FileUploadTests(TestCase):
    @classmethod
    def setUpClass(cls):
        """Ensure the isolated MEDIA_ROOT directory exists for the class."""
        super().setUpClass()
        if not os.path.isdir(MEDIA_ROOT):
            os.makedirs(MEDIA_ROOT)
    @classmethod
    def tearDownClass(cls):
        """Remove the temporary MEDIA_ROOT created for this class."""
        shutil.rmtree(MEDIA_ROOT)
        super().tearDownClass()
def test_simple_upload(self):
with open(__file__, 'rb') as fp:
post_data = {
'name': 'Ringo',
'file_field': fp,
}
response = self.client.post('/upload/', post_data)
self.assertEqual(response.status_code, 200)
    def test_large_upload(self):
        """Multi-megabyte uploads round-trip intact (checked via SHA-1)."""
        file = tempfile.NamedTemporaryFile
        with file(suffix=".file1") as file1, file(suffix=".file2") as file2:
            file1.write(b'a' * (2 ** 21))
            file1.seek(0)

            file2.write(b'a' * (10 * 2 ** 20))
            file2.seek(0)

            post_data = {
                'name': 'Ringo',
                'file_field1': file1,
                'file_field2': file2,
            }

            # Attach a hex SHA-1 for each value so the /verify/ view can
            # check what it received.  File-like values are hashed from
            # their contents (then rewound); plain strings lack .read()
            # and fall through to hashing their encoded value.
            for key in list(post_data):
                try:
                    post_data[key + '_hash'] = hashlib.sha1(post_data[key].read()).hexdigest()
                    post_data[key].seek(0)
                except AttributeError:
                    post_data[key + '_hash'] = hashlib.sha1(force_bytes(post_data[key])).hexdigest()

            response = self.client.post('/verify/', post_data)

            self.assertEqual(response.status_code, 200)
    def _test_base64_upload(self, content, encode=base64.b64encode):
        """POST *content* as a base64-encoded multipart file part and
        assert the echo view returns the decoded text.
        """
        payload = client.FakePayload("\r\n".join([
            '--' + client.BOUNDARY,
            'Content-Disposition: form-data; name="file"; filename="test.txt"',
            'Content-Type: application/octet-stream',
            'Content-Transfer-Encoding: base64',
            '']))
        payload.write(b"\r\n" + encode(force_bytes(content)) + b"\r\n")
        payload.write('--' + client.BOUNDARY + '--\r\n')
        r = {
            'CONTENT_LENGTH': len(payload),
            'CONTENT_TYPE': client.MULTIPART_CONTENT,
            'PATH_INFO': "/echo_content/",
            'REQUEST_METHOD': 'POST',
            'wsgi.input': payload,
        }
        response = self.client.request(**r)
        self.assertEqual(response.json()['file'], content)
    def test_base64_upload(self):
        """Short base64-encoded upload decodes correctly."""
        self._test_base64_upload("This data will be transmitted base64-encoded.")
    def test_big_base64_upload(self):
        """Base64 upload larger than the in-memory threshold decodes correctly."""
        self._test_base64_upload("Big data" * 68000)  # > 512Kb
    def test_big_base64_newlines_upload(self):
        """Base64 payloads with embedded newlines (encodebytes) decode correctly."""
        self._test_base64_upload("Big data" * 68000, encode=base64.encodebytes)
def test_unicode_file_name(self):
with sys_tempfile.TemporaryDirectory() as temp_dir:
# This file contains Chinese symbols and an accented char in the name.
with open(os.path.join(temp_dir, UNICODE_FILENAME), 'w+b') as file1:
file1.write(b'b' * (2 ** 10))
file1.seek(0)
response = self.client.post('/unicode_name/', {'file_unicode': file1})
self.assertEqual(response.status_code, 200)
    def test_unicode_file_name_rfc2231(self):
        """
        Test receiving file upload when filename is encoded with RFC2231
        (#22971).
        """
        # Build the multipart body by hand so the filename* parameter
        # uses RFC 2231 extended (percent-encoded UTF-8) notation.
        payload = client.FakePayload()
        payload.write('\r\n'.join([
            '--' + client.BOUNDARY,
            'Content-Disposition: form-data; name="file_unicode"; filename*=UTF-8\'\'%s' % quote(UNICODE_FILENAME),
            'Content-Type: application/octet-stream',
            '',
            'You got pwnd.\r\n',
            '\r\n--' + client.BOUNDARY + '--\r\n'
        ]))

        r = {
            'CONTENT_LENGTH': len(payload),
            'CONTENT_TYPE': client.MULTIPART_CONTENT,
            'PATH_INFO': "/unicode_name/",
            'REQUEST_METHOD': 'POST',
            'wsgi.input': payload,
        }
        response = self.client.request(**r)
        self.assertEqual(response.status_code, 200)
    def test_unicode_name_rfc2231(self):
        """
        Test receiving file upload when filename is encoded with RFC2231
        (#22971).
        """
        # Here *both* the field name (name*=) and the filename (filename*=)
        # use RFC 2231 extended notation.
        payload = client.FakePayload()
        payload.write(
            '\r\n'.join([
                '--' + client.BOUNDARY,
                'Content-Disposition: form-data; name*=UTF-8\'\'file_unicode; filename*=UTF-8\'\'%s' % quote(
                    UNICODE_FILENAME
                ),
                'Content-Type: application/octet-stream',
                '',
                'You got pwnd.\r\n',
                '\r\n--' + client.BOUNDARY + '--\r\n'
            ])
        )

        r = {
            'CONTENT_LENGTH': len(payload),
            'CONTENT_TYPE': client.MULTIPART_CONTENT,
            'PATH_INFO': "/unicode_name/",
            'REQUEST_METHOD': 'POST',
            'wsgi.input': payload,
        }
        response = self.client.request(**r)
        self.assertEqual(response.status_code, 200)
    def test_blank_filenames(self):
        """
        Receiving file upload when filename is blank (before and after
        sanitization) should be okay.
        """
        # The second value is normalized to an empty name by
        # MultiPartParser.IE_sanitize()
        filenames = ['', 'C:\\Windows\\']

        payload = client.FakePayload()
        for i, name in enumerate(filenames):
            payload.write('\r\n'.join([
                '--' + client.BOUNDARY,
                'Content-Disposition: form-data; name="file%s"; filename="%s"' % (i, name),
                'Content-Type: application/octet-stream',
                '',
                'You got pwnd.\r\n'
            ]))
        payload.write('\r\n--' + client.BOUNDARY + '--\r\n')

        r = {
            'CONTENT_LENGTH': len(payload),
            'CONTENT_TYPE': client.MULTIPART_CONTENT,
            'PATH_INFO': '/echo/',
            'REQUEST_METHOD': 'POST',
            'wsgi.input': payload,
        }
        response = self.client.request(**r)
        self.assertEqual(response.status_code, 200)

        # Empty filenames should be ignored
        received = response.json()
        for i, name in enumerate(filenames):
            self.assertIsNone(received.get('file%s' % i))
    def test_dangerous_file_names(self):
        """Uploaded file names should be sanitized before ever reaching the view."""
        # This test simulates possible directory traversal attacks by a
        # malicious uploader.  We have to do some monkeybusiness here to
        # construct a malicious payload with an invalid file name (containing
        # os.sep or os.pardir).  This is similar to what an attacker would
        # need to do when trying such an attack.
        scary_file_names = [
            "/tmp/hax0rd.txt",  # Absolute path, *nix-style.
            "C:\\Windows\\hax0rd.txt",  # Absolute path, win-style.
            "C:/Windows/hax0rd.txt",  # Absolute path, broken-style.
            "\\tmp\\hax0rd.txt",  # Absolute path, broken in a different way.
            "/tmp\\hax0rd.txt",  # Absolute path, broken by mixing.
            "subdir/hax0rd.txt",  # Descendant path, *nix-style.
            "subdir\\hax0rd.txt",  # Descendant path, win-style.
            "sub/dir\\hax0rd.txt",  # Descendant path, mixed.
            "../../hax0rd.txt",  # Relative path, *nix-style.
            "..\\..\\hax0rd.txt",  # Relative path, win-style.
            "../..\\hax0rd.txt"  # Relative path, mixed.
        ]

        payload = client.FakePayload()
        for i, name in enumerate(scary_file_names):
            payload.write('\r\n'.join([
                '--' + client.BOUNDARY,
                'Content-Disposition: form-data; name="file%s"; filename="%s"' % (i, name),
                'Content-Type: application/octet-stream',
                '',
                'You got pwnd.\r\n'
            ]))
        payload.write('\r\n--' + client.BOUNDARY + '--\r\n')

        r = {
            'CONTENT_LENGTH': len(payload),
            'CONTENT_TYPE': client.MULTIPART_CONTENT,
            'PATH_INFO': "/echo/",
            'REQUEST_METHOD': 'POST',
            'wsgi.input': payload,
        }
        response = self.client.request(**r)
        # The filenames should have been sanitized by the time it got to the view.
        received = response.json()
        for i, name in enumerate(scary_file_names):
            got = received["file%s" % i]
            self.assertEqual(got, "hax0rd.txt")
    def test_filename_overflow(self):
        """File names over 256 characters (dangerous on some platforms) get fixed up."""
        long_str = 'f' * 300

        # Each case shows how the parser truncates while preserving the
        # extension: the total name must end up under 256 characters.
        cases = [
            # field name, filename, expected
            ('long_filename', '%s.txt' % long_str, '%s.txt' % long_str[:251]),
            ('long_extension', 'foo.%s' % long_str, '.%s' % long_str[:254]),
            ('no_extension', long_str, long_str[:255]),
            ('no_filename', '.%s' % long_str, '.%s' % long_str[:254]),
            ('long_everything', '%s.%s' % (long_str, long_str), '.%s' % long_str[:254]),
        ]

        payload = client.FakePayload()
        for name, filename, _ in cases:
            payload.write("\r\n".join([
                '--' + client.BOUNDARY,
                'Content-Disposition: form-data; name="{}"; filename="{}"',
                'Content-Type: application/octet-stream',
                '',
                'Oops.',
                ''
            ]).format(name, filename))
        payload.write('\r\n--' + client.BOUNDARY + '--\r\n')

        r = {
            'CONTENT_LENGTH': len(payload),
            'CONTENT_TYPE': client.MULTIPART_CONTENT,
            'PATH_INFO': "/echo/",
            'REQUEST_METHOD': 'POST',
            'wsgi.input': payload,
        }
        response = self.client.request(**r)
        result = response.json()
        for name, _, expected in cases:
            got = result[name]
            self.assertEqual(expected, got, 'Mismatch for {}'.format(name))
            self.assertLess(len(got), 256,
                            "Got a long file name (%s characters)." % len(got))
    def test_file_content(self):
        """File-like values of every flavor (no content type, explicit
        content type, StringIO, BytesIO) round-trip through the echo view.
        """
        file = tempfile.NamedTemporaryFile
        with file(suffix=".ctype_extra") as no_content_type, file(suffix=".ctype_extra") as simple_file:
            no_content_type.write(b'no content')
            no_content_type.seek(0)

            simple_file.write(b'text content')
            simple_file.seek(0)
            simple_file.content_type = 'text/plain'

            string_io = StringIO('string content')
            bytes_io = BytesIO(b'binary content')

            response = self.client.post('/echo_content/', {
                'no_content_type': no_content_type,
                'simple_file': simple_file,
                'string': string_io,
                'binary': bytes_io,
            })

            received = response.json()
            self.assertEqual(received['no_content_type'], 'no content')
            self.assertEqual(received['simple_file'], 'text content')
            self.assertEqual(received['string'], 'string content')
            self.assertEqual(received['binary'], 'binary content')
    def test_content_type_extra(self):
        """Uploaded files may have content type parameters available."""
        file = tempfile.NamedTemporaryFile
        with file(suffix=".ctype_extra") as no_content_type, file(suffix=".ctype_extra") as simple_file:
            no_content_type.write(b'something')
            no_content_type.seek(0)

            # Parameters after the media type (here test-key=test_value)
            # should surface as the file's content_type_extra dict.
            simple_file.write(b'something')
            simple_file.seek(0)
            simple_file.content_type = 'text/plain; test-key=test_value'

            response = self.client.post('/echo_content_type_extra/', {
                'no_content_type': no_content_type,
                'simple_file': simple_file,
            })
            received = response.json()
            self.assertEqual(received['no_content_type'], {})
            self.assertEqual(received['simple_file'], {'test-key': 'test_value'})
    def test_truncated_multipart_handled_gracefully(self):
        """
        If passed an incomplete multipart message, MultiPartParser does not
        attempt to read beyond the end of the stream, and simply will handle
        the part that can be parsed gracefully.
        """
        payload_str = "\r\n".join([
            '--' + client.BOUNDARY,
            'Content-Disposition: form-data; name="file"; filename="foo.txt"',
            'Content-Type: application/octet-stream',
            '',
            'file contents'
            '--' + client.BOUNDARY + '--',
            '',
        ])
        # Chop off the last 10 bytes to simulate a truncated request body.
        payload = client.FakePayload(payload_str[:-10])
        r = {
            'CONTENT_LENGTH': len(payload),
            'CONTENT_TYPE': client.MULTIPART_CONTENT,
            'PATH_INFO': '/echo/',
            'REQUEST_METHOD': 'POST',
            'wsgi.input': payload,
        }
        self.assertEqual(self.client.request(**r).json(), {})
def test_empty_multipart_handled_gracefully(self):
"""
If passed an empty multipart message, MultiPartParser will return
an empty QueryDict.
"""
r = {
'CONTENT_LENGTH': 0,
'CONTENT_TYPE': client.MULTIPART_CONTENT,
'PATH_INFO': '/echo/',
'REQUEST_METHOD': 'POST',
'wsgi.input': client.FakePayload(b''),
}
self.assertEqual(self.client.request(**r).json(), {})
    def test_custom_upload_handler(self):
        """The quota upload handler accepts small files and drops large ones."""
        # Shorthand for creating the temp files below.
        file = tempfile.NamedTemporaryFile
        with file() as smallfile, file() as bigfile:
            # A small file (under the 5M quota)
            smallfile.write(b'a' * (2 ** 21))
            smallfile.seek(0)
            # A big file (over the quota)
            bigfile.write(b'a' * (10 * 2 ** 20))
            bigfile.seek(0)
            # Small file posting should work.
            self.assertIn('f', self.client.post('/quota/', {'f': smallfile}).json())
            # Large files don't go through.
            self.assertNotIn('f', self.client.post("/quota/", {'f': bigfile}).json())
    def test_broken_custom_upload_handler(self):
        """Upload handlers cannot be replaced once parsing has started."""
        with tempfile.NamedTemporaryFile() as file:
            file.write(b'a' * (2 ** 21))
            file.seek(0)
            # AttributeError: You cannot alter upload handlers after the upload has been processed.
            with self.assertRaises(AttributeError):
                self.client.post('/quota/broken/', {'f': file})
    def test_fileupload_getlist(self):
        """Repeated file fields are all retrievable via FILES.getlist()."""
        # Shorthand for creating the temp files below.
        file = tempfile.NamedTemporaryFile
        with file() as file1, file() as file2, file() as file2a:
            file1.write(b'a' * (2 ** 23))
            file1.seek(0)
            file2.write(b'a' * (2 * 2 ** 18))
            file2.seek(0)
            file2a.write(b'a' * (5 * 2 ** 20))
            file2a.seek(0)
            response = self.client.post('/getlist_count/', {
                'file1': file1,
                'field1': 'test',
                'field2': 'test3',
                'field3': 'test5',
                'field4': 'test6',
                'field5': 'test7',
                # Two files posted under the same field name.
                'file2': (file2, file2a)
            })
            got = response.json()
            # The /getlist_count/ view presumably reports the number of
            # uploads per field name -- verify against the view definition.
            self.assertEqual(got.get('file1'), 1)
            self.assertEqual(got.get('file2'), 2)
    def test_fileuploads_closed_at_request_end(self):
        """Parsed upload file objects are closed once the request finishes."""
        # Shorthand for creating the temp files below.
        file = tempfile.NamedTemporaryFile
        with file() as f1, file() as f2a, file() as f2b:
            response = self.client.post('/fd_closing/t/', {
                'file': f1,
                'file2': (f2a, f2b),
            })
        request = response.wsgi_request
        # The files were parsed.
        self.assertTrue(hasattr(request, '_files'))
        file = request._files['file']
        self.assertTrue(file.closed)
        files = request._files.getlist('file2')
        self.assertTrue(files[0].closed)
        self.assertTrue(files[1].closed)
    def test_no_parsing_triggered_by_fd_closing(self):
        """Closing upload descriptors at request end must not force parsing."""
        # Shorthand for creating the temp files below.
        file = tempfile.NamedTemporaryFile
        with file() as f1, file() as f2a, file() as f2b:
            response = self.client.post('/fd_closing/f/', {
                'file': f1,
                'file2': (f2a, f2b),
            })
        request = response.wsgi_request
        # The fd closing logic doesn't trigger parsing of the stream
        self.assertFalse(hasattr(request, '_files'))
    def test_file_error_blocking(self):
        """
        The server should not block when there are upload errors (bug #8622).
        This can happen if something -- i.e. an exception handler -- tries to
        access POST while handling an error in parsing POST. This shouldn't
        cause an infinite loop!
        """
        class POSTAccessingHandler(client.ClientHandler):
            """A handler that'll access POST during an exception."""
            def handle_uncaught_exception(self, request, resolver, exc_info):
                ret = super().handle_uncaught_exception(request, resolver, exc_info)
                request.POST  # evaluate
                return ret
        # Maybe this is a little more complicated that it needs to be; but if
        # the django.test.client.FakePayload.read() implementation changes then
        # this test would fail. So we need to know exactly what kind of error
        # it raises when there is an attempt to read more than the available bytes:
        try:
            client.FakePayload(b'a').read(2)
        except Exception as err:
            # Captured as the "reference" over-read error for comparison below.
            reference_error = err
        # install the custom handler that tries to access request.POST
        self.client.handler = POSTAccessingHandler()
        with open(__file__, 'rb') as fp:
            post_data = {
                'name': 'Ringo',
                'file_field': fp,
            }
            try:
                self.client.post('/upload_errors/', post_data)
            except reference_error.__class__ as err:
                # Same exception type as an over-read: make sure it is NOT the
                # same over-read error repeating, which would mean the parser
                # looped on the exhausted stream.
                self.assertFalse(
                    str(err) == str(reference_error),
                    "Caught a repeated exception that'll cause an infinite loop in file uploads."
                )
            except Exception as err:
                # CustomUploadError is the error that should have been raised
                self.assertEqual(err.__class__, uploadhandler.CustomUploadError)
    def test_filename_case_preservation(self):
        """
        The storage backend shouldn't mess with the case of the filenames
        uploaded.
        """
        # Synthesize the contents of a file upload with a mixed case filename
        # so we don't have to carry such a file in the Django tests source code
        # tree.
        vars = {'boundary': 'oUrBoUnDaRyStRiNg'}
        post_data = [
            '--%(boundary)s',
            'Content-Disposition: form-data; name="file_field"; filename="MiXeD_cAsE.txt"',
            'Content-Type: application/octet-stream',
            '',
            # NOTE(review): the next two entries are implicitly concatenated
            # (no trailing comma) -- result is just 'file contents\n'; looks
            # intentional but confirm against upstream before "fixing".
            'file contents\n'
            '',
            '--%(boundary)s--\r\n',
        ]
        response = self.client.post(
            '/filename_case/',
            '\r\n'.join(post_data) % vars,
            'multipart/form-data; boundary=%(boundary)s' % vars
        )
        self.assertEqual(response.status_code, 200)
        # The view echoes back the primary key of the saved FileModel row.
        id = int(response.content)
        obj = FileModel.objects.get(pk=id)
        # The name of the file uploaded and the file stored in the server-side
        # shouldn't differ.
        self.assertEqual(os.path.basename(obj.testfile.path), 'MiXeD_cAsE.txt')
@override_settings(MEDIA_ROOT=MEDIA_ROOT)
class DirectoryCreationTests(SimpleTestCase):
    """
    Tests for error handling during directory creation
    via _save_FIELD_file (ticket #6450)
    """
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # Ensure the media root exists before any test writes into it.
        if not os.path.isdir(MEDIA_ROOT):
            os.makedirs(MEDIA_ROOT)

    @classmethod
    def tearDownClass(cls):
        shutil.rmtree(MEDIA_ROOT)
        super().tearDownClass()

    def setUp(self):
        # Fresh unsaved model instance for each test.
        self.obj = FileModel()

    @unittest.skipIf(sys.platform == 'win32', "Python on Windows doesn't have working os.chmod().")
    def test_readonly_root(self):
        """Permission errors are not swallowed"""
        # Make MEDIA_ROOT read-only so subdirectory creation must fail.
        os.chmod(MEDIA_ROOT, 0o500)
        self.addCleanup(os.chmod, MEDIA_ROOT, 0o700)
        with self.assertRaises(PermissionError):
            self.obj.testfile.save('foo.txt', SimpleUploadedFile('foo.txt', b'x'), save=False)

    def test_not_a_directory(self):
        """The correct IOError is raised when the upload directory name exists but isn't a directory"""
        # Create a file with the upload directory name
        open(UPLOAD_TO, 'wb').close()
        self.addCleanup(os.remove, UPLOAD_TO)
        with self.assertRaises(IOError) as exc_info:
            with SimpleUploadedFile('foo.txt', b'x') as file:
                self.obj.testfile.save('foo.txt', file, save=False)
        # The test needs to be done on a specific string as IOError
        # is raised even without the patch (just not early enough)
        self.assertEqual(exc_info.exception.args[0], "%s exists and is not a directory." % UPLOAD_TO)
class MultiParserTests(unittest.TestCase):
    """Unit tests for MultiPartParser construction and RFC 2231 header parsing."""

    def test_empty_upload_handlers(self):
        # We're not actually parsing here; just checking if the parser properly
        # instantiates with empty upload handlers.
        MultiPartParser({
            'CONTENT_TYPE': 'multipart/form-data; boundary=_foo',
            'CONTENT_LENGTH': '1'
        }, StringIO('x'), [], 'utf-8')

    def test_rfc2231_parsing(self):
        # Each raw header encodes the title per RFC 2231 (charset'lang'value);
        # parse_header() must decode using the declared charset.
        test_data = (
            (b"Content-Type: application/x-stuff; title*=us-ascii'en-us'This%20is%20%2A%2A%2Afun%2A%2A%2A",
             "This is ***fun***"),
            (b"Content-Type: application/x-stuff; title*=UTF-8''foo-%c3%a4.html",
             "foo-ä.html"),
            (b"Content-Type: application/x-stuff; title*=iso-8859-1''foo-%E4.html",
             "foo-ä.html"),
        )
        for raw_line, expected_title in test_data:
            parsed = parse_header(raw_line)
            self.assertEqual(parsed[1]['title'], expected_title)

    def test_rfc2231_wrong_title(self):
        """
        Test wrongly formatted RFC 2231 headers (missing double single quotes).
        Parsing should not crash (#24209).
        """
        # Malformed extended values are passed through as the raw bytes.
        test_data = (
            (b"Content-Type: application/x-stuff; title*='This%20is%20%2A%2A%2Afun%2A%2A%2A",
             b"'This%20is%20%2A%2A%2Afun%2A%2A%2A"),
            (b"Content-Type: application/x-stuff; title*='foo.html",
             b"'foo.html"),
            (b"Content-Type: application/x-stuff; title*=bar.html",
             b"bar.html"),
        )
        for raw_line, expected_title in test_data:
            parsed = parse_header(raw_line)
            self.assertEqual(parsed[1]['title'], expected_title)
| bsd-3-clause |
Endika/edx-platform | common/djangoapps/xmodule_django/models.py | 46 | 6250 | """
Useful django models for implementing XBlock infrastructure in django.
"""
import warnings
from django.db import models
from django.core.exceptions import ValidationError
from opaque_keys.edx.keys import CourseKey, UsageKey, BlockTypeKey
class NoneToEmptyManager(models.Manager):
    """
    A :class:`django.db.models.Manager` that has a :class:`NoneToEmptyQuerySet`
    as its `QuerySet`.
    """
    def __init__(self):
        # No extra state; present only to keep a stable construction point.
        # (The previous docstring documented a ``field_names`` argument that
        # this method has never taken.)
        super(NoneToEmptyManager, self).__init__()

    def get_queryset(self):
        """
        Returns the result of NoneToEmptyQuerySet instead of a regular QuerySet.
        """
        return NoneToEmptyQuerySet(self.model, using=self._db)
class NoneToEmptyQuerySet(models.query.QuerySet):
    """
    A QuerySet that rewrites ``None`` values passed to ``filter``/``exclude``
    into the field-specific ``Empty`` sentinel, for every field exposing one.

    This works around Django automatically converting ``exact`` queries for
    ``None`` into ``isnull`` queries before the field has a chance to convert
    them to queries for its own empty value.
    """
    def _filter_or_exclude(self, *args, **kwargs):
        for field_name in self.model._meta.get_all_field_names():
            field_object, _model, direct, _m2m = self.model._meta.get_field_by_name(field_name)
            # Only locally-defined fields that declare an Empty sentinel apply.
            if not direct or not hasattr(field_object, 'Empty'):
                continue
            for lookup in (field_name, field_name + '_exact'):
                if lookup in kwargs and kwargs[lookup] is None:
                    kwargs[lookup] = field_object.Empty
        return super(NoneToEmptyQuerySet, self)._filter_or_exclude(*args, **kwargs)
def _strip_object(key):
    """
    Strips branch and version info if the given key supports those attributes.
    """
    can_strip = hasattr(key, 'version_agnostic') and hasattr(key, 'for_branch')
    if not can_strip:
        return key
    return key.for_branch(None).version_agnostic()
def _strip_value(value, lookup='exact'):
    """
    Remove branch/version information from *value*, which is a list for
    ``in`` lookups and a single key otherwise.
    """
    if lookup == 'in':
        return [_strip_object(element) for element in value]
    return _strip_object(value)
class OpaqueKeyField(models.CharField):
    """
    A django field for storing OpaqueKeys.

    The baseclass will return the value from the database as a string, rather than an instance
    of an OpaqueKey, leaving the application to determine which key subtype to parse the string
    as.

    Subclasses must specify a KEY_CLASS attribute, in which case the field will use :meth:`from_string`
    to parse the key string, and will return an instance of KEY_CLASS.
    """
    description = "An OpaqueKey object, saved to the DB in the form of a string."

    # Python 2 hook: makes Django run to_python() on attribute assignment.
    __metaclass__ = models.SubfieldBase

    # Sentinel meaning "empty key" in queries; used instead of None because
    # Django turns None lookups into isnull queries (see NoneToEmptyQuerySet).
    Empty = object()
    KEY_CLASS = None

    def __init__(self, *args, **kwargs):
        if self.KEY_CLASS is None:
            raise ValueError('Must specify KEY_CLASS in OpaqueKeyField subclasses')
        super(OpaqueKeyField, self).__init__(*args, **kwargs)

    def to_python(self, value):
        """Convert a DB string (or an existing key) to a KEY_CLASS instance or None."""
        if value is self.Empty or value is None:
            return None
        assert isinstance(value, (basestring, self.KEY_CLASS)), \
            "%s is not an instance of basestring or %s" % (value, self.KEY_CLASS)
        if value == '':
            # handle empty string for models being created w/o fields populated
            return None
        if isinstance(value, basestring):
            return self.KEY_CLASS.from_string(value)
        else:
            return value

    def get_prep_lookup(self, lookup, value):
        """Prepare *value* for a lookup; isnull is forbidden in favor of Empty."""
        if lookup == 'isnull':
            raise TypeError('Use {0}.Empty rather than None to query for a missing {0}'.format(self.__class__.__name__))
        return super(OpaqueKeyField, self).get_prep_lookup(
            lookup,
            # strip key before comparing
            _strip_value(value, lookup)
        )

    def get_prep_value(self, value):
        """Serialize the key for DB storage."""
        if value is self.Empty or value is None:
            return ''  # CharFields should use '' as their empty value, rather than None
        assert isinstance(value, self.KEY_CLASS), "%s is not an instance of %s" % (value, self.KEY_CLASS)
        return unicode(_strip_value(value))

    def validate(self, value, model_instance):
        """Validate Empty values, otherwise defer to the parent"""
        # raise validation error if the use of this field says it can't be blank but it is
        if not self.blank and value is self.Empty:
            raise ValidationError(self.error_messages['blank'])
        else:
            return super(OpaqueKeyField, self).validate(value, model_instance)

    def run_validators(self, value):
        """Validate Empty values, otherwise defer to the parent"""
        if value is self.Empty:
            return
        return super(OpaqueKeyField, self).run_validators(value)
class CourseKeyField(OpaqueKeyField):
    """
    A django Field that stores a CourseKey object as a string.
    """
    description = "A CourseKey object, saved to the DB in the form of a string"
    KEY_CLASS = CourseKey


class UsageKeyField(OpaqueKeyField):
    """
    A django Field that stores a UsageKey object as a string.
    """
    description = "A Location object, saved to the DB in the form of a string"
    KEY_CLASS = UsageKey


class LocationKeyField(UsageKeyField):
    """
    Deprecated alias of UsageKeyField; warns on instantiation.
    """
    def __init__(self, *args, **kwargs):
        warnings.warn("LocationKeyField is deprecated. Please use UsageKeyField instead.", stacklevel=2)
        super(LocationKeyField, self).__init__(*args, **kwargs)


class BlockTypeKeyField(OpaqueKeyField):
    """
    A django Field that stores a BlockTypeKey object as a string.
    """
    description = "A BlockTypeKey object, saved to the DB in the form of a string."
    KEY_CLASS = BlockTypeKey
| agpl-3.0 |
FreekingDean/home-assistant | homeassistant/components/cover/__init__.py | 5 | 7553 | """
Support for Cover devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/cover/
"""
import os
import logging
import voluptuous as vol
from homeassistant.config import load_yaml_config_file
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA # noqa
import homeassistant.helpers.config_validation as cv
from homeassistant.components import group
from homeassistant.const import (
SERVICE_OPEN_COVER, SERVICE_CLOSE_COVER, SERVICE_SET_COVER_POSITION,
SERVICE_STOP_COVER, SERVICE_OPEN_COVER_TILT, SERVICE_CLOSE_COVER_TILT,
SERVICE_STOP_COVER_TILT, SERVICE_SET_COVER_TILT_POSITION, STATE_OPEN,
STATE_CLOSED, STATE_UNKNOWN, ATTR_ENTITY_ID)
DOMAIN = 'cover'
SCAN_INTERVAL = 15
GROUP_NAME_ALL_COVERS = 'all covers'
ENTITY_ID_ALL_COVERS = group.ENTITY_ID_FORMAT.format('all_covers')
ENTITY_ID_FORMAT = DOMAIN + '.{}'
_LOGGER = logging.getLogger(__name__)
ATTR_CURRENT_POSITION = 'current_position'
ATTR_CURRENT_TILT_POSITION = 'current_tilt_position'
ATTR_POSITION = 'position'
ATTR_TILT_POSITION = 'tilt_position'
COVER_SERVICE_SCHEMA = vol.Schema({
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
})
COVER_SET_COVER_POSITION_SCHEMA = COVER_SERVICE_SCHEMA.extend({
vol.Required(ATTR_POSITION):
vol.All(vol.Coerce(int), vol.Range(min=0, max=100)),
})
COVER_SET_COVER_TILT_POSITION_SCHEMA = COVER_SERVICE_SCHEMA.extend({
vol.Required(ATTR_TILT_POSITION):
vol.All(vol.Coerce(int), vol.Range(min=0, max=100)),
})
SERVICE_TO_METHOD = {
SERVICE_OPEN_COVER: {'method': 'open_cover'},
SERVICE_CLOSE_COVER: {'method': 'close_cover'},
SERVICE_SET_COVER_POSITION: {
'method': 'set_cover_position',
'schema': COVER_SET_COVER_POSITION_SCHEMA},
SERVICE_STOP_COVER: {'method': 'stop_cover'},
SERVICE_OPEN_COVER_TILT: {'method': 'open_cover_tilt'},
SERVICE_CLOSE_COVER_TILT: {'method': 'close_cover_tilt'},
SERVICE_STOP_COVER_TILT: {'method': 'stop_cover_tilt'},
SERVICE_SET_COVER_TILT_POSITION: {
'method': 'set_cover_tilt_position',
'schema': COVER_SET_COVER_TILT_POSITION_SCHEMA},
}
def is_closed(hass, entity_id=None):
    """Return if the cover is closed based on the statemachine."""
    target = entity_id or ENTITY_ID_ALL_COVERS
    return hass.states.is_state(target, STATE_CLOSED)


def _entity_service_data(entity_id):
    """Service payload addressing one entity, or None to address all covers."""
    return {ATTR_ENTITY_ID: entity_id} if entity_id else None


def open_cover(hass, entity_id=None):
    """Open all or specified cover."""
    hass.services.call(DOMAIN, SERVICE_OPEN_COVER, _entity_service_data(entity_id))


def close_cover(hass, entity_id=None):
    """Close all or specified cover."""
    hass.services.call(DOMAIN, SERVICE_CLOSE_COVER, _entity_service_data(entity_id))


def set_cover_position(hass, position, entity_id=None):
    """Move to specific position all or specified cover."""
    data = {ATTR_POSITION: position}
    if entity_id:
        data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_SET_COVER_POSITION, data)


def stop_cover(hass, entity_id=None):
    """Stop all or specified cover."""
    hass.services.call(DOMAIN, SERVICE_STOP_COVER, _entity_service_data(entity_id))


def open_cover_tilt(hass, entity_id=None):
    """Open all or specified cover tilt."""
    hass.services.call(DOMAIN, SERVICE_OPEN_COVER_TILT, _entity_service_data(entity_id))


def close_cover_tilt(hass, entity_id=None):
    """Close all or specified cover tilt."""
    hass.services.call(DOMAIN, SERVICE_CLOSE_COVER_TILT, _entity_service_data(entity_id))


def set_cover_tilt_position(hass, tilt_position, entity_id=None):
    """Move to specific tilt position all or specified cover."""
    data = {ATTR_TILT_POSITION: tilt_position}
    if entity_id:
        data[ATTR_ENTITY_ID] = entity_id
    hass.services.call(DOMAIN, SERVICE_SET_COVER_TILT_POSITION, data)


def stop_cover_tilt(hass, entity_id=None):
    """Stop all or specified cover tilt."""
    hass.services.call(DOMAIN, SERVICE_STOP_COVER_TILT, _entity_service_data(entity_id))
def setup(hass, config):
    """Track states and offer events for covers."""
    component = EntityComponent(
        _LOGGER, DOMAIN, hass, SCAN_INTERVAL, GROUP_NAME_ALL_COVERS)
    component.setup(config)

    def handle_cover_service(service):
        """Handle calls to the cover services."""
        method = SERVICE_TO_METHOD.get(service.service)
        # Everything except the target entity ids is forwarded as keyword
        # arguments to the device method (e.g. position, tilt_position).
        params = service.data.copy()
        params.pop(ATTR_ENTITY_ID, None)
        if not method:
            return
        covers = component.extract_from_service(service)
        for cover in covers:
            getattr(cover, method['method'])(**params)
        # Second pass: only poll-based entities need an explicit state refresh.
        for cover in covers:
            if not cover.should_poll:
                continue
            cover.update_ha_state(True)

    # Service descriptions shown in the UI, loaded from services.yaml.
    descriptions = load_yaml_config_file(
        os.path.join(os.path.dirname(__file__), 'services.yaml'))
    for service_name in SERVICE_TO_METHOD:
        # Fall back to the generic entity-id-only schema per service.
        schema = SERVICE_TO_METHOD[service_name].get(
            'schema', COVER_SERVICE_SCHEMA)
        hass.services.register(DOMAIN, service_name, handle_cover_service,
                               descriptions.get(service_name), schema=schema)
    return True
class CoverDevice(Entity):
    """Representation a cover.

    Subclasses must implement is_closed, open_cover and close_cover; the
    position/tilt hooks are optional no-ops by default.
    """

    # pylint: disable=no-self-use
    @property
    def current_cover_position(self):
        """Return current position of cover.

        None is unknown, 0 is closed, 100 is fully open.
        """
        pass

    @property
    def current_cover_tilt_position(self):
        """Return current position of cover tilt.

        None is unknown, 0 is closed, 100 is fully open.
        """
        pass

    @property
    def state(self):
        """Return the state of the cover."""
        closed = self.is_closed
        if closed is None:
            return STATE_UNKNOWN
        return STATE_CLOSED if closed else STATE_OPEN

    @property
    def state_attributes(self):
        """Return the state attributes."""
        data = {}
        # Read each property once and reuse the value -- the original read the
        # property twice, which is wasteful (and fragile) for implementations
        # that compute the position on access.
        current = self.current_cover_position
        if current is not None:
            data[ATTR_CURRENT_POSITION] = current
        current_tilt = self.current_cover_tilt_position
        if current_tilt is not None:
            data[ATTR_CURRENT_TILT_POSITION] = current_tilt
        return data

    @property
    def is_closed(self):
        """Return if the cover is closed or not."""
        raise NotImplementedError()

    def open_cover(self, **kwargs):
        """Open the cover."""
        raise NotImplementedError()

    def close_cover(self, **kwargs):
        """Close cover."""
        raise NotImplementedError()

    def set_cover_position(self, **kwargs):
        """Move the cover to a specific position."""
        pass

    def stop_cover(self, **kwargs):
        """Stop the cover."""
        pass

    def open_cover_tilt(self, **kwargs):
        """Open the cover tilt."""
        pass

    def close_cover_tilt(self, **kwargs):
        """Close the cover tilt."""
        pass

    def set_cover_tilt_position(self, **kwargs):
        """Move the cover tilt to a specific position."""
        pass

    def stop_cover_tilt(self, **kwargs):
        """Stop the cover."""
        pass
| mit |
betoesquivel/CIE | flask/lib/python2.7/site-packages/werkzeug/security.py | 146 | 8483 | # -*- coding: utf-8 -*-
"""
werkzeug.security
~~~~~~~~~~~~~~~~~
Security related helpers such as secure password hashing tools.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
import os
import hmac
import hashlib
import posixpath
import codecs
from struct import Struct
from random import SystemRandom
from operator import xor
from itertools import starmap
from werkzeug._compat import range_type, PY2, text_type, izip, to_bytes, \
string_types, to_native
SALT_CHARS = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'
DEFAULT_PBKDF2_ITERATIONS = 1000
_pack_int = Struct('>I').pack
_builtin_safe_str_cmp = getattr(hmac, 'compare_digest', None)
_sys_rng = SystemRandom()
_os_alt_seps = list(sep for sep in [os.path.sep, os.path.altsep]
if sep not in (None, '/'))
def _find_hashlib_algorithms():
    """Map algorithm names to the hashlib constructors available at runtime."""
    names = getattr(hashlib, 'algorithms', None)
    if names is None:
        # Python versions without hashlib.algorithms: probe a known set.
        names = ('md5', 'sha1', 'sha224', 'sha256', 'sha384', 'sha512')
    found = {}
    for name in names:
        constructor = getattr(hashlib, name, None)
        if constructor is not None:
            found[name] = constructor
    return found
_hash_funcs = _find_hashlib_algorithms()
def pbkdf2_hex(data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS,
               keylen=None, hashfunc=None):
    """Like :func:`pbkdf2_bin` but returns a hex encoded string.

    .. versionadded:: 0.9

    :param data: the data to derive.
    :param salt: the salt for the derivation.
    :param iterations: the number of iterations.
    :param keylen: the length of the resulting key.  If not provided
                   the digest size will be used.
    :param hashfunc: the hash function to use.  This can either be the
                     string name of a known hash function or a function
                     from the hashlib module.  Defaults to sha1.
    """
    derived = pbkdf2_bin(data, salt, iterations, keylen, hashfunc)
    return to_native(codecs.encode(derived, 'hex_codec'))
def pbkdf2_bin(data, salt, iterations=DEFAULT_PBKDF2_ITERATIONS,
               keylen=None, hashfunc=None):
    """Returns a binary digest for the PBKDF2 hash algorithm of `data`
    with the given `salt`. It iterates `iterations` time and produces a
    key of `keylen` bytes. By default SHA-1 is used as hash function,
    a different hashlib `hashfunc` can be provided.

    .. versionadded:: 0.9

    :param data: the data to derive.
    :param salt: the salt for the derivation.
    :param iterations: the number of iterations.
    :param keylen: the length of the resulting key. If not provided
                   the digest size will be used.
    :param hashfunc: the hash function to use. This can either be the
                     string name of a known hash function or a function
                     from the hashlib module. Defaults to sha1.
    """
    if isinstance(hashfunc, string_types):
        hashfunc = _hash_funcs[hashfunc]
    elif not hashfunc:
        hashfunc = hashlib.sha1
    salt = to_bytes(salt)
    # The PRF is HMAC keyed with the password (RFC 2898, section 5.2).
    mac = hmac.HMAC(to_bytes(data), None, hashfunc)
    if not keylen:
        keylen = mac.digest_size

    def _pseudorandom(x, mac=mac):
        # Copy the shared HMAC so the keyed state is never mutated.
        h = mac.copy()
        h.update(x)
        return bytearray(h.digest())

    buf = bytearray()
    # Derive ceil(keylen / digest_size) blocks; block index is 1-based.
    for block in range_type(1, -(-keylen // mac.digest_size) + 1):
        rv = u = _pseudorandom(salt + _pack_int(block))
        for i in range_type(iterations - 1):
            u = _pseudorandom(bytes(u))
            # XOR each successive PRF output into the running block value.
            rv = bytearray(starmap(xor, izip(rv, u)))
        buf.extend(rv)
    # Truncate to the requested key length.
    return bytes(buf[:keylen])
def safe_str_cmp(a, b):
    """This function compares strings in somewhat constant time. This
    requires that the length of at least one string is known in advance.

    Returns `True` if the two strings are equal or `False` if they are not.

    .. versionadded:: 0.7
    """
    # Normalize both operands to bytes so the byte-wise XOR below lines up.
    if isinstance(a, text_type):
        a = a.encode('utf-8')
    if isinstance(b, text_type):
        b = b.encode('utf-8')
    # Prefer the C implementation (hmac.compare_digest) when available.
    if _builtin_safe_str_cmp is not None:
        return _builtin_safe_str_cmp(a, b)
    if len(a) != len(b):
        return False
    # Accumulate differences instead of returning early so the runtime does
    # not depend on where the strings first differ (timing-attack
    # resistance).  Do NOT "simplify" this loop with short-circuiting.
    rv = 0
    if PY2:
        for x, y in izip(a, b):
            rv |= ord(x) ^ ord(y)
    else:
        for x, y in izip(a, b):
            rv |= x ^ y
    return rv == 0
def gen_salt(length):
    """Generate a random string of SALT_CHARS with specified ``length``."""
    if length <= 0:
        raise ValueError('requested salt of length <= 0')
    chars = [_sys_rng.choice(SALT_CHARS) for _ in range_type(length)]
    return ''.join(chars)
def _hash_internal(method, salt, password):
    """Internal password hash helper. Supports plaintext without salt,
    unsalted and salted passwords. In case salted passwords are used
    hmac is used.

    Returns a ``(hexdigest, actual_method)`` pair; for PBKDF2 the actual
    method string includes the resolved iteration count.
    """
    if method == 'plain':
        return password, method
    if isinstance(password, text_type):
        password = password.encode('utf-8')
    if method.startswith('pbkdf2:'):
        # Method string has the shape ``pbkdf2:<hash>[:<iterations>]``.
        args = method[7:].split(':')
        if len(args) not in (1, 2):
            raise ValueError('Invalid number of arguments for PBKDF2')
        method = args.pop(0)
        # Missing or zero iteration count falls back to the default.
        iterations = args and int(args[0] or 0) or DEFAULT_PBKDF2_ITERATIONS
        is_pbkdf2 = True
        actual_method = 'pbkdf2:%s:%d' % (method, iterations)
    else:
        is_pbkdf2 = False
        actual_method = method
    hash_func = _hash_funcs.get(method)
    if hash_func is None:
        raise TypeError('invalid method %r' % method)
    if is_pbkdf2:
        if not salt:
            raise ValueError('Salt is required for PBKDF2')
        rv = pbkdf2_hex(password, salt, iterations,
                        hashfunc=hash_func)
    elif salt:
        # Salted but not PBKDF2: HMAC keyed with the salt.
        if isinstance(salt, text_type):
            salt = salt.encode('utf-8')
        rv = hmac.HMAC(salt, password, hash_func).hexdigest()
    else:
        # Unsalted legacy hash: plain digest of the password.
        h = hash_func()
        h.update(password)
        rv = h.hexdigest()
    return rv, actual_method
def generate_password_hash(password, method='pbkdf2:sha1', salt_length=8):
    """Hash a password with the given method and salt with with a string of
    the given length.  The format of the string returned includes the method
    that was used so that :func:`check_password_hash` can check the hash.

    The format for the hashed string looks like this::

        method$salt$hash

    This method can **not** generate unsalted passwords but it is possible
    to set the method to plain to enforce plaintext passwords.  If a salt
    is used, hmac is used internally to salt the password.

    If PBKDF2 is wanted it can be enabled by setting the method to
    ``pbkdf2:method:iterations`` where iterations is optional::

        pbkdf2:sha1:2000$salt$hash
        pbkdf2:sha1$salt$hash

    :param password: the password to hash
    :param method: the hash method to use (one that hashlib supports), can
                   optionally be in the format ``pbpdf2:<method>[:iterations]``
                   to enable PBKDF2.
    :param salt_length: the length of the salt in letters
    """
    # A conditional expression replaces the fragile legacy ``x and a or b``
    # idiom, which silently picks ``b`` whenever ``a`` is falsy.
    salt = gen_salt(salt_length) if method != 'plain' else ''
    h, actual_method = _hash_internal(method, salt, password)
    return '%s$%s$%s' % (actual_method, salt, h)
def check_password_hash(pwhash, password):
    """check a password against a given salted and hashed password value.
    In order to support unsalted legacy passwords this method supports
    plain text passwords, md5 and sha1 hashes (both salted and unsalted).

    Returns `True` if the password matched, `False` otherwise.

    :param pwhash: a hashed string like returned by
                   :func:`generate_password_hash`
    :param password: the plaintext password to compare against the hash
    """
    # A well-formed hash has at least "method$salt$digest".
    if pwhash.count('$') < 2:
        return False
    method, salt, hashval = pwhash.split('$', 2)
    computed, _ = _hash_internal(method, salt, password)
    return safe_str_cmp(computed, hashval)
def safe_join(directory, filename):
    """Safely join `directory` and `filename`.  If this cannot be done,
    this function returns ``None``.

    :param directory: the base directory.
    :param filename: the untrusted filename relative to that directory.
    """
    filename = posixpath.normpath(filename)
    # Reject any platform-specific separator the normalization didn't handle.
    if any(sep in filename for sep in _os_alt_seps):
        return None
    # Reject absolute paths and anything escaping the base directory.
    if os.path.isabs(filename) or filename.startswith('../'):
        return None
    return os.path.join(directory, filename)
| mit |
axbaretto/beam | sdks/python/apache_beam/transforms/combiners.py | 5 | 29015 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""A library of basic combiner PTransform subclasses."""
# pytype: skip-file
import copy
import heapq
import operator
import random
from typing import Any
from typing import Dict
from typing import Iterable
from typing import List
from typing import Set
from typing import Tuple
from typing import TypeVar
from typing import Union
from apache_beam import typehints
from apache_beam.transforms import core
from apache_beam.transforms import cy_combiners
from apache_beam.transforms import ptransform
from apache_beam.transforms import window
from apache_beam.transforms.display import DisplayDataItem
from apache_beam.typehints import with_input_types
from apache_beam.typehints import with_output_types
from apache_beam.utils.timestamp import Duration
from apache_beam.utils.timestamp import Timestamp
__all__ = [
'Count', 'Mean', 'Sample', 'Top', 'ToDict', 'ToList', 'ToSet', 'Latest'
]
# Type variables
T = TypeVar('T')
K = TypeVar('K')
V = TypeVar('V')
TimestampType = Union[int, float, Timestamp, Duration]
class CombinerWithoutDefaults(ptransform.PTransform):
  """Super class to inherit without_defaults to built-in Combiners."""
  def __init__(self, has_defaults=True):
    super(CombinerWithoutDefaults, self).__init__()
    # Whether the combiner emits a default value for empty input.
    self.has_defaults = has_defaults

  def with_defaults(self, has_defaults=True):
    # Return a shallow clone so the original transform stays untouched.
    cloned = copy.copy(self)
    cloned.has_defaults = has_defaults
    return cloned

  def without_defaults(self):
    return self.with_defaults(has_defaults=False)
class Mean(object):
  """Combiners for computing arithmetic means of elements."""
  class Globally(CombinerWithoutDefaults):
    """combiners.Mean.Globally computes the arithmetic mean of the elements."""
    def expand(self, pcoll):
      combine = core.CombineGlobally(MeanCombineFn())
      if not self.has_defaults:
        combine = combine.without_defaults()
      return pcoll | combine

  class PerKey(ptransform.PTransform):
    """combiners.Mean.PerKey finds the means of the values for each key."""
    def expand(self, pcoll):
      return pcoll | core.CombinePerKey(MeanCombineFn())
# TODO(laolu): This type signature is overly restrictive. This should be
# more general.
@with_input_types(Union[float, int])
@with_output_types(float)
class MeanCombineFn(core.CombineFn):
  """CombineFn for computing an arithmetic mean."""
  def create_accumulator(self):
    # Accumulator is a (running_sum, element_count) pair.
    return (0, 0)

  def add_input(self, sum_count, element):
    running_sum, count = sum_count
    return running_sum + element, count + 1

  def merge_accumulators(self, accumulators):
    sums, counts = zip(*accumulators)
    return sum(sums), sum(counts)

  def extract_output(self, sum_count):
    running_sum, count = sum_count
    # An empty accumulator has no defined mean.
    return float('NaN') if count == 0 else running_sum / float(count)

  def for_input_type(self, input_type):
    # Swap in the faster Cython implementations for known numeric types.
    if input_type is int:
      return cy_combiners.MeanInt64Fn()
    if input_type is float:
      return cy_combiners.MeanFloatFn()
    return self
class Count(object):
  """Combiners for counting elements."""
  class Globally(CombinerWithoutDefaults):
    """combiners.Count.Globally counts the total number of elements."""
    def expand(self, pcoll):
      combine = core.CombineGlobally(CountCombineFn())
      if not self.has_defaults:
        combine = combine.without_defaults()
      return pcoll | combine

  class PerKey(ptransform.PTransform):
    """combiners.Count.PerKey counts how many elements each unique key has."""
    def expand(self, pcoll):
      return pcoll | core.CombinePerKey(CountCombineFn())

  class PerElement(ptransform.PTransform):
    """combiners.Count.PerElement counts how many times each element occurs."""
    def expand(self, pcoll):
      paired_with_void_type = typehints.Tuple[pcoll.element_type, Any]
      output_type = typehints.KV[pcoll.element_type, int]
      # Pair every element with None, then count occurrences per "key".
      paired = pcoll | (
          '%s:PairWithVoid' % self.label >> core.Map(
              lambda x: (x, None)).with_output_types(paired_with_void_type))
      return paired | core.CombinePerKey(
          CountCombineFn()).with_output_types(output_type)
@with_input_types(Any)
@with_output_types(int)
class CountCombineFn(core.CombineFn):
  """CombineFn for computing PCollection size."""
  def create_accumulator(self):
    return 0

  def add_input(self, accumulator, element):
    # Only the number of elements matters, never their values.
    return accumulator + 1

  def add_inputs(self, accumulator, elements):
    return accumulator + sum(1 for _ in elements)

  def merge_accumulators(self, accumulators):
    return sum(accumulators)

  def extract_output(self, accumulator):
    return accumulator
class Top(object):
  """Combiners for obtaining extremal elements."""
  # pylint: disable=no-self-argument

  class Of(CombinerWithoutDefaults):
    """Obtain a list of the compare-most N elements in a PCollection.

    This transform will retrieve the n greatest elements in the PCollection
    to which it is applied, where "greatest" is determined by the comparator
    function supplied as the compare argument.
    """
    def __init__(self, n, key=None, reverse=False):
      """Creates a global Top operation.

      The arguments 'key' and 'reverse' may be passed as keyword arguments,
      and have the same meaning as for Python's sort functions.

      Args:
        n: number of elements to extract from pcoll.
        key: (optional) a mapping of elements to a comparable key, similar to
            the key argument of Python's sorting methods.
        reverse: (optional) whether to order things smallest to largest, rather
            than largest to smallest
      """
      super(Top.Of, self).__init__()
      self._n = n
      self._key = key
      self._reverse = reverse

    def default_label(self):
      return 'Top(%d)' % self._n

    def expand(self, pcoll):
      if pcoll.windowing.is_default():
        # This is a more efficient global algorithm: take the top n of each
        # bundle, then merge the per-bundle candidates under a single key.
        top_per_bundle = pcoll | core.ParDo(
            _TopPerBundle(self._n, self._key, self._reverse))
        # If pcoll is empty, we can't guarantee that top_per_bundle
        # won't be empty, so inject at least one empty accumulator
        # so that downstream is guaranteed to produce non-empty output.
        empty_bundle = pcoll.pipeline | core.Create([(None, [])])
        return ((top_per_bundle, empty_bundle) | core.Flatten()
                | core.GroupByKey()
                | core.ParDo(
                    _MergeTopPerBundle(self._n, self._key, self._reverse)))
      else:
        # Non-default windowing: fall back to a generic CombineGlobally.
        if self.has_defaults:
          return pcoll | core.CombineGlobally(
              TopCombineFn(self._n, self._key, self._reverse))
        else:
          return pcoll | core.CombineGlobally(
              TopCombineFn(self._n, self._key,
                           self._reverse)).without_defaults()

  class PerKey(ptransform.PTransform):
    """Identifies the compare-most N elements associated with each key.

    This transform will produce a PCollection mapping unique keys in the input
    PCollection to the n greatest elements with which they are associated, where
    "greatest" is determined by the comparator function supplied as the compare
    argument in the initializer.
    """
    def __init__(self, n, key=None, reverse=False):
      """Creates a per-key Top operation.

      The arguments 'key' and 'reverse' may be passed as keyword arguments,
      and have the same meaning as for Python's sort functions.

      Args:
        n: number of elements to extract from pcoll.
        key: (optional) a mapping of elements to a comparable key, similar to
            the key argument of Python's sorting methods.
        reverse: (optional) whether to order things smallest to largest, rather
            than largest to smallest
      """
      self._n = n
      self._key = key
      self._reverse = reverse

    def default_label(self):
      return 'TopPerKey(%d)' % self._n

    def expand(self, pcoll):
      """Expands the transform.

      Raises TypeCheckError: If the output type of the input PCollection is not
      compatible with Tuple[A, B].

      Args:
        pcoll: PCollection to process

      Returns:
        the PCollection containing the result.
      """
      return pcoll | core.CombinePerKey(
          TopCombineFn(self._n, self._key, self._reverse))

  @staticmethod
  @ptransform.ptransform_fn
  def Largest(pcoll, n, has_defaults=True):
    """Obtain a list of the greatest N elements in a PCollection."""
    if has_defaults:
      return pcoll | Top.Of(n)
    else:
      return pcoll | Top.Of(n).without_defaults()

  @staticmethod
  @ptransform.ptransform_fn
  def Smallest(pcoll, n, has_defaults=True):
    """Obtain a list of the least N elements in a PCollection."""
    if has_defaults:
      return pcoll | Top.Of(n, reverse=True)
    else:
      return pcoll | Top.Of(n, reverse=True).without_defaults()

  @staticmethod
  @ptransform.ptransform_fn
  def LargestPerKey(pcoll, n):
    """Identifies the N greatest elements associated with each key."""
    return pcoll | Top.PerKey(n)

  @staticmethod
  @ptransform.ptransform_fn
  def SmallestPerKey(pcoll, n, reverse=True):
    """Identifies the N least elements associated with each key."""
    # NOTE(review): the 'reverse' parameter is accepted but ignored -- the
    # call below always passes reverse=True. Presumably kept for signature
    # compatibility; confirm with callers before changing.
    return pcoll | Top.PerKey(n, reverse=True)
@with_input_types(T)
@with_output_types(Tuple[None, List[T]])
class _TopPerBundle(core.DoFn):
  """Selects the (at most) n compare-most elements of each input bundle.

  Emits one (None, sorted_candidates) pair per bundle; the shared None key
  lets a downstream GroupByKey gather every bundle's candidates together.
  """
  def __init__(self, n, key, reverse):
    self._n = n
    # With reverse=True, a gt comparator inverts the ordering; otherwise None
    # means "use the elements' natural ordering without wrapping them".
    self._compare = operator.gt if reverse else None
    self._key = key

  def start_bundle(self):
    # Min-heap of the current top-n candidates for this bundle.
    self._heap = []

  def process(self, element):
    # Wrap only when a custom ordering is in effect; plain elements heapify
    # directly and avoid the wrapper overhead.
    if self._compare or self._key:
      element = cy_combiners.ComparableValue(element, self._compare, self._key)
    if len(self._heap) < self._n:
      heapq.heappush(self._heap, element)
    else:
      heapq.heappushpop(self._heap, element)

  def finish_bundle(self):
    # Though sorting here results in more total work, this allows us to
    # skip most elements in the reducer.
    # Essentially, given s map bundles, we are trading about O(sn) compares in
    # the (single) reducer for O(sn log n) compares across all mappers.
    self._heap.sort()

    # Unwrap to avoid serialization via pickle.
    if self._compare or self._key:
      yield window.GlobalWindows.windowed_value(
          (None, [wrapper.value for wrapper in self._heap]))
    else:
      yield window.GlobalWindows.windowed_value((None, self._heap))
@with_input_types(Tuple[None, Iterable[List[T]]])
@with_output_types(List[T])
class _MergeTopPerBundle(core.DoFn):
  """Merges the sorted per-bundle top-n lists from _TopPerBundle."""
  def __init__(self, n, key, reverse):
    self._n = n
    # Mirrors _TopPerBundle: gt comparator when reversed, else None for the
    # elements' natural ordering.
    self._compare = operator.gt if reverse else None
    self._key = key

  def process(self, key_and_bundles):
    _, bundles = key_and_bundles

    def push(hp, e):
      # Returns True when the remainder of the current (sorted) bundle can
      # be skipped entirely.
      if len(hp) < self._n:
        heapq.heappush(hp, e)
        return False
      elif e < hp[0]:
        # Because _TopPerBundle returns sorted lists, all other elements
        # will also be smaller.
        return True
      else:
        heapq.heappushpop(hp, e)
        return False

    if self._compare or self._key:
      heapc = []  # type: List[cy_combiners.ComparableValue]
      for bundle in bundles:
        if not heapc:
          # The first bundle (at most n elements, already heap-compatible)
          # seeds the heap wholesale.
          heapc = [
              cy_combiners.ComparableValue(element, self._compare, self._key)
              for element in bundle
          ]
          continue
        # Bundles arrive sorted ascending; walking them largest-first lets
        # push() bail out as soon as an element cannot make the top n.
        for element in reversed(bundle):
          if push(heapc,
                  cy_combiners.ComparableValue(element,
                                               self._compare,
                                               self._key)):
            break
      heapc.sort()
      yield [wrapper.value for wrapper in reversed(heapc)]
    else:
      heap = []
      for bundle in bundles:
        if not heap:
          heap = bundle
          continue
        for element in reversed(bundle):
          if push(heap, element):
            break
      heap.sort()
      yield heap[::-1]
@with_input_types(T)
@with_output_types(List[T])
class TopCombineFn(core.CombineFn):
  """CombineFn doing the combining for all of the Top transforms.

  This CombineFn uses a key or comparison operator to rank the elements.

  Args:
    key: (optional) a mapping of elements to a comparable key, similar to
        the key argument of Python's sorting methods.
    reverse: (optional) whether to order things smallest to largest, rather
        than largest to smallest
  """
  def __init__(self, n, key=None, reverse=False):
    self._n = n
    self._compare = operator.gt if reverse else operator.lt
    self._key = key

  def _hydrated_heap(self, heap):
    # Restores a usable heap after (de)serialization: compact() strips the
    # compare/key callables from ComparableValue wrappers, so wrappers coming
    # off the wire must be re-hydrated, and raw elements must be wrapped.
    if heap:
      first = heap[0]
      if isinstance(first, cy_combiners.ComparableValue):
        if first.requires_hydration:
          for comparable in heap:
            assert comparable.requires_hydration
            comparable.hydrate(self._compare, self._key)
            assert not comparable.requires_hydration
          return heap
        else:
          return heap
      else:
        return [
            cy_combiners.ComparableValue(element, self._compare, self._key)
            for element in heap
        ]
    else:
      return heap

  def display_data(self):
    return {
        'n': self._n,
        'compare': DisplayDataItem(
            self._compare.__name__ if hasattr(self._compare, '__name__') else
            self._compare.__class__.__name__).drop_if_none()
    }

  # The accumulator type is a tuple
  # (bool, Union[List[T], List[ComparableValue[T]])
  # where the boolean indicates whether the second slot contains a List of T
  # (False) or List of ComparableValue[T] (True). In either case, the List
  # maintains heap invariance. When the contents of the List are
  # ComparableValue[T] they either all 'requires_hydration' or none do.
  # This accumulator representation allows us to minimize the data encoding
  # overheads. Creation of ComparableValues is elided for performance reasons
  # when there is no need for complicated comparison functions.
  def create_accumulator(self, *args, **kwargs):
    return (False, [])

  def add_input(self, accumulator, element, *args, **kwargs):
    # Wrapping is only needed when a non-default ordering is in effect; the
    # default operator.lt ordering works directly on the raw elements.
    holds_comparables, heap = accumulator
    if self._compare is not operator.lt or self._key:
      heap = self._hydrated_heap(heap)
      holds_comparables = True
    else:
      assert not holds_comparables
    comparable = (
        cy_combiners.ComparableValue(element, self._compare, self._key)
        if holds_comparables else element)
    if len(heap) < self._n:
      heapq.heappush(heap, comparable)
    else:
      heapq.heappushpop(heap, comparable)
    return (holds_comparables, heap)

  def merge_accumulators(self, accumulators, *args, **kwargs):
    result_heap = None
    holds_comparables = None
    for accumulator in accumulators:
      holds_comparables, heap = accumulator
      if self._compare is not operator.lt or self._key:
        heap = self._hydrated_heap(heap)
        holds_comparables = True
      else:
        assert not holds_comparables
      if result_heap is None:
        # First accumulator seeds the result; the rest are folded in
        # element by element through add_input.
        result_heap = heap
      else:
        for comparable in heap:
          _, result_heap = self.add_input(
              (holds_comparables, result_heap),
              comparable.value if holds_comparables else comparable)
    assert result_heap is not None and holds_comparables is not None
    return (holds_comparables, result_heap)

  def compact(self, accumulator, *args, **kwargs):
    holds_comparables, heap = accumulator
    # Unwrap to avoid serialization via pickle.
    if holds_comparables:
      return (False, [comparable.value for comparable in heap])
    else:
      return accumulator

  def extract_output(self, accumulator, *args, **kwargs):
    holds_comparables, heap = accumulator
    if self._compare is not operator.lt or self._key:
      if not holds_comparables:
        heap = self._hydrated_heap(heap)
        holds_comparables = True
    else:
      assert not holds_comparables

    assert len(heap) <= self._n
    # Emit compare-most first.
    heap.sort(reverse=True)
    return [
        comparable.value if holds_comparables else comparable
        for comparable in heap
    ]
class Largest(TopCombineFn):
  """TopCombineFn specialization that keeps the n largest elements."""

  def default_label(self):
    return 'Largest(%s)' % (self._n,)
class Smallest(TopCombineFn):
  """TopCombineFn specialization that keeps the n smallest elements."""

  def __init__(self, n):
    # Reversing the ordering turns "top" into "bottom".
    super(Smallest, self).__init__(n, reverse=True)

  def default_label(self):
    return 'Smallest(%s)' % (self._n,)
class Sample(object):
  """Combiners for sampling n elements without replacement."""
  # pylint: disable=no-self-argument

  class FixedSizeGlobally(CombinerWithoutDefaults):
    """Sample n elements from the input PCollection without replacement."""
    def __init__(self, n):
      super(Sample.FixedSizeGlobally, self).__init__()
      self._n = n

    def expand(self, pcoll):
      combine = core.CombineGlobally(SampleCombineFn(self._n))
      if not self.has_defaults:
        combine = combine.without_defaults()
      return pcoll | combine

    def display_data(self):
      return {'n': self._n}

    def default_label(self):
      return 'FixedSizeGlobally(%d)' % self._n

  class FixedSizePerKey(ptransform.PTransform):
    """Sample n elements associated with each key without replacement."""
    def __init__(self, n):
      self._n = n

    def expand(self, pcoll):
      return pcoll | core.CombinePerKey(SampleCombineFn(self._n))

    def display_data(self):
      return {'n': self._n}

    def default_label(self):
      return 'FixedSizePerKey(%d)' % self._n
@with_input_types(T)
@with_output_types(List[T])
class SampleCombineFn(core.CombineFn):
  """CombineFn for all Sample transforms.

  Sampling is delegated to a TopCombineFn over (random_key, element) pairs:
  keeping the n pairs with the largest random keys yields a uniform sample
  of size n without replacement.
  """
  def __init__(self, n):
    super(SampleCombineFn, self).__init__()
    # A TopCombineFn instance is used as a helper rather than a base class,
    # since sampling is not conceptually a Top operation.
    self._top_combiner = TopCombineFn(n)

  def setup(self):
    self._top_combiner.setup()

  def create_accumulator(self):
    return self._top_combiner.create_accumulator()

  def add_input(self, heap, element):
    # Tag each element with a uniform random key; the Top combiner then
    # retains the elements whose keys happen to be largest.
    return self._top_combiner.add_input(heap, (random.random(), element))

  def merge_accumulators(self, heaps):
    return self._top_combiner.merge_accumulators(heaps)

  def compact(self, heap):
    return self._top_combiner.compact(heap)

  def extract_output(self, heap):
    # Strip off the random keys added in add_input.
    return [element for _, element in self._top_combiner.extract_output(heap)]

  def teardown(self):
    self._top_combiner.teardown()
class _TupleCombineFnBase(core.CombineFn):
  """Shared plumbing for combiners that fan out to a tuple of sub-combiners.

  The accumulator is a list holding one sub-accumulator per sub-combiner;
  each lifecycle method forwards to the corresponding sub-combiner.
  """
  def __init__(self, *combiners):
    self._combiners = [
        core.CombineFn.maybe_from_callable(c) for c in combiners
    ]
    # The originals are kept purely so display_data can name them.
    self._named_combiners = combiners

  def display_data(self):
    names = [
        getattr(c, '__name__', c.__class__.__name__)
        for c in self._named_combiners
    ]
    return {'combiners': str(names)}

  def setup(self, *args, **kwargs):
    for combiner in self._combiners:
      combiner.setup(*args, **kwargs)

  def create_accumulator(self, *args, **kwargs):
    return [
        combiner.create_accumulator(*args, **kwargs)
        for combiner in self._combiners
    ]

  def merge_accumulators(self, accumulators, *args, **kwargs):
    # Transpose so each sub-combiner merges its own column of accumulators.
    per_combiner = zip(*accumulators)
    return [
        combiner.merge_accumulators(accs, *args, **kwargs)
        for combiner, accs in zip(self._combiners, per_combiner)
    ]

  def compact(self, accumulator, *args, **kwargs):
    return [
        combiner.compact(acc, *args, **kwargs)
        for combiner, acc in zip(self._combiners, accumulator)
    ]

  def extract_output(self, accumulator, *args, **kwargs):
    return tuple(
        combiner.extract_output(acc, *args, **kwargs)
        for combiner, acc in zip(self._combiners, accumulator))

  def teardown(self, *args, **kwargs):
    # Tear down in reverse of setup order.
    for combiner in reversed(self._combiners):
      combiner.teardown(*args, **kwargs)
class TupleCombineFn(_TupleCombineFnBase):
  """A combiner for combining tuples via a tuple of combiners.

  Takes as input a tuple of N CombineFns and combines N-tuples by
  combining the k-th element of each tuple with the k-th CombineFn,
  outputting a new N-tuple of combined values.
  """
  def add_input(self, accumulator, element, *args, **kwargs):
    return [
        combiner.add_input(acc, value, *args, **kwargs)
        for combiner, acc, value
        in zip(self._combiners, accumulator, element)
    ]

  def with_common_input(self):
    # Reinterpret the same sub-combiners as each consuming the whole input.
    return SingleInputTupleCombineFn(*self._combiners)
class SingleInputTupleCombineFn(_TupleCombineFnBase):
  """A combiner for combining a single value via a tuple of combiners.

  Takes as input a tuple of N CombineFns and combines elements by
  applying each CombineFn to each input, producing an N-tuple of
  the outputs corresponding to each of the N CombineFn's outputs.
  """
  def add_input(self, accumulator, element, *args, **kwargs):
    return [
        combiner.add_input(acc, element, *args, **kwargs)
        for combiner, acc in zip(self._combiners, accumulator)
    ]
class ToList(CombinerWithoutDefaults):
  """A global CombineFn that condenses a PCollection into a single list."""
  def expand(self, pcoll):
    combine = core.CombineGlobally(ToListCombineFn())
    if not self.has_defaults:
      combine = combine.without_defaults()
    return pcoll | self.label >> combine
@with_input_types(T)
@with_output_types(List[T])
class ToListCombineFn(core.CombineFn):
  """CombineFn for to_list.

  The accumulator is the list being built; element order within an
  accumulator is preserved.
  """
  def create_accumulator(self):
    return []

  def add_input(self, accumulator, element):
    accumulator.append(element)
    return accumulator

  def merge_accumulators(self, accumulators):
    # Extend a single result list instead of sum(accumulators, []), which
    # is quadratic in the total number of elements.
    merged = []
    for accumulator in accumulators:
      merged.extend(accumulator)
    return merged

  def extract_output(self, accumulator):
    return accumulator
class ToDict(CombinerWithoutDefaults):
  """A global CombineFn that condenses a PCollection into a single dict.

  PCollections should consist of 2-tuples, notionally (key, value) pairs.
  If multiple values are associated with the same key, only one of the values
  will be present in the resulting dict.
  """
  def expand(self, pcoll):
    combine = core.CombineGlobally(ToDictCombineFn())
    if not self.has_defaults:
      combine = combine.without_defaults()
    return pcoll | self.label >> combine
@with_input_types(Tuple[K, V])
@with_output_types(Dict[K, V])
class ToDictCombineFn(core.CombineFn):
  """CombineFn for to_dict.

  When a key occurs more than once, exactly one of its values survives
  (whichever is written last during accumulation/merging).
  """
  def create_accumulator(self):
    return {}

  def add_input(self, accumulator, element):
    key, value = element
    accumulator[key] = value
    return accumulator

  def merge_accumulators(self, accumulators):
    merged = {}
    for partial in accumulators:
      merged.update(partial)
    return merged

  def extract_output(self, accumulator):
    return accumulator
class ToSet(CombinerWithoutDefaults):
  """A global CombineFn that condenses a PCollection into a set."""
  def expand(self, pcoll):
    combine = core.CombineGlobally(ToSetCombineFn())
    if not self.has_defaults:
      combine = combine.without_defaults()
    return pcoll | self.label >> combine
@with_input_types(T)
@with_output_types(Set[T])
class ToSetCombineFn(core.CombineFn):
  """CombineFn for ToSet.

  The accumulator is the set being built; duplicates collapse naturally.
  """
  def create_accumulator(self):
    return set()

  def add_input(self, accumulator, element):
    accumulator.add(element)
    return accumulator

  def merge_accumulators(self, accumulators):
    # set().union(...) also handles an empty accumulator sequence, whereas
    # the unbound form set.union(*accumulators) raised TypeError when given
    # no arguments.
    return set().union(*accumulators)

  def extract_output(self, accumulator):
    return accumulator
class _CurriedFn(core.CombineFn):
  """Wrapped CombineFn with extra arguments.

  Every lifecycle call is forwarded to the wrapped fn with the stored
  positional and keyword arguments appended.
  """
  def __init__(self, fn, args, kwargs):
    # fn: the underlying CombineFn; args/kwargs: extra arguments appended
    # to every delegated call.
    self.fn = fn
    self.args = args
    self.kwargs = kwargs

  def setup(self):
    self.fn.setup(*self.args, **self.kwargs)

  def create_accumulator(self):
    return self.fn.create_accumulator(*self.args, **self.kwargs)

  def add_input(self, accumulator, element):
    return self.fn.add_input(accumulator, element, *self.args, **self.kwargs)

  def merge_accumulators(self, accumulators):
    return self.fn.merge_accumulators(accumulators, *self.args, **self.kwargs)

  def compact(self, accumulator):
    return self.fn.compact(accumulator, *self.args, **self.kwargs)

  def extract_output(self, accumulator):
    return self.fn.extract_output(accumulator, *self.args, **self.kwargs)

  def teardown(self):
    self.fn.teardown(*self.args, **self.kwargs)

  def apply(self, elements):
    return self.fn.apply(elements, *self.args, **self.kwargs)
def curry_combine_fn(fn, args, kwargs):
  """Bind extra args/kwargs to a CombineFn, wrapping only when needed."""
  if args or kwargs:
    return _CurriedFn(fn, args, kwargs)
  return fn
class PhasedCombineFnExecutor(object):
  """Executor for phases of combine operations."""
  def __init__(self, phase, fn, args, kwargs):
    self.combine_fn = curry_combine_fn(fn, args, kwargs)
    # Dispatch table in place of an if/elif chain; an unknown phase still
    # raises ValueError with the same message.
    phase_handlers = {
        'all': self.full_combine,
        'add': self.add_only,
        'merge': self.merge_only,
        'extract': self.extract_only,
        'convert': self.convert_to_accumulator,
    }
    if phase not in phase_handlers:
      raise ValueError('Unexpected phase: %s' % phase)
    self.apply = phase_handlers[phase]

  def full_combine(self, elements):
    # Run the complete combine cycle over the raw elements.
    return self.combine_fn.apply(elements)

  def add_only(self, elements):
    return self.combine_fn.add_inputs(
        self.combine_fn.create_accumulator(), elements)

  def merge_only(self, accumulators):
    return self.combine_fn.merge_accumulators(accumulators)

  def extract_only(self, accumulator):
    return self.combine_fn.extract_output(accumulator)

  def convert_to_accumulator(self, element):
    # Lift a single element into a one-element accumulator.
    return self.combine_fn.add_input(
        self.combine_fn.create_accumulator(), element)
class Latest(object):
  """Combiners for computing the latest element."""
  @with_input_types(T)
  @with_output_types(T)
  class Globally(CombinerWithoutDefaults):
    """Compute the element with the latest timestamp from a PCollection."""
    @staticmethod
    def add_timestamp(element, timestamp=core.DoFn.TimestampParam):
      # Expose each element's window timestamp as explicit data.
      return [(element, timestamp)]

    def expand(self, pcoll):
      combine = core.CombineGlobally(LatestCombineFn())
      if not self.has_defaults:
        combine = combine.without_defaults()
      timestamped = pcoll | core.ParDo(self.add_timestamp).with_output_types(
          Tuple[T, TimestampType])
      return timestamped | combine

  @with_input_types(Tuple[K, V])
  @with_output_types(Tuple[K, V])
  class PerKey(ptransform.PTransform):
    """Compute elements with the latest timestamp for each key from a keyed
    PCollection."""
    @staticmethod
    def add_timestamp(element, timestamp=core.DoFn.TimestampParam):
      # Move the timestamp next to the value so the per-key combiner sees it.
      key, value = element
      return [(key, (value, timestamp))]

    def expand(self, pcoll):
      timestamped = pcoll | core.ParDo(self.add_timestamp).with_output_types(
          Tuple[K, Tuple[T, TimestampType]])
      return timestamped | core.CombinePerKey(LatestCombineFn())
@with_input_types(Tuple[T, TimestampType])
@with_output_types(T)
class LatestCombineFn(core.CombineFn):
  """CombineFn that keeps the (element, timestamp) pair with the newest
  timestamp and emits just the element."""
  def create_accumulator(self):
    # Sentinel pair: no element yet, with a timestamp older than any real one.
    return (None, window.MIN_TIMESTAMP)

  def add_input(self, accumulator, element):
    # Keep whichever pair carries the later timestamp; ties favor the
    # incoming element (matching the original else-branch behavior).
    return element if element[1] >= accumulator[1] else accumulator

  def merge_accumulators(self, accumulators):
    latest = self.create_accumulator()
    for candidate in accumulators:
      latest = self.add_input(latest, candidate)
    return latest

  def extract_output(self, accumulator):
    return accumulator[0]
| apache-2.0 |
oy-vey/algorithms-and-data-structures | 2-DataStructures/Week3/make_heap/build_heap.py | 1 | 1729 | # python3
class HeapBuilder:
    """Turns an array into a min-heap, recording every swap performed."""

    def __init__(self):
        self._swaps = []
        self._data = []

    def ReadData(self):
        # First line: element count; second line: the elements themselves.
        n = int(input())
        self._data = [int(s) for s in input().split()]
        assert n == len(self._data)

    def WriteResponse(self):
        print(len(self._swaps))
        for i, j in self._swaps:
            print(i, j)

    def SiftDown(self, i):
        # Iterative sift-down; produces the identical swap sequence to the
        # equivalent tail-recursive formulation.
        size = len(self._data)
        while True:
            smallest = i
            left, right = 2 * i + 1, 2 * i + 2
            if left < size and self._data[left] < self._data[smallest]:
                smallest = left
            if right < size and self._data[right] < self._data[smallest]:
                smallest = right
            if smallest == i:
                return
            self._swaps.append((i, smallest))
            self._data[i], self._data[smallest] = (
                self._data[smallest], self._data[i])
            i = smallest

    def GenerateSwaps(self):
        # Heapify bottom-up from the last internal node; O(n) swaps total,
        # unlike a selection-sort approach which is quadratic.
        for i in range(len(self._data) // 2 - 1, -1, -1):
            self.SiftDown(i)

    def Solve(self):
        self.ReadData()
        self.GenerateSwaps()
        self.WriteResponse()
if __name__ == '__main__':
    # Read input, heapify, and print the recorded swaps.
    HeapBuilder().Solve()
| mit |
sameetb-cuelogic/edx-platform-test | common/djangoapps/student/management/commands/assigngroups.py | 170 | 3059 | from django.core.management.base import BaseCommand
from django.contrib.auth.models import User
from student.models import UserTestGroup
import random
import sys
import datetime
from textwrap import dedent
import json
from pytz import UTC
def group_from_value(groups, v):
    '''Map a random value in [0, 1] to a weighted group.

    Given groups (('a', 0.3), ('b', 0.4), ('c', 0.3)) and a random value
    v in [0, 1], return the associated group (in the above case, return
    'a' if v < 0.3, 'b' if 0.3 <= v < 0.7, and 'c' otherwise).
    '''
    # 'cumulative' replaces the original local named 'sum', which shadowed
    # the builtin of the same name.
    cumulative = 0
    for (g, p) in groups:
        cumulative = cumulative + p
        if cumulative > v:
            return g
    return g  # For round-off errors (v just above the total weight)
class Command(BaseCommand):
help = dedent("""\
Assign users to test groups. Takes a list of groups:
a:0.3,b:0.4,c:0.3 file.txt "Testing something"
Will assign each user to group a, b, or c with
probability 0.3, 0.4, 0.3. Probabilities must
add up to 1.
Will log what happened to file.txt.
""")
def handle(self, *args, **options):
if len(args) != 3:
print "Invalid number of options"
sys.exit(-1)
# Extract groups from string
group_strs = [x.split(':') for x in args[0].split(',')]
groups = [(group, float(value)) for group, value in group_strs]
print "Groups", groups
## Confirm group probabilities add up to 1
total = sum(zip(*groups)[1])
print "Total:", total
if abs(total - 1) > 0.01:
print "Total not 1"
sys.exit(-1)
## Confirm groups don't already exist
for group in dict(groups):
if UserTestGroup.objects.filter(name=group).count() != 0:
print group, "already exists!"
sys.exit(-1)
group_objects = {}
f = open(args[1], "a+")
## Create groups
for group in dict(groups):
utg = UserTestGroup()
utg.name = group
utg.description = json.dumps({"description": args[2]},
{"time": datetime.datetime.now(UTC).isoformat()})
group_objects[group] = utg
group_objects[group].save()
## Assign groups
users = list(User.objects.all())
count = 0
for user in users:
if count % 1000 == 0:
print count
count = count + 1
v = random.uniform(0, 1)
group = group_from_value(groups, v)
group_objects[group].users.add(user)
f.write(u"Assigned user {name} ({id}) to {group}\n".format(
name=user.username,
id=user.id,
group=group
).encode('utf-8'))
## Save groups
for group in group_objects:
group_objects[group].save()
f.close()
# python manage.py assigngroups summary_test:0.3,skip_summary_test:0.7 log.txt "Do previews of future materials help?"
# python manage.py assigngroups skip_capacitor:0.3,capacitor:0.7 log.txt "Do we show capacitor in linearity tutorial?"
| agpl-3.0 |
markoshorro/gem5 | src/arch/x86/isa/insts/general_purpose/rotate_and_shift/shift.py | 91 | 6764 | # Copyright (c) 2007 The Hewlett-Packard Development Company
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Gabe Black
microcode = '''
def macroop SAL_R_I
{
slli reg, reg, imm, flags=(CF,OF,SF,ZF,PF)
};
def macroop SAL_M_I
{
ldst t1, seg, sib, disp
slli t1, t1, imm, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SAL_P_I
{
rdip t7
ldst t1, seg, riprel, disp
slli t1, t1, imm, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SAL_1_R
{
slli reg, reg, 1, flags=(CF,OF,SF,ZF,PF)
};
def macroop SAL_1_M
{
ldst t1, seg, sib, disp
slli t1, t1, 1, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SAL_1_P
{
rdip t7
ldst t1, seg, riprel, disp
slli t1, t1, 1, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SAL_R_R
{
sll reg, reg, regm, flags=(CF,OF,SF,ZF,PF)
};
def macroop SAL_M_R
{
ldst t1, seg, sib, disp
sll t1, t1, reg, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SAL_P_R
{
rdip t7
ldst t1, seg, riprel, disp
sll t1, t1, reg, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SHLD_R_R
{
mdbi regm, 0
sld reg, reg, rcx, flags=(CF,OF,SF,ZF,PF)
};
def macroop SHLD_M_R
{
ldst t1, seg, sib, disp
mdbi reg, 0
sld t1, t1, rcx, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SHLD_P_R
{
rdip t7
ldst t1, seg, riprel, disp
mdbi reg, 0
sld t1, t1, rcx, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SHLD_R_R_I
{
mdbi regm, 0
sldi reg, reg, imm, flags=(CF,OF,SF,ZF,PF)
};
def macroop SHLD_M_R_I
{
ldst t1, seg, sib, disp
mdbi reg, 0
sldi t1, t1, imm, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SHLD_P_R_I
{
rdip t7
ldst t1, seg, riprel, disp
mdbi reg, 0
sldi t1, t1, imm, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SHR_R_I
{
srli reg, reg, imm, flags=(CF,OF,SF,ZF,PF)
};
def macroop SHR_M_I
{
ldst t1, seg, sib, disp
srli t1, t1, imm, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SHR_P_I
{
rdip t7
ldst t1, seg, riprel, disp
srli t1, t1, imm, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SHR_1_R
{
srli reg, reg, 1, flags=(CF,OF,SF,ZF,PF)
};
def macroop SHR_1_M
{
ldst t1, seg, sib, disp
srli t1, t1, 1, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SHR_1_P
{
rdip t7
ldst t1, seg, riprel, disp
srli t1, t1, 1, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SHR_R_R
{
srl reg, reg, regm, flags=(CF,OF,SF,ZF,PF)
};
def macroop SHR_M_R
{
ldst t1, seg, sib, disp
srl t1, t1, reg, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SHR_P_R
{
rdip t7
ldst t1, seg, riprel, disp
srl t1, t1, reg, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SHRD_R_R
{
mdbi regm, 0
srd reg, reg, rcx, flags=(CF,OF,SF,ZF,PF)
};
def macroop SHRD_M_R
{
ldst t1, seg, sib, disp
mdbi reg, 0
srd t1, t1, rcx, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SHRD_P_R
{
rdip t7
ldst t1, seg, riprel, disp
mdbi reg, 0
srd t1, t1, rcx, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SHRD_R_R_I
{
mdbi regm, 0
srdi reg, reg, imm, flags=(CF,OF,SF,ZF,PF)
};
def macroop SHRD_M_R_I
{
ldst t1, seg, sib, disp
mdbi reg, 0
srdi t1, t1, imm, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SHRD_P_R_I
{
rdip t7
ldst t1, seg, riprel, disp
mdbi reg, 0
srdi t1, t1, imm, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SAR_R_I
{
srai reg, reg, imm, flags=(CF,OF,SF,ZF,PF)
};
def macroop SAR_M_I
{
ldst t1, seg, sib, disp
srai t1, t1, imm, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SAR_P_I
{
rdip t7
ldst t1, seg, riprel, disp
srai t1, t1, imm, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SAR_1_R
{
srai reg, reg, 1, flags=(CF,OF,SF,ZF,PF)
};
def macroop SAR_1_M
{
ldst t1, seg, sib, disp
srai t1, t1, 1, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SAR_1_P
{
rdip t7
ldst t1, seg, riprel, disp
srai t1, t1, 1, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
def macroop SAR_R_R
{
sra reg, reg, regm, flags=(CF,OF,SF,ZF,PF)
};
def macroop SAR_M_R
{
ldst t1, seg, sib, disp
sra t1, t1, reg, flags=(CF,OF,SF,ZF,PF)
st t1, seg, sib, disp
};
def macroop SAR_P_R
{
rdip t7
ldst t1, seg, riprel, disp
sra t1, t1, reg, flags=(CF,OF,SF,ZF,PF)
st t1, seg, riprel, disp
};
'''
| bsd-3-clause |
schenkd/webdev-project | app/main/forms.py | 1 | 2869 | # ~*~ encoding: utf-8 ~*~
from flask_wtf import FlaskForm
from wtforms import StringField, SubmitField, TextAreaField, IntegerField, SelectField
from wtforms.validators import DataRequired, Email, ValidationError
from datetime import datetime
from app.models import Drug
# Generiert ein Liste von Tupeln aus einem integer und einem string
def generate_int_tupel_list(number):
    """Return [(1, '1'), ..., (number, str(number))] for SelectField choices.

    Each tuple pairs the stored integer value with its display label.
    An input of 0 (or less) yields an empty list.
    """
    return [(x, str(x)) for x in range(1, number + 1)]
def get_date():
    """Return today's UTC date split into the pieces the date fields expect."""
    today = datetime.utcnow().date()
    return {'year': today.year, 'month': today.month, 'day': today.day}
# Select-field choice lists: each entry is a (stored value, display label)
# tuple as expected by WTForms SelectField.

# Known reasons for a supply shortage; the first entry is an empty placeholder.
choices = [(None, ''),
           ('Produktionsprobleme', 'Produktionsprobleme'),
           ('Hersteller wechsel', 'Hersteller wechsel'),
           ('Änderung des Herstellungsverfahrens', 'Änderung des Herstellungsverfahrens'),
           ('Unzureichende Produktionskapazitäten', 'Unzureichende Produktionskapazitäten'),
           ('GMP-Mängel', 'GMP-Mängel'),
           ('Probleme bei der Endfreigabe', 'Probleme bei der Endfreigabe')]

# Yes/no choices, stored as booleans.
boolean = [(False, 'Nein'),
           (True, 'Ja')]

# Date components for the shortage-date selectors.
day = generate_int_tupel_list(31)
month = generate_int_tupel_list(12)
year = [(2017, '2017'),
        (2018, '2018'),
        (2019, '2019')]

# Classification levels: 0 = none, 1 = supply-relevant, 2 = supply-critical.
classified = [(0, 'keine Klassifizierung'),
              (1, 'versorgungsrelevant'),
              (2, 'versorgungsgefährdend')]
class EngpassForm(FlaskForm):
    """Form for reporting a drug supply shortage (Lieferengpass)."""

    enr = IntegerField('ENR', validators=[DataRequired()])
    pzn = IntegerField('PZN')
    alternative = SelectField('Alternativepräperate', choices=boolean)
    inform_expert_group = SelectField('Info an Fachkreise', choices=boolean)
    # Defaults are callables so the current date is evaluated per request.
    # The previous `default=get_date()['day']` form was evaluated once at
    # import time, so a long-running process kept showing its start-up date.
    day = SelectField('Tag', choices=day, default=lambda: get_date()['day'])
    month = SelectField('Monat', choices=month, default=lambda: get_date()['month'])
    year = SelectField('Jahr', choices=year, default=lambda: get_date()['year'])
    reason = SelectField('Grund für den Lieferengpass', choices=choices)
    other_reasons = TextAreaField('Sonstige Gründe')
    telephone = StringField('Telefon')
    email = StringField('Email')
    submit = SubmitField('Melden')

    def validate_enr(self, field):
        """Reject the submission when the ENR matches no known Drug.

        ``field`` is the bound ``enr`` field (WTForms inline-validator
        convention), so use ``field.data`` directly.
        """
        # Leftover debug print() calls removed.
        if not Drug.objects(enr=field.data):
            raise ValidationError('ENR ist nicht bekannt!')
class ContactForm(FlaskForm):
    """Simple contact form: name (required), free-text message and contact details."""
    firstname = StringField('Vorname', validators=[DataRequired()])
    lastname = StringField('Nachname', validators=[DataRequired()])
    message = TextAreaField('Nachricht')
    telephone = StringField('Telefon')
    email = StringField('Email', validators=[DataRequired(), Email()])
    submit = SubmitField('Abschicken')
class ClassifyForm(FlaskForm):
    """Assign a supply-relevance classification (see ``classified``) to a drug by ENR."""
    enr = IntegerField('ENR', validators=[DataRequired()])
    classify = SelectField('Klassifizierung', choices=classified)
    submit = SubmitField('Abschicken')
| mit |
ovnicraft/openerp-restaurant | purchase/edi/purchase_order.py | 439 | 9703 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011-2012 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
from openerp.tools.translate import _
from openerp.addons.edi import EDIMixin
# Subset of purchase.order.line fields serialized into an EDI document.
PURCHASE_ORDER_LINE_EDI_STRUCT = {
    'name': True,
    'date_planned': True,
    'product_id': True,
    'product_uom': True,
    'price_unit': True,
    'product_qty': True,

    # fields used for web preview only - discarded on import
    'price_subtotal': True,
}

# Subset of purchase.order fields serialized into an EDI document.
# Entries marked "custom" are injected manually in edi_export().
PURCHASE_ORDER_EDI_STRUCT = {
    'company_id': True, # -> to be changed into partner
    'name': True,
    'partner_ref': True,
    'origin': True,
    'date_order': True,
    'partner_id': True,
    #custom: 'partner_address',
    'notes': True,
    'order_line': PURCHASE_ORDER_LINE_EDI_STRUCT,
    #custom: currency_id

    # fields used for web preview only - discarded on import
    'amount_total': True,
    'amount_untaxed': True,
    'amount_tax': True,
    'state':True,
}
class purchase_order(osv.osv, EDIMixin):
    """EDI import/export for purchase orders.

    Exported documents are trans-typed so the receiving side imports them
    as sale orders (the supplier's point of view of this purchase).
    """
    _inherit = 'purchase.order'

    def edi_export(self, cr, uid, records, edi_struct=None, context=None):
        """Exports a purchase order"""
        edi_struct = dict(edi_struct or PURCHASE_ORDER_EDI_STRUCT)
        res_company = self.pool.get('res.company')
        res_partner_obj = self.pool.get('res.partner')
        edi_doc_list = []
        for order in records:
            # generate the main report
            self._edi_generate_report_attachment(cr, uid, order, context=context)

            # Get EDI doc based on struct. The result will also contain all metadata fields and attachments.
            edi_doc = super(purchase_order,self).edi_export(cr, uid, [order], edi_struct, context)[0]
            edi_doc.update({
                # force trans-typing to purchase.order upon import
                '__import_model': 'sale.order',
                '__import_module': 'sale',
                'company_address': res_company.edi_export_address(cr, uid, order.company_id, context=context),
                'partner_address': res_partner_obj.edi_export(cr, uid, [order.partner_id], context=context)[0],
                'currency': self.pool.get('res.currency').edi_export(cr, uid, [order.pricelist_id.currency_id],
                                                                     context=context)[0],
            })
            if edi_doc.get('order_line'):
                for line in edi_doc['order_line']:
                    line['__import_model'] = 'sale.order.line'
            edi_doc_list.append(edi_doc)
        return edi_doc_list

    def edi_import_company(self, cr, uid, edi_document, context=None):
        """Import the remote company from the document as a local supplier
        partner, point the document's partner_id at it and return the
        partner's database id."""
        # TODO: for multi-company setups, we currently import the document in the
        #       user's current company, but we should perhaps foresee a way to select
        #       the desired company among the user's allowed companies

        self._edi_requires_attributes(('company_id','company_address'), edi_document)
        res_partner = self.pool.get('res.partner')

        xid, company_name = edi_document.pop('company_id')
        # Retrofit address info into a unified partner info (changed in v7 - used to keep them separate)
        company_address_edi = edi_document.pop('company_address')
        company_address_edi['name'] = company_name
        company_address_edi['is_company'] = True
        company_address_edi['__import_model'] = 'res.partner'
        company_address_edi['__id'] = xid  # override address ID, as of v7 they should be the same anyway
        if company_address_edi.get('logo'):
            company_address_edi['image'] = company_address_edi.pop('logo')

        company_address_edi['supplier'] = True
        partner_id = res_partner.edi_import(cr, uid, company_address_edi, context=context)

        # modify edi_document to refer to new partner
        partner = res_partner.browse(cr, uid, partner_id, context=context)
        partner_edi_m2o = self.edi_m2o(cr, uid, partner, context=context)
        edi_document['partner_id'] = partner_edi_m2o
        edi_document.pop('partner_address', None) # ignored, that's supposed to be our own address!

        return partner_id

    def _edi_get_pricelist(self, cr, uid, partner_id, currency, context=None):
        """Return an EDI m2o tuple for a purchase pricelist in ``currency``:
        the partner's own purchase pricelist if it matches, otherwise an
        existing or newly created purchase pricelist for that currency."""
        # TODO: refactor into common place for purchase/sale, e.g. into product module
        partner_model = self.pool.get('res.partner')
        partner = partner_model.browse(cr, uid, partner_id, context=context)
        pricelist = partner.property_product_pricelist_purchase
        if not pricelist:
            pricelist = self.pool.get('ir.model.data').get_object(cr, uid, 'purchase', 'list0', context=context)

        if not pricelist.currency_id == currency:
            # look for a pricelist with the right type and currency, or make a new one
            pricelist_type = 'purchase'
            product_pricelist = self.pool.get('product.pricelist')
            match_pricelist_ids = product_pricelist.search(cr, uid,[('type','=',pricelist_type),
                                                                    ('currency_id','=',currency.id)])
            if match_pricelist_ids:
                pricelist_id = match_pricelist_ids[0]
            else:
                pricelist_name = _('EDI Pricelist (%s)') % (currency.name,)
                pricelist_id = product_pricelist.create(cr, uid, {'name': pricelist_name,
                                                                  'type': pricelist_type,
                                                                  'currency_id': currency.id,
                                                                 })
                self.pool.get('product.pricelist.version').create(cr, uid, {'name': pricelist_name,
                                                                            'pricelist_id': pricelist_id})
            pricelist = product_pricelist.browse(cr, uid, pricelist_id)

        return self.edi_m2o(cr, uid, pricelist, context=context)

    def _edi_get_location(self, cr, uid, partner_id, context=None):
        """Return an EDI m2o tuple for the partner's customer stock location,
        falling back to the default stock location."""
        partner_model = self.pool.get('res.partner')
        partner = partner_model.browse(cr, uid, partner_id, context=context)
        location = partner.property_stock_customer
        if not location:
            location = self.pool.get('ir.model.data').get_object(cr, uid, 'stock', 'stock_location_stock', context=context)
        return self.edi_m2o(cr, uid, location, context=context)

    def edi_import(self, cr, uid, edi_document, context=None):
        """Import an EDI document (a sale order from the other side) as a
        purchase order: import the company as supplier, resolve currency,
        pricelist and location, fold line discounts into the unit price and
        strip preview-only fields."""
        self._edi_requires_attributes(('company_id','company_address','order_line','date_order','currency'), edi_document)

        #import company as a new partner
        partner_id = self.edi_import_company(cr, uid, edi_document, context=context)

        # currency for rounding the discount calculations and for the pricelist
        res_currency = self.pool.get('res.currency')
        currency_info = edi_document.pop('currency')
        currency_id = res_currency.edi_import(cr, uid, currency_info, context=context)
        order_currency = res_currency.browse(cr, uid, currency_id)

        partner_ref = edi_document.pop('partner_ref', False)
        # swap: their order name becomes our reference, their reference our name
        edi_document['partner_ref'] = edi_document['name']
        edi_document['name'] = partner_ref or edi_document['name']
        edi_document['pricelist_id'] = self._edi_get_pricelist(cr, uid, partner_id, order_currency, context=context)
        edi_document['location_id'] = self._edi_get_location(cr, uid, partner_id, context=context)

        # discard web preview fields, if present
        edi_document.pop('amount_total', None)
        edi_document.pop('amount_tax', None)
        edi_document.pop('amount_untaxed', None)
        edi_document.pop('payment_term', None)
        edi_document.pop('order_policy', None)
        edi_document.pop('user_id', None)

        for order_line in edi_document['order_line']:
            self._edi_requires_attributes(('date_planned', 'product_id', 'product_uom', 'product_qty', 'price_unit'), order_line)
            # original sale order contains unit price and discount, but not final line price
            discount = order_line.pop('discount', 0.0)
            if discount:
                order_line['price_unit'] = res_currency.round(cr, uid, order_currency,
                                                              (order_line['price_unit'] * (1 - (discount or 0.0) / 100.0)))
            # sale order lines have sequence numbers, not purchase order lines
            order_line.pop('sequence', None)
            # discard web preview fields, if present
            order_line.pop('price_subtotal', None)
        return super(purchase_order,self).edi_import(cr, uid, edi_document, context=context)
class purchase_order_line(osv.osv, EDIMixin):
    """EDI-enabled purchase order line; serialization behaviour comes from EDIMixin."""
    _inherit='purchase.order.line'
| agpl-3.0 |
leandrotoledo/python-telegram-bot | tests/test_filters.py | 2 | 87928 | #!/usr/bin/env python
#
# A library that provides a Python interface to the Telegram Bot API
# Copyright (C) 2015-2021
# Leandro Toledo de Souza <devs@python-telegram-bot.org>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser Public License for more details.
#
# You should have received a copy of the GNU Lesser Public License
# along with this program. If not, see [http://www.gnu.org/licenses/].
import datetime
import pytest
from telegram import Message, User, Chat, MessageEntity, Document, Update, Dice
from telegram.ext import Filters, BaseFilter, MessageFilter, UpdateFilter
from sys import version_info as py_ver
import inspect
import re
from telegram.utils.deprecate import TelegramDeprecationWarning
@pytest.fixture(scope='function')
def update():
    """A fresh private-chat message Update with via_bot, sender_chat and
    forward info pre-set, so forward/via_bot filters can match."""
    return Update(
        0,
        Message(
            0,
            datetime.datetime.utcnow(),
            Chat(0, 'private'),
            from_user=User(0, 'Testuser', False),
            via_bot=User(0, "Testbot", True),
            sender_chat=Chat(0, 'Channel'),
            forward_from=User(0, "HAL9000", False),
            forward_from_chat=Chat(0, "Channel"),
        ),
    )


@pytest.fixture(scope='function', params=MessageEntity.ALL_TYPES)
def message_entity(request):
    """A MessageEntity, parametrized over every known entity type."""
    return MessageEntity(request.param, 0, 0, url='', user=User(1, 'first_name', False))


@pytest.fixture(
    scope='class',
    params=[{'class': MessageFilter}, {'class': UpdateFilter}],
    ids=['MessageFilter', 'UpdateFilter'],
)
def base_class(request):
    """Parametrizes a test over both custom-filter base classes."""
    return request.param['class']
class TestFilters:
    def test_all_filters_slot_behaviour(self, recwarn, mro_slots):
        """
        Use depth first search to get all nested filters, and instantiate them (which need it) with
        the correct number of arguments, then test each filter separately. Also tests setting
        custom attributes on custom filters.
        """
        # The total no. of filters excluding filters defined in __all__ is about 70 as of 16/2/21.
        # Gather all the filters to test using DFS-
        visited = []
        classes = inspect.getmembers(Filters, predicate=inspect.isclass)  # List[Tuple[str, type]]
        stack = classes.copy()
        while stack:
            cls = stack[-1][-1]  # get last element and its class
            for inner_cls in inspect.getmembers(
                cls,  # Get inner filters
                lambda a: inspect.isclass(a) and not issubclass(a, cls.__class__),
            ):
                if inner_cls[1] not in visited:
                    stack.append(inner_cls)
                    visited.append(inner_cls[1])
                    classes.append(inner_cls)
                    break
            else:
                stack.pop()

        # Now start the actual testing
        for name, cls in classes:
            # Can't instantiate abstract classes without overriding methods, so skip them for now
            if inspect.isabstract(cls) or name in {'__class__', '__base__'}:
                continue
            assert '__slots__' in cls.__dict__, f"Filter {name!r} doesn't have __slots__"
            # get no. of args minus the 'self' argument
            args = len(inspect.signature(cls.__init__).parameters) - 1
            if cls.__base__.__name__ == '_ChatUserBaseFilter':  # Special case, only 1 arg needed
                inst = cls('1')
            else:
                inst = cls() if args < 1 else cls(*['blah'] * args)  # unpack variable no. of args

            for attr in cls.__slots__:
                assert getattr(inst, attr, 'err') != 'err', f"got extra slot '{attr}' for {name}"
            assert not inst.__dict__, f"got missing slot(s): {inst.__dict__} for {name}"
            assert len(mro_slots(inst)) == len(set(mro_slots(inst))), f"same slot in {name}"
            with pytest.warns(TelegramDeprecationWarning, match='custom attributes') as warn:
                inst.custom = 'should give warning'
            if not warn:
                pytest.fail(f"Filter {name!r} didn't warn when setting custom attr")

        assert '__dict__' not in BaseFilter.__slots__ if py_ver < (3, 7) else True, 'dict in abc'

        # User-defined subclasses may set custom attributes freely; only the
        # library's own Filters instances emit the deprecation warning.
        class CustomFilter(MessageFilter):
            def filter(self, message: Message):
                pass

        with pytest.warns(None):
            CustomFilter().custom = 'allowed'  # Test setting custom attr to custom filters

        with pytest.warns(TelegramDeprecationWarning, match='custom attributes'):
            Filters().custom = 'raise warning'
    def test_filters_all(self, update):
        """Filters.all matches any update."""
        assert Filters.all(update)

    def test_filters_text(self, update):
        """Filters.text matches plain text, including slash-prefixed text."""
        update.message.text = 'test'
        assert (Filters.text)(update)
        update.message.text = '/test'
        assert (Filters.text)(update)

    def test_filters_text_strings(self, update):
        """Parametrized Filters.text(...) matches only texts in the given iterable."""
        update.message.text = '/test'
        assert Filters.text({'/test', 'test1'})(update)
        assert not Filters.text(['test1', 'test2'])(update)

    def test_filters_caption(self, update):
        """Filters.caption matches only when a caption is present."""
        update.message.caption = 'test'
        assert (Filters.caption)(update)
        update.message.caption = None
        assert not (Filters.caption)(update)

    def test_filters_caption_strings(self, update):
        """Parametrized Filters.caption(...) matches only captions in the given iterable."""
        update.message.caption = 'test'
        assert Filters.caption({'test', 'test1'})(update)
        assert not Filters.caption(['test1', 'test2'])(update)

    def test_filters_command_default(self, update):
        """Filters.command requires a bot_command entity at offset 0, not just a '/' prefix."""
        update.message.text = 'test'
        assert not Filters.command(update)
        update.message.text = '/test'
        assert not Filters.command(update)
        # Only accept commands at the beginning
        update.message.entities = [MessageEntity(MessageEntity.BOT_COMMAND, 3, 5)]
        assert not Filters.command(update)
        update.message.entities = [MessageEntity(MessageEntity.BOT_COMMAND, 0, 5)]
        assert Filters.command(update)

    def test_filters_command_anywhere(self, update):
        """Filters.command(False) accepts a bot_command entity anywhere in the text."""
        update.message.text = 'test /cmd'
        assert not (Filters.command(False))(update)
        update.message.entities = [MessageEntity(MessageEntity.BOT_COMMAND, 5, 4)]
        assert (Filters.command(False))(update)
    def test_filters_regex(self, update):
        """Filters.regex returns {'matches': [Match, ...]} on success and accepts
        both pattern strings and pre-compiled patterns."""
        SRE_TYPE = type(re.match("", ""))
        update.message.text = '/start deep-linked param'
        result = Filters.regex(r'deep-linked param')(update)
        assert result
        assert isinstance(result, dict)
        matches = result['matches']
        assert isinstance(matches, list)
        assert type(matches[0]) is SRE_TYPE
        update.message.text = '/help'
        assert Filters.regex(r'help')(update)

        update.message.text = 'test'
        assert not Filters.regex(r'fail')(update)
        assert Filters.regex(r'test')(update)
        assert Filters.regex(re.compile(r'test'))(update)
        assert Filters.regex(re.compile(r'TEST', re.IGNORECASE))(update)

        update.message.text = 'i love python'
        assert Filters.regex(r'.\b[lo]{2}ve python')(update)

        update.message.text = None
        assert not Filters.regex(r'fail')(update)

    def test_filters_regex_multiple(self, update):
        """ANDed/ORed regex filters merge all their Match objects into one list."""
        SRE_TYPE = type(re.match("", ""))
        update.message.text = '/start deep-linked param'
        result = (Filters.regex('deep') & Filters.regex(r'linked param'))(update)
        assert result
        assert isinstance(result, dict)
        matches = result['matches']
        assert isinstance(matches, list)
        assert all(type(res) is SRE_TYPE for res in matches)
        result = (Filters.regex('deep') | Filters.regex(r'linked param'))(update)
        assert result
        assert isinstance(result, dict)
        matches = result['matches']
        assert isinstance(matches, list)
        assert all(type(res) is SRE_TYPE for res in matches)
        result = (Filters.regex('not int') | Filters.regex(r'linked param'))(update)
        assert result
        assert isinstance(result, dict)
        matches = result['matches']
        assert isinstance(matches, list)
        assert all(type(res) is SRE_TYPE for res in matches)
        result = (Filters.regex('not int') & Filters.regex(r'linked param'))(update)
        assert not result
    def test_filters_merged_with_regex(self, update):
        """Combining a regex filter with a non-regex filter keeps the match data,
        except when an OR short-circuits before the regex runs."""
        SRE_TYPE = type(re.match("", ""))
        update.message.text = '/start deep-linked param'
        update.message.entities = [MessageEntity(MessageEntity.BOT_COMMAND, 0, 6)]
        result = (Filters.command & Filters.regex(r'linked param'))(update)
        assert result
        assert isinstance(result, dict)
        matches = result['matches']
        assert isinstance(matches, list)
        assert all(type(res) is SRE_TYPE for res in matches)
        result = (Filters.regex(r'linked param') & Filters.command)(update)
        assert result
        assert isinstance(result, dict)
        matches = result['matches']
        assert isinstance(matches, list)
        assert all(type(res) is SRE_TYPE for res in matches)
        result = (Filters.regex(r'linked param') | Filters.command)(update)
        assert result
        assert isinstance(result, dict)
        matches = result['matches']
        assert isinstance(matches, list)
        assert all(type(res) is SRE_TYPE for res in matches)
        # Should not give a match since it's a or filter and it short circuits
        result = (Filters.command | Filters.regex(r'linked param'))(update)
        assert result is True
    def test_regex_complex_merges(self, update):
        """Regex match data survives nested &/| compositions with non-regex filters,
        with the expected number of Match objects at each step."""
        SRE_TYPE = type(re.match("", ""))
        update.message.text = 'test it out'
        test_filter = Filters.regex('test') & (
            (Filters.status_update | Filters.forwarded) | Filters.regex('out')
        )
        result = test_filter(update)
        assert result
        assert isinstance(result, dict)
        matches = result['matches']
        assert isinstance(matches, list)
        assert len(matches) == 2
        assert all(type(res) is SRE_TYPE for res in matches)
        update.message.forward_date = datetime.datetime.utcnow()
        result = test_filter(update)
        assert result
        assert isinstance(result, dict)
        matches = result['matches']
        assert isinstance(matches, list)
        assert all(type(res) is SRE_TYPE for res in matches)
        update.message.text = 'test it'
        result = test_filter(update)
        assert result
        assert isinstance(result, dict)
        matches = result['matches']
        assert isinstance(matches, list)
        assert all(type(res) is SRE_TYPE for res in matches)
        update.message.forward_date = None
        result = test_filter(update)
        assert not result
        update.message.text = 'test it out'
        result = test_filter(update)
        assert result
        assert isinstance(result, dict)
        matches = result['matches']
        assert isinstance(matches, list)
        assert all(type(res) is SRE_TYPE for res in matches)
        update.message.pinned_message = True
        result = test_filter(update)
        assert result
        assert isinstance(result, dict)
        matches = result['matches']
        assert isinstance(matches, list)
        assert all(type(res) is SRE_TYPE for res in matches)
        update.message.text = 'it out'
        result = test_filter(update)
        assert not result

        # Second composition: (regex | command) & (regex | status_update)
        update.message.text = 'test it out'
        update.message.forward_date = None
        update.message.pinned_message = None
        test_filter = (Filters.regex('test') | Filters.command) & (
            Filters.regex('it') | Filters.status_update
        )
        result = test_filter(update)
        assert result
        assert isinstance(result, dict)
        matches = result['matches']
        assert isinstance(matches, list)
        assert len(matches) == 2
        assert all(type(res) is SRE_TYPE for res in matches)
        update.message.text = 'test'
        result = test_filter(update)
        assert not result
        update.message.pinned_message = True
        result = test_filter(update)
        assert result
        assert isinstance(result, dict)
        matches = result['matches']
        assert isinstance(matches, list)
        assert len(matches) == 1
        assert all(type(res) is SRE_TYPE for res in matches)
        update.message.text = 'nothing'
        result = test_filter(update)
        assert not result
        update.message.text = '/start'
        update.message.entities = [MessageEntity(MessageEntity.BOT_COMMAND, 0, 6)]
        result = test_filter(update)
        assert result
        assert isinstance(result, bool)
        update.message.text = '/start it'
        result = test_filter(update)
        assert result
        assert isinstance(result, dict)
        matches = result['matches']
        assert isinstance(matches, list)
        assert len(matches) == 1
        assert all(type(res) is SRE_TYPE for res in matches)
def test_regex_inverted(self, update):
update.message.text = '/start deep-linked param'
update.message.entities = [MessageEntity(MessageEntity.BOT_COMMAND, 0, 5)]
filter = ~Filters.regex(r'deep-linked param')
result = filter(update)
assert not result
update.message.text = 'not it'
result = filter(update)
assert result
assert isinstance(result, bool)
filter = ~Filters.regex('linked') & Filters.command
update.message.text = "it's linked"
result = filter(update)
assert not result
update.message.text = '/start'
update.message.entities = [MessageEntity(MessageEntity.BOT_COMMAND, 0, 6)]
result = filter(update)
assert result
update.message.text = '/linked'
result = filter(update)
assert not result
filter = ~Filters.regex('linked') | Filters.command
update.message.text = "it's linked"
update.message.entities = []
result = filter(update)
assert not result
update.message.text = '/start linked'
update.message.entities = [MessageEntity(MessageEntity.BOT_COMMAND, 0, 6)]
result = filter(update)
assert result
update.message.text = '/start'
result = filter(update)
assert result
update.message.text = 'nothig'
update.message.entities = []
result = filter(update)
assert result
    def test_filters_caption_regex(self, update):
        """Filters.caption_regex mirrors Filters.regex but runs on the caption."""
        SRE_TYPE = type(re.match("", ""))
        update.message.caption = '/start deep-linked param'
        result = Filters.caption_regex(r'deep-linked param')(update)
        assert result
        assert isinstance(result, dict)
        matches = result['matches']
        assert isinstance(matches, list)
        assert type(matches[0]) is SRE_TYPE
        update.message.caption = '/help'
        assert Filters.caption_regex(r'help')(update)

        update.message.caption = 'test'
        assert not Filters.caption_regex(r'fail')(update)
        assert Filters.caption_regex(r'test')(update)
        assert Filters.caption_regex(re.compile(r'test'))(update)
        assert Filters.caption_regex(re.compile(r'TEST', re.IGNORECASE))(update)

        update.message.caption = 'i love python'
        assert Filters.caption_regex(r'.\b[lo]{2}ve python')(update)

        update.message.caption = None
        assert not Filters.caption_regex(r'fail')(update)

    def test_filters_caption_regex_multiple(self, update):
        """ANDed/ORed caption_regex filters merge their Match objects into one list."""
        SRE_TYPE = type(re.match("", ""))
        update.message.caption = '/start deep-linked param'
        result = (Filters.caption_regex('deep') & Filters.caption_regex(r'linked param'))(update)
        assert result
        assert isinstance(result, dict)
        matches = result['matches']
        assert isinstance(matches, list)
        assert all(type(res) is SRE_TYPE for res in matches)
        result = (Filters.caption_regex('deep') | Filters.caption_regex(r'linked param'))(update)
        assert result
        assert isinstance(result, dict)
        matches = result['matches']
        assert isinstance(matches, list)
        assert all(type(res) is SRE_TYPE for res in matches)
        result = (Filters.caption_regex('not int') | Filters.caption_regex(r'linked param'))(
            update
        )
        assert result
        assert isinstance(result, dict)
        matches = result['matches']
        assert isinstance(matches, list)
        assert all(type(res) is SRE_TYPE for res in matches)
        result = (Filters.caption_regex('not int') & Filters.caption_regex(r'linked param'))(
            update
        )
        assert not result
    def test_filters_merged_with_caption_regex(self, update):
        """Combining caption_regex with a non-regex filter keeps the match data,
        except when an OR short-circuits before the regex runs."""
        SRE_TYPE = type(re.match("", ""))
        update.message.caption = '/start deep-linked param'
        update.message.entities = [MessageEntity(MessageEntity.BOT_COMMAND, 0, 6)]
        result = (Filters.command & Filters.caption_regex(r'linked param'))(update)
        assert result
        assert isinstance(result, dict)
        matches = result['matches']
        assert isinstance(matches, list)
        assert all(type(res) is SRE_TYPE for res in matches)
        result = (Filters.caption_regex(r'linked param') & Filters.command)(update)
        assert result
        assert isinstance(result, dict)
        matches = result['matches']
        assert isinstance(matches, list)
        assert all(type(res) is SRE_TYPE for res in matches)
        result = (Filters.caption_regex(r'linked param') | Filters.command)(update)
        assert result
        assert isinstance(result, dict)
        matches = result['matches']
        assert isinstance(matches, list)
        assert all(type(res) is SRE_TYPE for res in matches)
        # Should not give a match since it's a or filter and it short circuits
        result = (Filters.command | Filters.caption_regex(r'linked param'))(update)
        assert result is True
    def test_caption_regex_complex_merges(self, update):
        """Caption-regex match data survives nested &/| compositions with
        non-regex filters (caption counterpart of test_regex_complex_merges)."""
        SRE_TYPE = type(re.match("", ""))
        update.message.caption = 'test it out'
        test_filter = Filters.caption_regex('test') & (
            (Filters.status_update | Filters.forwarded) | Filters.caption_regex('out')
        )
        result = test_filter(update)
        assert result
        assert isinstance(result, dict)
        matches = result['matches']
        assert isinstance(matches, list)
        assert len(matches) == 2
        assert all(type(res) is SRE_TYPE for res in matches)
        update.message.forward_date = datetime.datetime.utcnow()
        result = test_filter(update)
        assert result
        assert isinstance(result, dict)
        matches = result['matches']
        assert isinstance(matches, list)
        assert all(type(res) is SRE_TYPE for res in matches)
        update.message.caption = 'test it'
        result = test_filter(update)
        assert result
        assert isinstance(result, dict)
        matches = result['matches']
        assert isinstance(matches, list)
        assert all(type(res) is SRE_TYPE for res in matches)
        update.message.forward_date = None
        result = test_filter(update)
        assert not result
        update.message.caption = 'test it out'
        result = test_filter(update)
        assert result
        assert isinstance(result, dict)
        matches = result['matches']
        assert isinstance(matches, list)
        assert all(type(res) is SRE_TYPE for res in matches)
        update.message.pinned_message = True
        result = test_filter(update)
        assert result
        assert isinstance(result, dict)
        matches = result['matches']
        assert isinstance(matches, list)
        assert all(type(res) is SRE_TYPE for res in matches)
        update.message.caption = 'it out'
        result = test_filter(update)
        assert not result

        # Second composition: (caption_regex | command) & (caption_regex | status_update)
        update.message.caption = 'test it out'
        update.message.forward_date = None
        update.message.pinned_message = None
        test_filter = (Filters.caption_regex('test') | Filters.command) & (
            Filters.caption_regex('it') | Filters.status_update
        )
        result = test_filter(update)
        assert result
        assert isinstance(result, dict)
        matches = result['matches']
        assert isinstance(matches, list)
        assert len(matches) == 2
        assert all(type(res) is SRE_TYPE for res in matches)
        update.message.caption = 'test'
        result = test_filter(update)
        assert not result
        update.message.pinned_message = True
        result = test_filter(update)
        assert result
        assert isinstance(result, dict)
        matches = result['matches']
        assert isinstance(matches, list)
        assert len(matches) == 1
        assert all(type(res) is SRE_TYPE for res in matches)
        update.message.caption = 'nothing'
        result = test_filter(update)
        assert not result
        update.message.caption = '/start'
        update.message.entities = [MessageEntity(MessageEntity.BOT_COMMAND, 0, 6)]
        result = test_filter(update)
        assert result
        assert isinstance(result, bool)
        update.message.caption = '/start it'
        result = test_filter(update)
        assert result
        assert isinstance(result, dict)
        matches = result['matches']
        assert isinstance(matches, list)
        assert len(matches) == 1
        assert all(type(res) is SRE_TYPE for res in matches)
    def test_caption_regex_inverted(self, update):
        """An inverted caption_regex filter yields a plain boolean and drops match data."""
        update.message.caption = '/start deep-linked param'
        update.message.entities = [MessageEntity(MessageEntity.BOT_COMMAND, 0, 5)]
        test_filter = ~Filters.caption_regex(r'deep-linked param')
        result = test_filter(update)
        assert not result

        update.message.caption = 'not it'
        result = test_filter(update)
        assert result
        assert isinstance(result, bool)

        test_filter = ~Filters.caption_regex('linked') & Filters.command
        update.message.caption = "it's linked"
        result = test_filter(update)
        assert not result

        update.message.caption = '/start'
        update.message.entities = [MessageEntity(MessageEntity.BOT_COMMAND, 0, 6)]
        result = test_filter(update)
        assert result

        update.message.caption = '/linked'
        result = test_filter(update)
        assert not result

        test_filter = ~Filters.caption_regex('linked') | Filters.command
        update.message.caption = "it's linked"
        update.message.entities = []
        result = test_filter(update)
        assert not result

        update.message.caption = '/start linked'
        update.message.entities = [MessageEntity(MessageEntity.BOT_COMMAND, 0, 6)]
        result = test_filter(update)
        assert result

        update.message.caption = '/start'
        result = test_filter(update)
        assert result

        update.message.caption = 'nothig'
        update.message.entities = []
        result = test_filter(update)
        assert result
    def test_filters_reply(self, update):
        """Filters.reply matches only messages that reply to another message."""
        another_message = Message(
            1,
            datetime.datetime.utcnow(),
            Chat(0, 'private'),
            from_user=User(1, 'TestOther', False),
        )
        update.message.text = 'test'
        assert not Filters.reply(update)
        update.message.reply_to_message = another_message
        assert Filters.reply(update)

    def test_filters_audio(self, update):
        """Filters.audio matches messages carrying an audio attachment."""
        assert not Filters.audio(update)
        update.message.audio = 'test'
        assert Filters.audio(update)

    def test_filters_document(self, update):
        """Filters.document matches messages carrying a document attachment."""
        assert not Filters.document(update)
        update.message.document = 'test'
        assert Filters.document(update)
    def test_filters_document_type(self, update):
        """Each Filters.document.<type> shortcut matches exactly its MIME type,
        and .category()/.mime_type() match by prefix / exact value."""
        update.message.document = Document(
            "file_id", 'unique_id', mime_type="application/vnd.android.package-archive"
        )
        assert Filters.document.apk(update)
        assert Filters.document.application(update)
        assert not Filters.document.doc(update)
        assert not Filters.document.audio(update)

        update.message.document.mime_type = "application/msword"
        assert Filters.document.doc(update)
        assert Filters.document.application(update)
        assert not Filters.document.docx(update)
        assert not Filters.document.audio(update)

        update.message.document.mime_type = (
            "application/vnd.openxmlformats-officedocument.wordprocessingml.document"
        )
        assert Filters.document.docx(update)
        assert Filters.document.application(update)
        assert not Filters.document.exe(update)
        assert not Filters.document.audio(update)

        update.message.document.mime_type = "application/x-ms-dos-executable"
        assert Filters.document.exe(update)
        assert Filters.document.application(update)
        assert not Filters.document.docx(update)
        assert not Filters.document.audio(update)

        update.message.document.mime_type = "video/mp4"
        assert Filters.document.gif(update)
        assert Filters.document.video(update)
        assert not Filters.document.jpg(update)
        assert not Filters.document.text(update)

        update.message.document.mime_type = "image/jpeg"
        assert Filters.document.jpg(update)
        assert Filters.document.image(update)
        assert not Filters.document.mp3(update)
        assert not Filters.document.video(update)

        update.message.document.mime_type = "audio/mpeg"
        assert Filters.document.mp3(update)
        assert Filters.document.audio(update)
        assert not Filters.document.pdf(update)
        assert not Filters.document.image(update)

        update.message.document.mime_type = "application/pdf"
        assert Filters.document.pdf(update)
        assert Filters.document.application(update)
        assert not Filters.document.py(update)
        assert not Filters.document.audio(update)

        update.message.document.mime_type = "text/x-python"
        assert Filters.document.py(update)
        assert Filters.document.text(update)
        assert not Filters.document.svg(update)
        assert not Filters.document.application(update)

        update.message.document.mime_type = "image/svg+xml"
        assert Filters.document.svg(update)
        assert Filters.document.image(update)
        assert not Filters.document.txt(update)
        assert not Filters.document.video(update)

        update.message.document.mime_type = "text/plain"
        assert Filters.document.txt(update)
        assert Filters.document.text(update)
        assert not Filters.document.targz(update)
        assert not Filters.document.application(update)

        update.message.document.mime_type = "application/x-compressed-tar"
        assert Filters.document.targz(update)
        assert Filters.document.application(update)
        assert not Filters.document.wav(update)
        assert not Filters.document.audio(update)

        update.message.document.mime_type = "audio/x-wav"
        assert Filters.document.wav(update)
        assert Filters.document.audio(update)
        assert not Filters.document.xml(update)
        assert not Filters.document.image(update)

        update.message.document.mime_type = "application/xml"
        assert Filters.document.xml(update)
        assert Filters.document.application(update)
        assert not Filters.document.zip(update)
        assert not Filters.document.audio(update)

        update.message.document.mime_type = "application/zip"
        assert Filters.document.zip(update)
        assert Filters.document.application(update)
        assert not Filters.document.apk(update)
        assert not Filters.document.audio(update)

        update.message.document.mime_type = "image/x-rgb"
        assert not Filters.document.category("application/")(update)
        assert not Filters.document.mime_type("application/x-sh")(update)
        update.message.document.mime_type = "application/x-sh"
        assert Filters.document.category("application/")(update)
        assert Filters.document.mime_type("application/x-sh")(update)
    def test_filters_file_extension_basic(self, update):
        """file_extension matches the trailing extension(s), including multi-part ones like tar.gz."""
        update.message.document = Document(
            "file_id",
            "unique_id",
            file_name="file.jpg",
            mime_type="image/jpeg",
        )
        assert Filters.document.file_extension("jpg")(update)
        assert not Filters.document.file_extension("jpeg")(update)
        assert not Filters.document.file_extension("file.jpg")(update)
        update.message.document.file_name = "file.tar.gz"
        # both the full compound extension and its last component match
        assert Filters.document.file_extension("tar.gz")(update)
        assert Filters.document.file_extension("gz")(update)
        assert not Filters.document.file_extension("tgz")(update)
        assert not Filters.document.file_extension("jpg")(update)
        update.message.document = None
        assert not Filters.document.file_extension("jpg")(update)
    def test_filters_file_extension_minds_dots(self, update):
        """Leading dots and partial-name matches are rejected; double dots and bare trailing dots behave as documented."""
        update.message.document = Document(
            "file_id",
            "unique_id",
            file_name="file.jpg",
            mime_type="image/jpeg",
        )
        assert not Filters.document.file_extension(".jpg")(update)
        assert not Filters.document.file_extension("e.jpg")(update)
        assert not Filters.document.file_extension("file.jpg")(update)
        assert not Filters.document.file_extension("")(update)
        update.message.document.file_name = "file..jpg"
        # with a double dot, ".jpg" counts as the extension as well
        assert Filters.document.file_extension("jpg")(update)
        assert Filters.document.file_extension(".jpg")(update)
        assert not Filters.document.file_extension("..jpg")(update)
        update.message.document.file_name = "file.docx"
        assert Filters.document.file_extension("docx")(update)
        assert not Filters.document.file_extension("doc")(update)
        assert not Filters.document.file_extension("ocx")(update)
        update.message.document.file_name = "file"
        # a file without any dot has no extension at all
        assert not Filters.document.file_extension("")(update)
        assert not Filters.document.file_extension("file")(update)
        update.message.document.file_name = "file."
        # a trailing dot means an empty extension
        assert Filters.document.file_extension("")(update)
    def test_filters_file_extension_none_arg(self, update):
        """file_extension(None) matches only documents that have no extension."""
        update.message.document = Document(
            "file_id",
            "unique_id",
            file_name="file.jpg",
            mime_type="image/jpeg",
        )
        assert not Filters.document.file_extension(None)(update)
        update.message.document.file_name = "file"
        assert Filters.document.file_extension(None)(update)
        assert not Filters.document.file_extension("None")(update)
        update.message.document.file_name = "file."
        # a trailing dot is an *empty* extension, not a missing one
        assert not Filters.document.file_extension(None)(update)
        update.message.document = None
        assert not Filters.document.file_extension(None)(update)
    def test_filters_file_extension_case_sensitivity(self, update):
        """Matching is case-insensitive by default and exact when case_sensitive=True."""
        update.message.document = Document(
            "file_id",
            "unique_id",
            file_name="file.jpg",
            mime_type="image/jpeg",
        )
        assert Filters.document.file_extension("JPG")(update)
        assert Filters.document.file_extension("jpG")(update)
        update.message.document.file_name = "file.JPG"
        assert Filters.document.file_extension("jpg")(update)
        assert not Filters.document.file_extension("jpg", case_sensitive=True)(update)
        update.message.document.file_name = "file.Dockerfile"
        assert Filters.document.file_extension("Dockerfile", case_sensitive=True)(update)
        assert not Filters.document.file_extension("DOCKERFILE", case_sensitive=True)(update)
    def test_filters_file_extension_name(self):
        """The filter's name lowercases the extension unless case_sensitive=True is set."""
        assert Filters.document.file_extension("jpg").name == (
            "Filters.document.file_extension('jpg')"
        )
        assert Filters.document.file_extension("JPG").name == (
            "Filters.document.file_extension('jpg')"
        )
        assert Filters.document.file_extension("jpg", case_sensitive=True).name == (
            "Filters.document.file_extension('jpg', case_sensitive=True)"
        )
        assert Filters.document.file_extension("JPG", case_sensitive=True).name == (
            "Filters.document.file_extension('JPG', case_sensitive=True)"
        )
        assert Filters.document.file_extension(".jpg").name == (
            "Filters.document.file_extension('.jpg')"
        )
        assert Filters.document.file_extension("").name == "Filters.document.file_extension('')"
        assert (
            Filters.document.file_extension(None).name == "Filters.document.file_extension(None)"
        )
    def test_filters_animation(self, update):
        """Filters.animation matches iff the message carries an animation."""
        assert not Filters.animation(update)
        update.message.animation = 'test'
        assert Filters.animation(update)
    def test_filters_photo(self, update):
        """Filters.photo matches iff the message carries a photo."""
        assert not Filters.photo(update)
        update.message.photo = 'test'
        assert Filters.photo(update)
    def test_filters_sticker(self, update):
        """Filters.sticker matches iff the message carries a sticker."""
        assert not Filters.sticker(update)
        update.message.sticker = 'test'
        assert Filters.sticker(update)
    def test_filters_video(self, update):
        """Filters.video matches iff the message carries a video."""
        assert not Filters.video(update)
        update.message.video = 'test'
        assert Filters.video(update)
    def test_filters_voice(self, update):
        """Filters.voice matches iff the message carries a voice note."""
        assert not Filters.voice(update)
        update.message.voice = 'test'
        assert Filters.voice(update)
    def test_filters_video_note(self, update):
        """Filters.video_note matches iff the message carries a video note."""
        assert not Filters.video_note(update)
        update.message.video_note = 'test'
        assert Filters.video_note(update)
    def test_filters_contact(self, update):
        """Filters.contact matches iff the message carries a contact."""
        assert not Filters.contact(update)
        update.message.contact = 'test'
        assert Filters.contact(update)
    def test_filters_location(self, update):
        """Filters.location matches iff the message carries a location."""
        assert not Filters.location(update)
        update.message.location = 'test'
        assert Filters.location(update)
    def test_filters_venue(self, update):
        """Filters.venue matches iff the message carries a venue."""
        assert not Filters.venue(update)
        update.message.venue = 'test'
        assert Filters.venue(update)
    def test_filters_status_update(self, update):
        """Each service-message attribute triggers both the umbrella Filters.status_update and its dedicated sub-filter.

        Every section sets exactly one status attribute, asserts both filters, then
        resets it so the sections stay independent.
        """
        assert not Filters.status_update(update)
        update.message.new_chat_members = ['test']
        assert Filters.status_update(update)
        assert Filters.status_update.new_chat_members(update)
        update.message.new_chat_members = None
        update.message.left_chat_member = 'test'
        assert Filters.status_update(update)
        assert Filters.status_update.left_chat_member(update)
        update.message.left_chat_member = None
        update.message.new_chat_title = 'test'
        assert Filters.status_update(update)
        assert Filters.status_update.new_chat_title(update)
        update.message.new_chat_title = ''
        update.message.new_chat_photo = 'test'
        assert Filters.status_update(update)
        assert Filters.status_update.new_chat_photo(update)
        update.message.new_chat_photo = None
        update.message.delete_chat_photo = True
        assert Filters.status_update(update)
        assert Filters.status_update.delete_chat_photo(update)
        update.message.delete_chat_photo = False
        # all three *_chat_created flags map onto the single chat_created sub-filter
        update.message.group_chat_created = True
        assert Filters.status_update(update)
        assert Filters.status_update.chat_created(update)
        update.message.group_chat_created = False
        update.message.supergroup_chat_created = True
        assert Filters.status_update(update)
        assert Filters.status_update.chat_created(update)
        update.message.supergroup_chat_created = False
        update.message.channel_chat_created = True
        assert Filters.status_update(update)
        assert Filters.status_update.chat_created(update)
        update.message.channel_chat_created = False
        update.message.message_auto_delete_timer_changed = True
        assert Filters.status_update(update)
        assert Filters.status_update.message_auto_delete_timer_changed(update)
        update.message.message_auto_delete_timer_changed = False
        # both migrate directions map onto the single migrate sub-filter
        update.message.migrate_to_chat_id = 100
        assert Filters.status_update(update)
        assert Filters.status_update.migrate(update)
        update.message.migrate_to_chat_id = 0
        update.message.migrate_from_chat_id = 100
        assert Filters.status_update(update)
        assert Filters.status_update.migrate(update)
        update.message.migrate_from_chat_id = 0
        update.message.pinned_message = 'test'
        assert Filters.status_update(update)
        assert Filters.status_update.pinned_message(update)
        update.message.pinned_message = None
        update.message.connected_website = 'http://example.com/'
        assert Filters.status_update(update)
        assert Filters.status_update.connected_website(update)
        update.message.connected_website = None
        update.message.proximity_alert_triggered = 'alert'
        assert Filters.status_update(update)
        assert Filters.status_update.proximity_alert_triggered(update)
        update.message.proximity_alert_triggered = None
        update.message.voice_chat_scheduled = 'scheduled'
        assert Filters.status_update(update)
        assert Filters.status_update.voice_chat_scheduled(update)
        update.message.voice_chat_scheduled = None
        update.message.voice_chat_started = 'hello'
        assert Filters.status_update(update)
        assert Filters.status_update.voice_chat_started(update)
        update.message.voice_chat_started = None
        update.message.voice_chat_ended = 'bye'
        assert Filters.status_update(update)
        assert Filters.status_update.voice_chat_ended(update)
        update.message.voice_chat_ended = None
        update.message.voice_chat_participants_invited = 'invited'
        assert Filters.status_update(update)
        assert Filters.status_update.voice_chat_participants_invited(update)
        update.message.voice_chat_participants_invited = None
    def test_filters_forwarded(self, update):
        """Filters.forwarded matches iff the message has a forward_date."""
        assert not Filters.forwarded(update)
        update.message.forward_date = datetime.datetime.utcnow()
        assert Filters.forwarded(update)
    def test_filters_game(self, update):
        """Filters.game matches iff the message carries a game."""
        assert not Filters.game(update)
        update.message.game = 'test'
        assert Filters.game(update)
    def test_entities_filter(self, update, message_entity):
        """Filters.entity inspects message.entities only, never caption_entities."""
        update.message.entities = [message_entity]
        assert Filters.entity(message_entity.type)(update)
        update.message.entities = []
        assert not Filters.entity(MessageEntity.MENTION)(update)
        # a second entity of a different type must not break matching on the first
        second = message_entity.to_dict()
        second['type'] = 'bold'
        second = MessageEntity.de_json(second, None)
        update.message.entities = [message_entity, second]
        assert Filters.entity(message_entity.type)(update)
        assert not Filters.caption_entity(message_entity.type)(update)
    def test_caption_entities_filter(self, update, message_entity):
        """Filters.caption_entity inspects message.caption_entities only, never entities."""
        update.message.caption_entities = [message_entity]
        assert Filters.caption_entity(message_entity.type)(update)
        update.message.caption_entities = []
        assert not Filters.caption_entity(MessageEntity.MENTION)(update)
        # a second entity of a different type must not break matching on the first
        second = message_entity.to_dict()
        second['type'] = 'bold'
        second = MessageEntity.de_json(second, None)
        update.message.caption_entities = [message_entity, second]
        assert Filters.caption_entity(message_entity.type)(update)
        assert not Filters.entity(message_entity.type)(update)
    def test_private_filter(self, update):
        """Deprecated Filters.private matches private chats only."""
        assert Filters.private(update)
        update.message.chat.type = 'group'
        assert not Filters.private(update)
    def test_private_filter_deprecation(self, update):
        """Using Filters.private emits a TelegramDeprecationWarning."""
        with pytest.warns(TelegramDeprecationWarning):
            Filters.private(update)
    def test_group_filter(self, update):
        """Deprecated Filters.group matches both group and supergroup chats."""
        assert not Filters.group(update)
        update.message.chat.type = 'group'
        assert Filters.group(update)
        update.message.chat.type = 'supergroup'
        assert Filters.group(update)
    def test_group_filter_deprecation(self, update):
        """Using Filters.group emits a TelegramDeprecationWarning."""
        with pytest.warns(TelegramDeprecationWarning):
            Filters.group(update)
    @pytest.mark.parametrize(
        ('chat_type, results'),
        [
            # results: (chat_type, private, group, supergroup, groups, channel)
            (None, (False, False, False, False, False, False)),
            (Chat.PRIVATE, (True, True, False, False, False, False)),
            (Chat.GROUP, (True, False, True, False, True, False)),
            (Chat.SUPERGROUP, (True, False, False, True, True, False)),
            (Chat.CHANNEL, (True, False, False, False, False, True)),
        ],
    )
    def test_filters_chat_types(self, update, chat_type, results):
        """Filters.chat_type and its sub-filters match exactly the expected chat types."""
        update.message.chat.type = chat_type
        assert Filters.chat_type(update) is results[0]
        assert Filters.chat_type.private(update) is results[1]
        assert Filters.chat_type.group(update) is results[2]
        assert Filters.chat_type.supergroup(update) is results[3]
        assert Filters.chat_type.groups(update) is results[4]
        assert Filters.chat_type.channel(update) is results[5]
    def test_filters_user_init(self):
        """Filters.user rejects specifying both user_id and username."""
        with pytest.raises(RuntimeError, match='in conjunction with'):
            Filters.user(user_id=1, username='user')
    def test_filters_user_allow_empty(self, update):
        """An empty Filters.user matches nothing unless allow_empty=True."""
        assert not Filters.user()(update)
        assert Filters.user(allow_empty=True)(update)
    def test_filters_user_id(self, update):
        """Filters.user matches by single id or by id list; no sender means no match."""
        assert not Filters.user(user_id=1)(update)
        update.message.from_user.id = 1
        assert Filters.user(user_id=1)(update)
        update.message.from_user.id = 2
        assert Filters.user(user_id=[1, 2])(update)
        assert not Filters.user(user_id=[3, 4])(update)
        update.message.from_user = None
        assert not Filters.user(user_id=[3, 4])(update)
    def test_filters_username(self, update):
        """Filters.user matches by username, with or without a leading '@'."""
        assert not Filters.user(username='user')(update)
        assert not Filters.user(username='Testuser')(update)
        update.message.from_user.username = 'user@'
        assert Filters.user(username='@user@')(update)
        assert Filters.user(username='user@')(update)
        assert Filters.user(username=['user1', 'user@', 'user2'])(update)
        assert not Filters.user(username=['@username', '@user_2'])(update)
        update.message.from_user = None
        assert not Filters.user(username=['@username', '@user_2'])(update)
    def test_filters_user_change_id(self, update):
        """Assigning user_ids replaces the id set; usernames may not be set on an id-based filter."""
        f = Filters.user(user_id=1)
        assert f.user_ids == {1}
        update.message.from_user.id = 1
        assert f(update)
        update.message.from_user.id = 2
        assert not f(update)
        f.user_ids = 2
        assert f.user_ids == {2}
        assert f(update)
        with pytest.raises(RuntimeError, match='username in conjunction'):
            f.usernames = 'user'
    def test_filters_user_change_username(self, update):
        """Assigning usernames replaces the name set; user_ids may not be set on a name-based filter."""
        f = Filters.user(username='user')
        update.message.from_user.username = 'user'
        assert f(update)
        update.message.from_user.username = 'User'
        assert not f(update)
        f.usernames = 'User'
        assert f(update)
        with pytest.raises(RuntimeError, match='user_id in conjunction'):
            f.user_ids = 1
    def test_filters_user_add_user_by_name(self, update):
        """add_usernames accepts a single name or a list and extends the filter."""
        users = ['user_a', 'user_b', 'user_c']
        f = Filters.user()
        for user in users:
            update.message.from_user.username = user
            assert not f(update)
        f.add_usernames('user_a')
        f.add_usernames(['user_b', 'user_c'])
        for user in users:
            update.message.from_user.username = user
            assert f(update)
        with pytest.raises(RuntimeError, match='user_id in conjunction'):
            f.add_user_ids(1)
def test_filters_user_add_user_by_id(self, update):
users = [1, 2, 3]
f = Filters.user()
for user in users:
update.message.from_user.id = user
assert not f(update)
f.add_user_ids(1)
f.add_user_ids([2, 3])
for user in users:
update.message.from_user.username = user
assert f(update)
with pytest.raises(RuntimeError, match='username in conjunction'):
f.add_usernames('user')
    def test_filters_user_remove_user_by_name(self, update):
        """remove_usernames accepts a single name or a list and shrinks the filter."""
        users = ['user_a', 'user_b', 'user_c']
        f = Filters.user(username=users)
        with pytest.raises(RuntimeError, match='user_id in conjunction'):
            f.remove_user_ids(1)
        for user in users:
            update.message.from_user.username = user
            assert f(update)
        f.remove_usernames('user_a')
        f.remove_usernames(['user_b', 'user_c'])
        for user in users:
            update.message.from_user.username = user
            assert not f(update)
def test_filters_user_remove_user_by_id(self, update):
users = [1, 2, 3]
f = Filters.user(user_id=users)
with pytest.raises(RuntimeError, match='username in conjunction'):
f.remove_usernames('user')
for user in users:
update.message.from_user.id = user
assert f(update)
f.remove_user_ids(1)
f.remove_user_ids([2, 3])
for user in users:
update.message.from_user.username = user
assert not f(update)
def test_filters_user_repr(self):
f = Filters.user([1, 2])
assert str(f) == 'Filters.user(1, 2)'
f.remove_user_ids(1)
f.remove_user_ids(2)
assert str(f) == 'Filters.user()'
f.add_usernames('@foobar')
assert str(f) == 'Filters.user(foobar)'
f.add_usernames('@barfoo')
assert str(f).startswith('Filters.user(')
# we don't know th exact order
assert 'barfoo' in str(f) and 'foobar' in str(f)
with pytest.raises(RuntimeError, match='Cannot set name'):
f.name = 'foo'
    def test_filters_chat_init(self):
        """Filters.chat rejects specifying both chat_id and username."""
        with pytest.raises(RuntimeError, match='in conjunction with'):
            Filters.chat(chat_id=1, username='chat')
    def test_filters_chat_allow_empty(self, update):
        """An empty Filters.chat matches nothing unless allow_empty=True."""
        assert not Filters.chat()(update)
        assert Filters.chat(allow_empty=True)(update)
    def test_filters_chat_id(self, update):
        """Filters.chat matches by single id or by id list; no chat means no match."""
        assert not Filters.chat(chat_id=1)(update)
        update.message.chat.id = 1
        assert Filters.chat(chat_id=1)(update)
        update.message.chat.id = 2
        assert Filters.chat(chat_id=[1, 2])(update)
        assert not Filters.chat(chat_id=[3, 4])(update)
        update.message.chat = None
        assert not Filters.chat(chat_id=[3, 4])(update)
    def test_filters_chat_username(self, update):
        """Filters.chat matches by username, with or without a leading '@'."""
        assert not Filters.chat(username='chat')(update)
        assert not Filters.chat(username='Testchat')(update)
        update.message.chat.username = 'chat@'
        assert Filters.chat(username='@chat@')(update)
        assert Filters.chat(username='chat@')(update)
        assert Filters.chat(username=['chat1', 'chat@', 'chat2'])(update)
        assert not Filters.chat(username=['@username', '@chat_2'])(update)
        update.message.chat = None
        assert not Filters.chat(username=['@username', '@chat_2'])(update)
    def test_filters_chat_change_id(self, update):
        """Assigning chat_ids replaces the id set; usernames may not be set on an id-based filter."""
        f = Filters.chat(chat_id=1)
        assert f.chat_ids == {1}
        update.message.chat.id = 1
        assert f(update)
        update.message.chat.id = 2
        assert not f(update)
        f.chat_ids = 2
        assert f.chat_ids == {2}
        assert f(update)
        with pytest.raises(RuntimeError, match='username in conjunction'):
            f.usernames = 'chat'
    def test_filters_chat_change_username(self, update):
        """Assigning usernames replaces the name set; chat_ids may not be set on a name-based filter."""
        f = Filters.chat(username='chat')
        update.message.chat.username = 'chat'
        assert f(update)
        update.message.chat.username = 'User'
        assert not f(update)
        f.usernames = 'User'
        assert f(update)
        with pytest.raises(RuntimeError, match='chat_id in conjunction'):
            f.chat_ids = 1
    def test_filters_chat_add_chat_by_name(self, update):
        """add_usernames accepts a single name or a list and extends the filter."""
        chats = ['chat_a', 'chat_b', 'chat_c']
        f = Filters.chat()
        for chat in chats:
            update.message.chat.username = chat
            assert not f(update)
        f.add_usernames('chat_a')
        f.add_usernames(['chat_b', 'chat_c'])
        for chat in chats:
            update.message.chat.username = chat
            assert f(update)
        with pytest.raises(RuntimeError, match='chat_id in conjunction'):
            f.add_chat_ids(1)
def test_filters_chat_add_chat_by_id(self, update):
chats = [1, 2, 3]
f = Filters.chat()
for chat in chats:
update.message.chat.id = chat
assert not f(update)
f.add_chat_ids(1)
f.add_chat_ids([2, 3])
for chat in chats:
update.message.chat.username = chat
assert f(update)
with pytest.raises(RuntimeError, match='username in conjunction'):
f.add_usernames('chat')
    def test_filters_chat_remove_chat_by_name(self, update):
        """remove_usernames accepts a single name or a list and shrinks the filter."""
        chats = ['chat_a', 'chat_b', 'chat_c']
        f = Filters.chat(username=chats)
        with pytest.raises(RuntimeError, match='chat_id in conjunction'):
            f.remove_chat_ids(1)
        for chat in chats:
            update.message.chat.username = chat
            assert f(update)
        f.remove_usernames('chat_a')
        f.remove_usernames(['chat_b', 'chat_c'])
        for chat in chats:
            update.message.chat.username = chat
            assert not f(update)
def test_filters_chat_remove_chat_by_id(self, update):
chats = [1, 2, 3]
f = Filters.chat(chat_id=chats)
with pytest.raises(RuntimeError, match='username in conjunction'):
f.remove_usernames('chat')
for chat in chats:
update.message.chat.id = chat
assert f(update)
f.remove_chat_ids(1)
f.remove_chat_ids([2, 3])
for chat in chats:
update.message.chat.username = chat
assert not f(update)
    def test_filters_chat_repr(self):
        """str(Filters.chat) reflects the current id/username set; name is read-only."""
        f = Filters.chat([1, 2])
        assert str(f) == 'Filters.chat(1, 2)'
        f.remove_chat_ids(1)
        f.remove_chat_ids(2)
        assert str(f) == 'Filters.chat()'
        f.add_usernames('@foobar')
        assert str(f) == 'Filters.chat(foobar)'
        f.add_usernames('@barfoo')
        assert str(f).startswith('Filters.chat(')
        # we don't know the exact order
        assert 'barfoo' in str(f) and 'foobar' in str(f)
        with pytest.raises(RuntimeError, match='Cannot set name'):
            f.name = 'foo'
    def test_filters_forwarded_from_init(self):
        """Filters.forwarded_from rejects specifying both chat_id and username."""
        with pytest.raises(RuntimeError, match='in conjunction with'):
            Filters.forwarded_from(chat_id=1, username='chat')
    def test_filters_forwarded_from_allow_empty(self, update):
        """An empty Filters.forwarded_from matches nothing unless allow_empty=True."""
        assert not Filters.forwarded_from()(update)
        assert Filters.forwarded_from(allow_empty=True)(update)
    def test_filters_forwarded_from_id(self, update):
        """Filters.forwarded_from matches ids from both forward_from (user) and forward_from_chat."""
        # Test with User id-
        assert not Filters.forwarded_from(chat_id=1)(update)
        update.message.forward_from.id = 1
        assert Filters.forwarded_from(chat_id=1)(update)
        update.message.forward_from.id = 2
        assert Filters.forwarded_from(chat_id=[1, 2])(update)
        assert not Filters.forwarded_from(chat_id=[3, 4])(update)
        update.message.forward_from = None
        assert not Filters.forwarded_from(chat_id=[3, 4])(update)
        # Test with Chat id- (forward_from is None here, so only the chat is consulted)
        update.message.forward_from_chat.id = 4
        assert Filters.forwarded_from(chat_id=[4])(update)
        assert Filters.forwarded_from(chat_id=[3, 4])(update)
        update.message.forward_from_chat.id = 2
        assert not Filters.forwarded_from(chat_id=[3, 4])(update)
        assert Filters.forwarded_from(chat_id=2)(update)
    def test_filters_forwarded_from_username(self, update):
        """Filters.forwarded_from matches usernames from both forward_from and forward_from_chat."""
        # For User username
        assert not Filters.forwarded_from(username='chat')(update)
        assert not Filters.forwarded_from(username='Testchat')(update)
        update.message.forward_from.username = 'chat@'
        assert Filters.forwarded_from(username='@chat@')(update)
        assert Filters.forwarded_from(username='chat@')(update)
        assert Filters.forwarded_from(username=['chat1', 'chat@', 'chat2'])(update)
        assert not Filters.forwarded_from(username=['@username', '@chat_2'])(update)
        update.message.forward_from = None
        assert not Filters.forwarded_from(username=['@username', '@chat_2'])(update)
        # For Chat username (forward_from is None here, so only the chat is consulted)
        assert not Filters.forwarded_from(username='chat')(update)
        assert not Filters.forwarded_from(username='Testchat')(update)
        update.message.forward_from_chat.username = 'chat@'
        assert Filters.forwarded_from(username='@chat@')(update)
        assert Filters.forwarded_from(username='chat@')(update)
        assert Filters.forwarded_from(username=['chat1', 'chat@', 'chat2'])(update)
        assert not Filters.forwarded_from(username=['@username', '@chat_2'])(update)
        update.message.forward_from_chat = None
        assert not Filters.forwarded_from(username=['@username', '@chat_2'])(update)
    def test_filters_forwarded_from_change_id(self, update):
        """Assigning chat_ids replaces the id set for both user and chat forward origins."""
        f = Filters.forwarded_from(chat_id=1)
        # For User ids-
        assert f.chat_ids == {1}
        update.message.forward_from.id = 1
        assert f(update)
        update.message.forward_from.id = 2
        assert not f(update)
        f.chat_ids = 2
        assert f.chat_ids == {2}
        assert f(update)
        # For Chat ids-
        f = Filters.forwarded_from(chat_id=1)  # reset this
        update.message.forward_from = None  # and change this to None, only one of them can be True
        assert f.chat_ids == {1}
        update.message.forward_from_chat.id = 1
        assert f(update)
        update.message.forward_from_chat.id = 2
        assert not f(update)
        f.chat_ids = 2
        assert f.chat_ids == {2}
        assert f(update)
        with pytest.raises(RuntimeError, match='username in conjunction'):
            f.usernames = 'chat'
    def test_filters_forwarded_from_change_username(self, update):
        """Assigning usernames replaces the name set for both user and chat forward origins."""
        # For User usernames
        f = Filters.forwarded_from(username='chat')
        update.message.forward_from.username = 'chat'
        assert f(update)
        update.message.forward_from.username = 'User'
        assert not f(update)
        f.usernames = 'User'
        assert f(update)
        # For Chat usernames (forward_from cleared so only the chat is consulted)
        update.message.forward_from = None
        f = Filters.forwarded_from(username='chat')
        update.message.forward_from_chat.username = 'chat'
        assert f(update)
        update.message.forward_from_chat.username = 'User'
        assert not f(update)
        f.usernames = 'User'
        assert f(update)
        with pytest.raises(RuntimeError, match='chat_id in conjunction'):
            f.chat_ids = 1
    def test_filters_forwarded_from_add_chat_by_name(self, update):
        """add_usernames extends the filter for both user and chat forward origins."""
        chats = ['chat_a', 'chat_b', 'chat_c']
        f = Filters.forwarded_from()
        # For User usernames
        for chat in chats:
            update.message.forward_from.username = chat
            assert not f(update)
        f.add_usernames('chat_a')
        f.add_usernames(['chat_b', 'chat_c'])
        for chat in chats:
            update.message.forward_from.username = chat
            assert f(update)
        # For Chat usernames (forward_from cleared so only the chat is consulted)
        update.message.forward_from = None
        f = Filters.forwarded_from()
        for chat in chats:
            update.message.forward_from_chat.username = chat
            assert not f(update)
        f.add_usernames('chat_a')
        f.add_usernames(['chat_b', 'chat_c'])
        for chat in chats:
            update.message.forward_from_chat.username = chat
            assert f(update)
        with pytest.raises(RuntimeError, match='chat_id in conjunction'):
            f.add_chat_ids(1)
def test_filters_forwarded_from_add_chat_by_id(self, update):
chats = [1, 2, 3]
f = Filters.forwarded_from()
# For User ids
for chat in chats:
update.message.forward_from.id = chat
assert not f(update)
f.add_chat_ids(1)
f.add_chat_ids([2, 3])
for chat in chats:
update.message.forward_from.username = chat
assert f(update)
# For Chat ids-
update.message.forward_from = None
f = Filters.forwarded_from()
for chat in chats:
update.message.forward_from_chat.id = chat
assert not f(update)
f.add_chat_ids(1)
f.add_chat_ids([2, 3])
for chat in chats:
update.message.forward_from_chat.username = chat
assert f(update)
with pytest.raises(RuntimeError, match='username in conjunction'):
f.add_usernames('chat')
    def test_filters_forwarded_from_remove_chat_by_name(self, update):
        """remove_usernames shrinks the filter for both user and chat forward origins."""
        chats = ['chat_a', 'chat_b', 'chat_c']
        f = Filters.forwarded_from(username=chats)
        with pytest.raises(RuntimeError, match='chat_id in conjunction'):
            f.remove_chat_ids(1)
        # For User usernames
        for chat in chats:
            update.message.forward_from.username = chat
            assert f(update)
        f.remove_usernames('chat_a')
        f.remove_usernames(['chat_b', 'chat_c'])
        for chat in chats:
            update.message.forward_from.username = chat
            assert not f(update)
        # For Chat usernames (forward_from cleared so only the chat is consulted)
        update.message.forward_from = None
        f = Filters.forwarded_from(username=chats)
        for chat in chats:
            update.message.forward_from_chat.username = chat
            assert f(update)
        f.remove_usernames('chat_a')
        f.remove_usernames(['chat_b', 'chat_c'])
        for chat in chats:
            update.message.forward_from_chat.username = chat
            assert not f(update)
def test_filters_forwarded_from_remove_chat_by_id(self, update):
chats = [1, 2, 3]
f = Filters.forwarded_from(chat_id=chats)
with pytest.raises(RuntimeError, match='username in conjunction'):
f.remove_usernames('chat')
# For User ids
for chat in chats:
update.message.forward_from.id = chat
assert f(update)
f.remove_chat_ids(1)
f.remove_chat_ids([2, 3])
for chat in chats:
update.message.forward_from.username = chat
assert not f(update)
# For Chat ids
update.message.forward_from = None
f = Filters.forwarded_from(chat_id=chats)
for chat in chats:
update.message.forward_from_chat.id = chat
assert f(update)
f.remove_chat_ids(1)
f.remove_chat_ids([2, 3])
for chat in chats:
update.message.forward_from_chat.username = chat
assert not f(update)
    def test_filters_forwarded_from_repr(self):
        """str(Filters.forwarded_from) reflects the current id/username set; name is read-only."""
        f = Filters.forwarded_from([1, 2])
        assert str(f) == 'Filters.forwarded_from(1, 2)'
        f.remove_chat_ids(1)
        f.remove_chat_ids(2)
        assert str(f) == 'Filters.forwarded_from()'
        f.add_usernames('@foobar')
        assert str(f) == 'Filters.forwarded_from(foobar)'
        f.add_usernames('@barfoo')
        assert str(f).startswith('Filters.forwarded_from(')
        # we don't know the exact order
        assert 'barfoo' in str(f) and 'foobar' in str(f)
        with pytest.raises(RuntimeError, match='Cannot set name'):
            f.name = 'foo'
    def test_filters_sender_chat_init(self):
        """Filters.sender_chat rejects specifying both chat_id and username."""
        with pytest.raises(RuntimeError, match='in conjunction with'):
            Filters.sender_chat(chat_id=1, username='chat')
    def test_filters_sender_chat_allow_empty(self, update):
        """An empty Filters.sender_chat matches nothing unless allow_empty=True."""
        assert not Filters.sender_chat()(update)
        assert Filters.sender_chat(allow_empty=True)(update)
    def test_filters_sender_chat_id(self, update):
        """Filters.sender_chat matches by single id or by id list; no sender_chat means no match."""
        assert not Filters.sender_chat(chat_id=1)(update)
        update.message.sender_chat.id = 1
        assert Filters.sender_chat(chat_id=1)(update)
        update.message.sender_chat.id = 2
        assert Filters.sender_chat(chat_id=[1, 2])(update)
        assert not Filters.sender_chat(chat_id=[3, 4])(update)
        update.message.sender_chat = None
        assert not Filters.sender_chat(chat_id=[3, 4])(update)
    def test_filters_sender_chat_username(self, update):
        """Filters.sender_chat matches by username, with or without a leading '@'."""
        assert not Filters.sender_chat(username='chat')(update)
        assert not Filters.sender_chat(username='Testchat')(update)
        update.message.sender_chat.username = 'chat@'
        assert Filters.sender_chat(username='@chat@')(update)
        assert Filters.sender_chat(username='chat@')(update)
        assert Filters.sender_chat(username=['chat1', 'chat@', 'chat2'])(update)
        assert not Filters.sender_chat(username=['@username', '@chat_2'])(update)
        update.message.sender_chat = None
        assert not Filters.sender_chat(username=['@username', '@chat_2'])(update)
    def test_filters_sender_chat_change_id(self, update):
        """Assigning chat_ids replaces the id set; usernames may not be set on an id-based filter."""
        f = Filters.sender_chat(chat_id=1)
        assert f.chat_ids == {1}
        update.message.sender_chat.id = 1
        assert f(update)
        update.message.sender_chat.id = 2
        assert not f(update)
        f.chat_ids = 2
        assert f.chat_ids == {2}
        assert f(update)
        with pytest.raises(RuntimeError, match='username in conjunction'):
            f.usernames = 'chat'
    def test_filters_sender_chat_change_username(self, update):
        """Assigning usernames replaces the name set; chat_ids may not be set on a name-based filter."""
        f = Filters.sender_chat(username='chat')
        update.message.sender_chat.username = 'chat'
        assert f(update)
        update.message.sender_chat.username = 'User'
        assert not f(update)
        f.usernames = 'User'
        assert f(update)
        with pytest.raises(RuntimeError, match='chat_id in conjunction'):
            f.chat_ids = 1
    def test_filters_sender_chat_add_sender_chat_by_name(self, update):
        """add_usernames accepts a single name or a list and extends the filter."""
        chats = ['chat_a', 'chat_b', 'chat_c']
        f = Filters.sender_chat()
        for chat in chats:
            update.message.sender_chat.username = chat
            assert not f(update)
        f.add_usernames('chat_a')
        f.add_usernames(['chat_b', 'chat_c'])
        for chat in chats:
            update.message.sender_chat.username = chat
            assert f(update)
        with pytest.raises(RuntimeError, match='chat_id in conjunction'):
            f.add_chat_ids(1)
def test_filters_sender_chat_add_sender_chat_by_id(self, update):
chats = [1, 2, 3]
f = Filters.sender_chat()
for chat in chats:
update.message.sender_chat.id = chat
assert not f(update)
f.add_chat_ids(1)
f.add_chat_ids([2, 3])
for chat in chats:
update.message.sender_chat.username = chat
assert f(update)
with pytest.raises(RuntimeError, match='username in conjunction'):
f.add_usernames('chat')
    def test_filters_sender_chat_remove_sender_chat_by_name(self, update):
        """remove_usernames accepts a single name or a list and shrinks the filter."""
        chats = ['chat_a', 'chat_b', 'chat_c']
        f = Filters.sender_chat(username=chats)
        with pytest.raises(RuntimeError, match='chat_id in conjunction'):
            f.remove_chat_ids(1)
        for chat in chats:
            update.message.sender_chat.username = chat
            assert f(update)
        f.remove_usernames('chat_a')
        f.remove_usernames(['chat_b', 'chat_c'])
        for chat in chats:
            update.message.sender_chat.username = chat
            assert not f(update)
def test_filters_sender_chat_remove_sender_chat_by_id(self, update):
chats = [1, 2, 3]
f = Filters.sender_chat(chat_id=chats)
with pytest.raises(RuntimeError, match='username in conjunction'):
f.remove_usernames('chat')
for chat in chats:
update.message.sender_chat.id = chat
assert f(update)
f.remove_chat_ids(1)
f.remove_chat_ids([2, 3])
for chat in chats:
update.message.sender_chat.username = chat
assert not f(update)
    def test_filters_sender_chat_repr(self):
        """str(Filters.sender_chat) reflects the current id/username set; name is read-only."""
        f = Filters.sender_chat([1, 2])
        assert str(f) == 'Filters.sender_chat(1, 2)'
        f.remove_chat_ids(1)
        f.remove_chat_ids(2)
        assert str(f) == 'Filters.sender_chat()'
        f.add_usernames('@foobar')
        assert str(f) == 'Filters.sender_chat(foobar)'
        f.add_usernames('@barfoo')
        assert str(f).startswith('Filters.sender_chat(')
        # we don't know the exact order
        assert 'barfoo' in str(f) and 'foobar' in str(f)
        with pytest.raises(RuntimeError, match='Cannot set name'):
            f.name = 'foo'
    def test_filters_sender_chat_super_group(self, update):
        """sender_chat.super_group matches only supergroup sender chats."""
        update.message.sender_chat.type = Chat.PRIVATE
        assert not Filters.sender_chat.super_group(update)
        update.message.sender_chat.type = Chat.CHANNEL
        assert not Filters.sender_chat.super_group(update)
        update.message.sender_chat.type = Chat.SUPERGROUP
        assert Filters.sender_chat.super_group(update)
        update.message.sender_chat = None
        assert not Filters.sender_chat.super_group(update)
    def test_filters_sender_chat_channel(self, update):
        """sender_chat.channel matches only channel sender chats."""
        update.message.sender_chat.type = Chat.PRIVATE
        assert not Filters.sender_chat.channel(update)
        update.message.sender_chat.type = Chat.SUPERGROUP
        assert not Filters.sender_chat.channel(update)
        update.message.sender_chat.type = Chat.CHANNEL
        assert Filters.sender_chat.channel(update)
        update.message.sender_chat = None
        assert not Filters.sender_chat.channel(update)
    def test_filters_invoice(self, update):
        """Filters.invoice matches iff the message carries an invoice."""
        assert not Filters.invoice(update)
        update.message.invoice = 'test'
        assert Filters.invoice(update)
    def test_filters_successful_payment(self, update):
        """Filters.successful_payment matches iff the message carries a successful payment."""
        assert not Filters.successful_payment(update)
        update.message.successful_payment = 'test'
        assert Filters.successful_payment(update)
    def test_filters_passport_data(self, update):
        """Filters.passport_data matches iff the message carries passport data."""
        assert not Filters.passport_data(update)
        update.message.passport_data = 'test'
        assert Filters.passport_data(update)
    def test_filters_poll(self, update):
        """Filters.poll matches iff the message carries a poll."""
        assert not Filters.poll(update)
        update.message.poll = 'test'
        assert Filters.poll(update)
    @pytest.mark.parametrize('emoji', Dice.ALL_EMOJI)
    def test_filters_dice(self, update, emoji):
        """The bare Filters.dice matches any dice emoji and any value."""
        update.message.dice = Dice(4, emoji)
        assert Filters.dice(update)
        update.message.dice = None
        assert not Filters.dice(update)
@pytest.mark.parametrize('emoji', Dice.ALL_EMOJI)
def test_filters_dice_list(self, update, emoji):
update.message.dice = None
assert not Filters.dice(5)(update)
update.message.dice = Dice(5, emoji)
assert Filters.dice(5)(update)
assert Filters.dice({5, 6})(update)
assert not Filters.dice(1)(update)
assert not Filters.dice([2, 3])(update)
def test_filters_dice_type(self, update):
update.message.dice = Dice(5, '🎲')
assert Filters.dice.dice(update)
assert Filters.dice.dice([4, 5])(update)
assert not Filters.dice.darts(update)
assert not Filters.dice.basketball(update)
assert not Filters.dice.dice([6])(update)
update.message.dice = Dice(5, '🎯')
assert Filters.dice.darts(update)
assert Filters.dice.darts([4, 5])(update)
assert not Filters.dice.dice(update)
assert not Filters.dice.basketball(update)
assert not Filters.dice.darts([6])(update)
update.message.dice = Dice(5, '🏀')
assert Filters.dice.basketball(update)
assert Filters.dice.basketball([4, 5])(update)
assert not Filters.dice.dice(update)
assert not Filters.dice.darts(update)
assert not Filters.dice.basketball([4])(update)
update.message.dice = Dice(5, '⚽')
assert Filters.dice.football(update)
assert Filters.dice.football([4, 5])(update)
assert not Filters.dice.dice(update)
assert not Filters.dice.darts(update)
assert not Filters.dice.football([4])(update)
update.message.dice = Dice(5, '🎰')
assert Filters.dice.slot_machine(update)
assert Filters.dice.slot_machine([4, 5])(update)
assert not Filters.dice.dice(update)
assert not Filters.dice.darts(update)
assert not Filters.dice.slot_machine([4])(update)
update.message.dice = Dice(5, '🎳')
assert Filters.dice.bowling(update)
assert Filters.dice.bowling([4, 5])(update)
assert not Filters.dice.dice(update)
assert not Filters.dice.darts(update)
assert not Filters.dice.bowling([4])(update)
def test_language_filter_single(self, update):
update.message.from_user.language_code = 'en_US'
assert (Filters.language('en_US'))(update)
assert (Filters.language('en'))(update)
assert not (Filters.language('en_GB'))(update)
assert not (Filters.language('da'))(update)
update.message.from_user.language_code = 'da'
assert not (Filters.language('en_US'))(update)
assert not (Filters.language('en'))(update)
assert not (Filters.language('en_GB'))(update)
assert (Filters.language('da'))(update)
def test_language_filter_multiple(self, update):
    """A list-valued language filter accepts a message whose sender's
    language code matches (or is prefixed by) any of the given codes."""
    lang_filter = Filters.language(['en_US', 'da'])
    for code, expected in (('en_US', True), ('en_GB', False), ('da', True)):
        update.message.from_user.language_code = code
        assert bool(lang_filter(update)) is expected
def test_and_filters(self, update):
update.message.text = 'test'
update.message.forward_date = datetime.datetime.utcnow()
assert (Filters.text & Filters.forwarded)(update)
update.message.text = '/test'
assert (Filters.text & Filters.forwarded)(update)
update.message.text = 'test'
update.message.forward_date = None
assert not (Filters.text & Filters.forwarded)(update)
update.message.text = 'test'
update.message.forward_date = datetime.datetime.utcnow()
assert (Filters.text & Filters.forwarded & Filters.private)(update)
def test_or_filters(self, update):
update.message.text = 'test'
assert (Filters.text | Filters.status_update)(update)
update.message.group_chat_created = True
assert (Filters.text | Filters.status_update)(update)
update.message.text = None
assert (Filters.text | Filters.status_update)(update)
update.message.group_chat_created = False
assert not (Filters.text | Filters.status_update)(update)
def test_and_or_filters(self, update):
update.message.text = 'test'
update.message.forward_date = datetime.datetime.utcnow()
assert (Filters.text & (Filters.status_update | Filters.forwarded))(update)
update.message.forward_date = None
assert not (Filters.text & (Filters.forwarded | Filters.status_update))(update)
update.message.pinned_message = True
assert Filters.text & (Filters.forwarded | Filters.status_update)(update)
assert (
str(Filters.text & (Filters.forwarded | Filters.entity(MessageEntity.MENTION)))
== '<Filters.text and <Filters.forwarded or '
'Filters.entity(mention)>>'
)
def test_xor_filters(self, update):
update.message.text = 'test'
update.effective_user.id = 123
assert not (Filters.text ^ Filters.user(123))(update)
update.message.text = None
update.effective_user.id = 1234
assert not (Filters.text ^ Filters.user(123))(update)
update.message.text = 'test'
assert (Filters.text ^ Filters.user(123))(update)
update.message.text = None
update.effective_user.id = 123
assert (Filters.text ^ Filters.user(123))(update)
def test_xor_filters_repr(self, update):
assert str(Filters.text ^ Filters.user(123)) == '<Filters.text xor Filters.user(123)>'
with pytest.raises(RuntimeError, match='Cannot set name'):
(Filters.text ^ Filters.user(123)).name = 'foo'
def test_and_xor_filters(self, update):
update.message.text = 'test'
update.message.forward_date = datetime.datetime.utcnow()
assert (Filters.forwarded & (Filters.text ^ Filters.user(123)))(update)
update.message.text = None
update.effective_user.id = 123
assert (Filters.forwarded & (Filters.text ^ Filters.user(123)))(update)
update.message.text = 'test'
assert not (Filters.forwarded & (Filters.text ^ Filters.user(123)))(update)
update.message.forward_date = None
update.message.text = None
update.effective_user.id = 123
assert not (Filters.forwarded & (Filters.text ^ Filters.user(123)))(update)
update.message.text = 'test'
update.effective_user.id = 456
assert not (Filters.forwarded & (Filters.text ^ Filters.user(123)))(update)
assert (
str(Filters.forwarded & (Filters.text ^ Filters.user(123)))
== '<Filters.forwarded and <Filters.text xor '
'Filters.user(123)>>'
)
def test_xor_regex_filters(self, update):
SRE_TYPE = type(re.match("", ""))
update.message.text = 'test'
update.message.forward_date = datetime.datetime.utcnow()
assert not (Filters.forwarded ^ Filters.regex('^test$'))(update)
update.message.forward_date = None
result = (Filters.forwarded ^ Filters.regex('^test$'))(update)
assert result
assert isinstance(result, dict)
matches = result['matches']
assert isinstance(matches, list)
assert type(matches[0]) is SRE_TYPE
update.message.forward_date = datetime.datetime.utcnow()
update.message.text = None
assert (Filters.forwarded ^ Filters.regex('^test$'))(update) is True
def test_inverted_filters(self, update):
update.message.text = '/test'
update.message.entities = [MessageEntity(MessageEntity.BOT_COMMAND, 0, 5)]
assert Filters.command(update)
assert not (~Filters.command)(update)
update.message.text = 'test'
update.message.entities = []
assert not Filters.command(update)
assert (~Filters.command)(update)
def test_inverted_filters_repr(self, update):
assert str(~Filters.text) == '<inverted Filters.text>'
with pytest.raises(RuntimeError, match='Cannot set name'):
(~Filters.text).name = 'foo'
def test_inverted_and_filters(self, update):
update.message.text = '/test'
update.message.entities = [MessageEntity(MessageEntity.BOT_COMMAND, 0, 5)]
update.message.forward_date = 1
assert (Filters.forwarded & Filters.command)(update)
assert not (~Filters.forwarded & Filters.command)(update)
assert not (Filters.forwarded & ~Filters.command)(update)
assert not (~(Filters.forwarded & Filters.command))(update)
update.message.forward_date = None
assert not (Filters.forwarded & Filters.command)(update)
assert (~Filters.forwarded & Filters.command)(update)
assert not (Filters.forwarded & ~Filters.command)(update)
assert (~(Filters.forwarded & Filters.command))(update)
update.message.text = 'test'
update.message.entities = []
assert not (Filters.forwarded & Filters.command)(update)
assert not (~Filters.forwarded & Filters.command)(update)
assert not (Filters.forwarded & ~Filters.command)(update)
assert (~(Filters.forwarded & Filters.command))(update)
def test_faulty_custom_filter(self, update):
class _CustomFilter(BaseFilter):
pass
with pytest.raises(TypeError, match='Can\'t instantiate abstract class _CustomFilter'):
_CustomFilter()
def test_custom_unnamed_filter(self, update, base_class):
class Unnamed(base_class):
def filter(self, mes):
return True
unnamed = Unnamed()
assert str(unnamed) == Unnamed.__name__
def test_update_type_message(self, update):
assert Filters.update.message(update)
assert not Filters.update.edited_message(update)
assert Filters.update.messages(update)
assert not Filters.update.channel_post(update)
assert not Filters.update.edited_channel_post(update)
assert not Filters.update.channel_posts(update)
assert Filters.update(update)
def test_update_type_edited_message(self, update):
update.edited_message, update.message = update.message, update.edited_message
assert not Filters.update.message(update)
assert Filters.update.edited_message(update)
assert Filters.update.messages(update)
assert not Filters.update.channel_post(update)
assert not Filters.update.edited_channel_post(update)
assert not Filters.update.channel_posts(update)
assert Filters.update(update)
def test_update_type_channel_post(self, update):
    """Only the channel_post update-type filters should match."""
    # NOTE(review): the right-hand side reads update.edited_message (which is
    # None in this fixture) rather than update.channel_post; the net effect is
    # channel_post = old message and message = None, which is what the
    # assertions below need -- but it only works because edited_message is
    # unset. Swapping with update.channel_post would read more clearly.
    update.channel_post, update.message = update.message, update.edited_message
    assert not Filters.update.message(update)
    assert not Filters.update.edited_message(update)
    assert not Filters.update.messages(update)
    assert Filters.update.channel_post(update)
    assert not Filters.update.edited_channel_post(update)
    assert Filters.update.channel_posts(update)
    assert Filters.update(update)
def test_update_type_edited_channel_post(self, update):
update.edited_channel_post, update.message = update.message, update.edited_message
assert not Filters.update.message(update)
assert not Filters.update.edited_message(update)
assert not Filters.update.messages(update)
assert not Filters.update.channel_post(update)
assert Filters.update.edited_channel_post(update)
assert Filters.update.channel_posts(update)
assert Filters.update(update)
def test_merged_short_circuit_and(self, update, base_class):
update.message.text = '/test'
update.message.entities = [MessageEntity(MessageEntity.BOT_COMMAND, 0, 5)]
class TestException(Exception):
pass
class RaisingFilter(base_class):
def filter(self, _):
raise TestException
raising_filter = RaisingFilter()
with pytest.raises(TestException):
(Filters.command & raising_filter)(update)
update.message.text = 'test'
update.message.entities = []
(Filters.command & raising_filter)(update)
def test_merged_filters_repr(self, update):
with pytest.raises(RuntimeError, match='Cannot set name'):
(Filters.text & Filters.photo).name = 'foo'
def test_merged_short_circuit_or(self, update, base_class):
update.message.text = 'test'
class TestException(Exception):
pass
class RaisingFilter(base_class):
def filter(self, _):
raise TestException
raising_filter = RaisingFilter()
with pytest.raises(TestException):
(Filters.command | raising_filter)(update)
update.message.text = '/test'
update.message.entities = [MessageEntity(MessageEntity.BOT_COMMAND, 0, 5)]
(Filters.command | raising_filter)(update)
def test_merged_data_merging_and(self, update, base_class):
update.message.text = '/test'
update.message.entities = [MessageEntity(MessageEntity.BOT_COMMAND, 0, 5)]
class DataFilter(base_class):
data_filter = True
def __init__(self, data):
self.data = data
def filter(self, _):
return {'test': [self.data]}
result = (Filters.command & DataFilter('blah'))(update)
assert result['test'] == ['blah']
result = (DataFilter('blah1') & DataFilter('blah2'))(update)
assert result['test'] == ['blah1', 'blah2']
update.message.text = 'test'
update.message.entities = []
result = (Filters.command & DataFilter('blah'))(update)
assert not result
def test_merged_data_merging_or(self, update, base_class):
update.message.text = '/test'
class DataFilter(base_class):
data_filter = True
def __init__(self, data):
self.data = data
def filter(self, _):
return {'test': [self.data]}
result = (Filters.command | DataFilter('blah'))(update)
assert result
result = (DataFilter('blah1') | DataFilter('blah2'))(update)
assert result['test'] == ['blah1']
update.message.text = 'test'
result = (Filters.command | DataFilter('blah'))(update)
assert result['test'] == ['blah']
def test_filters_via_bot_init(self):
with pytest.raises(RuntimeError, match='in conjunction with'):
Filters.via_bot(bot_id=1, username='bot')
def test_filters_via_bot_allow_empty(self, update):
assert not Filters.via_bot()(update)
assert Filters.via_bot(allow_empty=True)(update)
def test_filters_via_bot_id(self, update):
assert not Filters.via_bot(bot_id=1)(update)
update.message.via_bot.id = 1
assert Filters.via_bot(bot_id=1)(update)
update.message.via_bot.id = 2
assert Filters.via_bot(bot_id=[1, 2])(update)
assert not Filters.via_bot(bot_id=[3, 4])(update)
update.message.via_bot = None
assert not Filters.via_bot(bot_id=[3, 4])(update)
def test_filters_via_bot_username(self, update):
    """Filters.via_bot(username=...) must match against the via_bot username,
    with or without a leading '@', and be falsy once via_bot is unset."""
    assert not Filters.via_bot(username='bot')(update)
    assert not Filters.via_bot(username='Testbot')(update)
    update.message.via_bot.username = 'bot@'
    assert Filters.via_bot(username='@bot@')(update)
    assert Filters.via_bot(username='bot@')(update)
    assert Filters.via_bot(username=['bot1', 'bot@', 'bot2'])(update)
    assert not Filters.via_bot(username=['@username', '@bot_2'])(update)
    update.message.via_bot = None
    # Bug fix: this asserted Filters.user(...) -- a copy/paste slip from the
    # user-filter tests. This test must exercise Filters.via_bot.
    assert not Filters.via_bot(username=['@username', '@bot_2'])(update)
def test_filters_via_bot_change_id(self, update):
f = Filters.via_bot(bot_id=3)
assert f.bot_ids == {3}
update.message.via_bot.id = 3
assert f(update)
update.message.via_bot.id = 2
assert not f(update)
f.bot_ids = 2
assert f.bot_ids == {2}
assert f(update)
with pytest.raises(RuntimeError, match='username in conjunction'):
f.usernames = 'user'
def test_filters_via_bot_change_username(self, update):
f = Filters.via_bot(username='bot')
update.message.via_bot.username = 'bot'
assert f(update)
update.message.via_bot.username = 'Bot'
assert not f(update)
f.usernames = 'Bot'
assert f(update)
with pytest.raises(RuntimeError, match='bot_id in conjunction'):
f.bot_ids = 1
def test_filters_via_bot_add_user_by_name(self, update):
users = ['bot_a', 'bot_b', 'bot_c']
f = Filters.via_bot()
for user in users:
update.message.via_bot.username = user
assert not f(update)
f.add_usernames('bot_a')
f.add_usernames(['bot_b', 'bot_c'])
for user in users:
update.message.via_bot.username = user
assert f(update)
with pytest.raises(RuntimeError, match='bot_id in conjunction'):
f.add_bot_ids(1)
def test_filters_via_bot_add_user_by_id(self, update):
    """add_bot_ids must make the filter match each added id, and mixing in
    usernames afterwards must raise."""
    bot_ids = [1, 2, 3]
    f = Filters.via_bot()
    for bot_id in bot_ids:
        update.message.via_bot.id = bot_id
        assert not f(update)
    f.add_bot_ids(1)
    f.add_bot_ids([2, 3])
    for bot_id in bot_ids:
        # Bug fix: this loop assigned update.message.via_bot.username, so the
        # filter kept matching on the stale id (3) left by the first loop and
        # ids 1 and 2 were never actually exercised. Assign .id instead.
        update.message.via_bot.id = bot_id
        assert f(update)
    with pytest.raises(RuntimeError, match='username in conjunction'):
        f.add_usernames('bot')
def test_filters_via_bot_remove_user_by_name(self, update):
users = ['bot_a', 'bot_b', 'bot_c']
f = Filters.via_bot(username=users)
with pytest.raises(RuntimeError, match='bot_id in conjunction'):
f.remove_bot_ids(1)
for user in users:
update.message.via_bot.username = user
assert f(update)
f.remove_usernames('bot_a')
f.remove_usernames(['bot_b', 'bot_c'])
for user in users:
update.message.via_bot.username = user
assert not f(update)
def test_filters_via_bot_remove_user_by_id(self, update):
    """remove_bot_ids must stop the filter matching each removed id."""
    bot_ids = [1, 2, 3]
    f = Filters.via_bot(bot_id=bot_ids)
    with pytest.raises(RuntimeError, match='username in conjunction'):
        f.remove_usernames('bot')
    for bot_id in bot_ids:
        update.message.via_bot.id = bot_id
        assert f(update)
    f.remove_bot_ids(1)
    f.remove_bot_ids([2, 3])
    for bot_id in bot_ids:
        # Bug fix: this loop assigned update.message.via_bot.username (with
        # integer values) instead of .id, so the per-id behaviour after
        # removal was never actually tested. Assign .id instead.
        update.message.via_bot.id = bot_id
        assert not f(update)
def test_filters_via_bot_repr(self):
    """str() of the via_bot filter reflects its current ids/usernames."""
    f = Filters.via_bot([1, 2])
    assert str(f) == 'Filters.via_bot(1, 2)'
    f.remove_bot_ids(1)
    f.remove_bot_ids(2)
    assert str(f) == 'Filters.via_bot()'
    f.add_usernames('@foobar')
    assert str(f) == 'Filters.via_bot(foobar)'
    f.add_usernames('@barfoo')
    assert str(f).startswith('Filters.via_bot(')
    # we don't know the exact order
    assert 'barfoo' in str(f) and 'foobar' in str(f)
    with pytest.raises(RuntimeError, match='Cannot set name'):
        f.name = 'foo'
def test_filters_attachment(self, update):
assert not Filters.attachment(update)
# we need to define a new Update (or rather, message class) here because
# effective_attachment is only evaluated once per instance, and the filter relies on that
up = Update(
0,
Message(
0,
datetime.datetime.utcnow(),
Chat(0, 'private'),
document=Document("str", "other_str"),
),
)
assert Filters.attachment(up)
| lgpl-3.0 |
emmuchira/kps_erp | erpnext/patches/v6_4/fix_expense_included_in_valuation.py | 29 | 3199 | # Copyright (c) 2013, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import print_function, unicode_literals
import frappe
from frappe.utils import cstr
def execute():
    """Patch: rebuild GL Entries for submitted, non-opening Purchase Invoices
    that carry valuation-related tax on stock items but are not linked to a
    Purchase Receipt, for postings after each company's "frozen" date.

    For such invoices the 'Expenses Included in Valuation' GL entry was
    posted incorrectly; their GL entries are deleted and regenerated.
    """
    for company in frappe.db.sql("select name, expenses_included_in_valuation from tabCompany", as_dict=1):
        # Do not touch anything on or before this date (accounts-frozen
        # period / last manual adjustment / last period closing).
        frozen_date = get_frozen_date(company.name, company.expenses_included_in_valuation)

        # Purchase Invoices after frozen date
        # which are not against Receipt, but valuation related tax is there
        pi_list = frappe.db.sql("""
            select distinct pi.name
            from `tabPurchase Invoice` pi, `tabPurchase Invoice Item` pi_item
            where
                pi.name = pi_item.parent
                and pi.company = %s
                and pi.posting_date > %s
                and pi.docstatus = 1
                and pi.is_opening = 'No'
                and (pi_item.item_tax_amount is not null and pi_item.item_tax_amount > 0)
                and (pi_item.purchase_receipt is null or pi_item.purchase_receipt = '')
                and (pi_item.item_code is not null and pi_item.item_code != '')
                and exists(select name from `tabItem` where name=pi_item.item_code and is_stock_item=1)
        """, (company.name, frozen_date), as_dict=1)

        for pi in pi_list:
            # Check whether gle exists for Expenses Included in Valuation account against the PI
            gle_for_expenses_included_in_valuation = frappe.db.sql("""select name from `tabGL Entry`
                where voucher_type='Purchase Invoice' and voucher_no=%s and account=%s""",
                (pi.name, company.expenses_included_in_valuation))

            if gle_for_expenses_included_in_valuation:
                print(pi.name)
                # Drop ALL GL entries of the invoice; they are regenerated
                # below by make_gl_entries().
                frappe.db.sql("""delete from `tabGL Entry`
                    where voucher_type='Purchase Invoice' and voucher_no=%s""", pi.name)

                purchase_invoice = frappe.get_doc("Purchase Invoice", pi.name)

                # some old entries have missing expense accounts
                if purchase_invoice.against_expense_account:
                    expense_account = purchase_invoice.against_expense_account.split(",")
                    if len(expense_account) == 1:
                        expense_account = expense_account[0]

                        # Backfill the single expense account onto items that
                        # lack one, without bumping their modified timestamp.
                        for item in purchase_invoice.items:
                            if not item.expense_account:
                                item.db_set("expense_account", expense_account, update_modified=False)

                purchase_invoice.make_gl_entries()
def get_frozen_date(company, account):
    """Return the latest of: the accounts-frozen-upto date, the last manual
    adjustment Journal Entry against `account`, and the last Period Closing
    Voucher for `company` -- as an ISO date string.

    Falls back to '1900-01-01' when none of them exist.
    """
    # Accounting frozen upto. Normalize through cstr() so date objects and
    # None compare uniformly with the other ISO date strings below.
    accounts_frozen_upto = cstr(frappe.db.get_single_value("Accounts Settings", "acc_frozen_upto"))

    # Last adjustment entry to correct Expenses Included in Valuation account balance
    last_adjustment_entry = frappe.db.sql("""select posting_date from `tabGL Entry`
        where account=%s and company=%s and voucher_type = 'Journal Entry'
        order by posting_date desc limit 1""", (account, company))
    last_adjustment_date = cstr(last_adjustment_entry[0][0]) if last_adjustment_entry else None

    # Last period closing voucher
    last_closing_entry = frappe.db.sql("""select posting_date from `tabGL Entry`
        where company=%s and voucher_type = 'Period Closing Voucher'
        order by posting_date desc limit 1""", company)
    last_closing_date = cstr(last_closing_entry[0][0]) if last_closing_entry else None

    # Bug fix: max() over a list containing None raises TypeError on
    # Python 3. Compare only the dates that actually exist; ISO date
    # strings sort chronologically.
    dates = [d for d in (accounts_frozen_upto, last_adjustment_date, last_closing_date) if d]
    return max(dates) if dates else '1900-01-01'
| gpl-3.0 |
bolkedebruin/airflow | airflow/providers/ftp/hooks/ftp.py | 2 | 10380 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import datetime
import ftplib
import os.path
from airflow.hooks.base_hook import BaseHook
def mlsd(conn, path="", facts=None):
    """
    BACKPORT FROM PYTHON3 FTPLIB.

    List a directory in a standardized format using the MLSD command
    (RFC-3659) and yield a ``(name, facts_dict)`` tuple per entry found.

    If *path* is omitted the current directory is assumed. *facts* is an
    optional list of fact names (e.g. ``["type", "size", "perm"]``) that is
    requested from the server via ``OPTS MLST`` before listing.
    """
    if facts:
        # Ask the server to limit the MLSD response to the requested facts.
        conn.sendcmd("OPTS MLST " + ";".join(facts) + ";")
    cmd = "MLSD %s" % path if path else "MLSD"
    lines = []
    conn.retrlines(cmd, lines.append)
    for raw in lines:
        # Each line looks like "type=file;size=100; name"; the fact block
        # ends with ';' followed by a single space before the entry name.
        fact_block, _, name = raw.rstrip(ftplib.CRLF).partition(' ')
        entry = {}
        for pair in fact_block[:-1].split(";"):
            key, _, value = pair.partition("=")
            entry[key.lower()] = value
        yield (name, entry)
class FTPHook(BaseHook):
    """
    Interact with FTP.

    Errors that may occur throughout but should be handled downstream.

    You can specify mode for data transfers in the extra field of your
    connection as ``{"passive": "true"}``.
    """

    def __init__(self, ftp_conn_id='ftp_default'):
        self.ftp_conn_id = ftp_conn_id
        self.conn = None

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        # Close the lazily opened connection when used as a context manager.
        if self.conn is not None:
            self.close_conn()

    def get_conn(self):
        """
        Returns a FTP connection object, creating and caching it on first use.
        """
        if self.conn is None:
            params = self.get_connection(self.ftp_conn_id)
            pasv = params.extra_dejson.get("passive", True)
            self.conn = ftplib.FTP(params.host, params.login, params.password)
            self.conn.set_pasv(pasv)
        return self.conn

    def close_conn(self):
        """
        Closes the connection. An error will occur if the
        connection wasn't ever opened.
        """
        conn = self.conn
        conn.quit()
        self.conn = None

    def describe_directory(self, path):
        """
        Returns a dictionary of {filename: {attributes}} for all files
        on the remote system (where the MLSD command is supported).

        :param path: full path to the remote directory
        :type path: str
        """
        conn = self.get_conn()
        conn.cwd(path)
        try:
            # only works in Python 3
            files = dict(conn.mlsd())
        except AttributeError:
            # Python 2's ftplib has no FTP.mlsd; use the module-level backport.
            files = dict(mlsd(conn))
        return files

    def list_directory(self, path, nlst=False):
        """
        Returns a list of files on the remote system.

        :param path: full path to the remote directory to list
        :type path: str
        :param nlst: unused; kept only for backwards compatibility of the
            signature (listing always uses the NLST command)
        :type nlst: bool
        """
        conn = self.get_conn()
        conn.cwd(path)
        files = conn.nlst()
        return files

    def create_directory(self, path):
        """
        Creates a directory on the remote system.

        :param path: full path to the remote directory to create
        :type path: str
        """
        conn = self.get_conn()
        conn.mkd(path)

    def delete_directory(self, path):
        """
        Deletes a directory on the remote system.

        :param path: full path to the remote directory to delete
        :type path: str
        """
        conn = self.get_conn()
        conn.rmd(path)

    def retrieve_file(
            self,
            remote_full_path,
            local_full_path_or_buffer,
            callback=None):
        """
        Transfers the remote file to a local location.

        If local_full_path_or_buffer is a string path, the file will be put
        at that location; if it is a file-like buffer, the file will
        be written to the buffer but not closed.

        :param remote_full_path: full path to the remote file
        :type remote_full_path: str
        :param local_full_path_or_buffer: full path to the local file or a
            file-like buffer
        :type local_full_path_or_buffer: str or file-like buffer
        :param callback: callback which is called each time a block of data
            is read. if you do not use a callback, these blocks will be written
            to the file or buffer passed in. if you do pass in a callback, note
            that writing to a file or buffer will need to be handled inside the
            callback.
            [default: output_handle.write()]
        :type callback: callable

        .. code-block:: python

            hook = FTPHook(ftp_conn_id='my_conn')

            remote_path = '/path/to/remote/file'
            local_path = '/path/to/local/file'

            # with a custom callback (in this case displaying progress on each read)
            def print_progress(percent_progress):
                self.log.info('Percent Downloaded: %s%%' % percent_progress)

            total_downloaded = 0
            total_file_size = hook.get_size(remote_path)
            output_handle = open(local_path, 'wb')
            def write_to_file_with_progress(data):
                total_downloaded += len(data)
                output_handle.write(data)
                percent_progress = (total_downloaded / total_file_size) * 100
                print_progress(percent_progress)
            hook.retrieve_file(remote_path, None, callback=write_to_file_with_progress)

            # without a custom callback data is written to the local_path
            hook.retrieve_file(remote_path, local_path)
        """
        conn = self.get_conn()

        is_path = isinstance(local_full_path_or_buffer, str)

        # without a callback, default to writing to a user-provided file or
        # file-like buffer
        if not callback:
            if is_path:
                output_handle = open(local_full_path_or_buffer, 'wb')
            else:
                output_handle = local_full_path_or_buffer
            callback = output_handle.write
        else:
            output_handle = None

        remote_path, remote_file_name = os.path.split(remote_full_path)
        conn.cwd(remote_path)
        self.log.info('Retrieving file from FTP: %s', remote_full_path)
        try:
            conn.retrbinary('RETR %s' % remote_file_name, callback)
            self.log.info('Finished retrieving file from FTP: %s',
                          remote_full_path)
        finally:
            # Close the handle only if we opened it ourselves, and do so even
            # when the transfer fails -- previously a failed RETR leaked the
            # partially written local file handle.
            if is_path and output_handle:
                output_handle.close()

    def store_file(self, remote_full_path, local_full_path_or_buffer):
        """
        Transfers a local file to the remote location.

        If local_full_path_or_buffer is a string path, the file will be read
        from that location; if it is a file-like buffer, the file will
        be read from the buffer but not closed.

        :param remote_full_path: full path to the remote file
        :type remote_full_path: str
        :param local_full_path_or_buffer: full path to the local file or a
            file-like buffer
        :type local_full_path_or_buffer: str or file-like buffer
        """
        conn = self.get_conn()

        is_path = isinstance(local_full_path_or_buffer, str)

        if is_path:
            input_handle = open(local_full_path_or_buffer, 'rb')
        else:
            input_handle = local_full_path_or_buffer
        remote_path, remote_file_name = os.path.split(remote_full_path)
        conn.cwd(remote_path)
        try:
            conn.storbinary('STOR %s' % remote_file_name, input_handle)
        finally:
            # Close the handle only if we opened it ourselves, even when the
            # upload fails -- previously a failed STOR leaked the handle.
            if is_path:
                input_handle.close()

    def delete_file(self, path):
        """
        Removes a file on the FTP Server.

        :param path: full path to the remote file
        :type path: str
        """
        conn = self.get_conn()
        conn.delete(path)

    def rename(self, from_name, to_name):
        """
        Rename a file.

        :param from_name: rename file from name
        :param to_name: rename file to name
        """
        conn = self.get_conn()
        return conn.rename(from_name, to_name)

    def get_mod_time(self, path):
        """
        Returns a datetime object representing the last time the file was modified

        :param path: remote file path
        :type path: str
        """
        conn = self.get_conn()
        ftp_mdtm = conn.sendcmd('MDTM ' + path)
        time_val = ftp_mdtm[4:]
        # time_val optionally has microseconds
        try:
            return datetime.datetime.strptime(time_val, "%Y%m%d%H%M%S.%f")
        except ValueError:
            return datetime.datetime.strptime(time_val, '%Y%m%d%H%M%S')

    def get_size(self, path):
        """
        Returns the size of a file (in bytes)

        :param path: remote file path
        :type path: str
        """
        conn = self.get_conn()
        return conn.size(path)
class FTPSHook(FTPHook):
    """FTP-over-TLS variant of FTPHook."""

    def get_conn(self):
        """
        Returns a FTPS connection object, creating and caching it on first use.
        """
        if self.conn is None:
            params = self.get_connection(self.ftp_conn_id)
            pasv = params.extra_dejson.get("passive", True)

            # Bug fix: previously a custom port was applied by mutating the
            # class attribute ftplib.FTP_TLS.port, which leaked the port to
            # every other FTP_TLS connection in the process. Connect
            # explicitly instead so the port is per-connection.
            self.conn = ftplib.FTP_TLS()
            self.conn.connect(params.host, params.port or ftplib.FTP_PORT)
            self.conn.login(params.login, params.password)
            self.conn.set_pasv(pasv)
        return self.conn
| apache-2.0 |
nugget/home-assistant | homeassistant/components/device_tracker/xiaomi.py | 18 | 5820 | """
Support for Xiaomi Mi routers.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/device_tracker.xiaomi/
"""
import logging
import requests
import voluptuous as vol
import homeassistant.helpers.config_validation as cv
from homeassistant.components.device_tracker import (
DOMAIN, PLATFORM_SCHEMA, DeviceScanner)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME, default='admin'): cv.string,
vol.Required(CONF_PASSWORD): cv.string
})
def get_scanner(hass, config):
    """Validate the configuration and return a Xiaomi Device Scanner."""
    scanner = XiaomiDeviceScanner(config[DOMAIN])
    if scanner.success_init:
        return scanner
    return None
class XiaomiDeviceScanner(DeviceScanner):
    """This class queries a Xiaomi Mi router.

    Adapted from Luci scanner.
    """

    def __init__(self, config):
        """Initialize the scanner."""
        self.host = config[CONF_HOST]
        self.username = config[CONF_USERNAME]
        self.password = config[CONF_PASSWORD]
        # Last known list of connected MAC addresses (see _store_result).
        self.last_results = {}
        # Auth token for the router API; None when login failed.
        self.token = _get_token(self.host, self.username, self.password)
        # Lazily built mapping of MAC -> device name (see get_device_name).
        self.mac2name = None
        self.success_init = self.token is not None

    def scan_devices(self):
        """Scan for new devices and return a list with found device IDs."""
        self._update_info()
        return self.last_results

    def get_device_name(self, device):
        """Return the name of the given device or None if we don't know."""
        if self.mac2name is None:
            result = self._retrieve_list_with_retry()
            if result:
                # Only entries that carry both a MAC and a name are usable.
                hosts = [x for x in result
                         if 'mac' in x and 'name' in x]
                mac2name_list = [
                    (x['mac'].upper(), x['name']) for x in hosts]
                self.mac2name = dict(mac2name_list)
            else:
                # Error, handled in the _retrieve_list_with_retry
                return
        return self.mac2name.get(device.upper(), None)

    def _update_info(self):
        """Ensure the information from the router are up to date.

        Returns true if scanning successful.
        """
        if not self.success_init:
            return False

        result = self._retrieve_list_with_retry()
        if result:
            self._store_result(result)
            return True
        return False

    def _retrieve_list_with_retry(self):
        """Retrieve the device list with a retry if token is invalid.

        Return the list if successful.
        """
        _LOGGER.info("Refreshing device list")
        result = _retrieve_list(self.host, self.token)
        if result:
            return result

        # A stale token is the most likely failure; refresh it and retry once.
        _LOGGER.info("Refreshing token and retrying device list refresh")
        self.token = _get_token(self.host, self.username, self.password)
        return _retrieve_list(self.host, self.token)

    def _store_result(self, result):
        """Extract and store the device list in self.last_results."""
        self.last_results = []
        for device_entry in result:
            # Check if the device is marked as connected
            if int(device_entry['online']) == 1:
                self.last_results.append(device_entry['mac'])
def _retrieve_list(host, token, **kwargs):
    """Get device list for the given host.

    Returns the parsed device list, or None on any transport, parse or
    router-side error.
    """
    url = "http://{}/cgi-bin/luci/;stok={}/api/misystem/devicelist"
    url = url.format(host, token)
    try:
        res = requests.get(url, timeout=5, **kwargs)
    except requests.exceptions.Timeout:
        _LOGGER.exception(
            "Connection to the router timed out at URL %s", url)
        return
    if res.status_code != 200:
        # Fix: this is not an exception handler, so use error() --
        # _LOGGER.exception() here would log a bogus 'NoneType: None'
        # traceback.
        _LOGGER.error(
            "Connection failed with http code %s", res.status_code)
        return
    try:
        result = res.json()
    except ValueError:
        # If json decoder could not parse the response
        _LOGGER.exception("Failed to parse response from mi router")
        return
    try:
        xiaomi_code = result['code']
    except KeyError:
        _LOGGER.exception(
            "No field code in response from mi router. %s", result)
        return
    if xiaomi_code == 0:
        try:
            return result['list']
        except KeyError:
            _LOGGER.exception("No list in response from mi router. %s", result)
            return
    else:
        _LOGGER.info(
            "Receive wrong Xiaomi code %s, expected 0 in response %s",
            xiaomi_code, result)
        return
def _get_token(host, username, password):
    """Get authentication token for the given host+username+password.

    Returns the token string, or None when the login fails.
    """
    url = 'http://{}/cgi-bin/luci/api/xqsystem/login'.format(host)
    data = {'username': username, 'password': password}
    try:
        res = requests.post(url, data=data, timeout=5)
    except requests.exceptions.Timeout:
        _LOGGER.exception("Connection to the router timed out")
        return
    if res.status_code != 200:
        _LOGGER.error('Invalid response: [%s] at url: [%s] with data [%s]',
                      res, url, data)
        return
    try:
        result = res.json()
    except ValueError:
        # If JSON decoder could not parse the response
        _LOGGER.exception("Failed to parse response from mi router")
        return
    try:
        return result['token']
    except KeyError:
        error_message = ("Xiaomi token cannot be refreshed, response from "
                         "url: [%s] \nwith parameter: [%s] \nwas: [%s]")
        _LOGGER.exception(error_message, url, data, result)
        return
| apache-2.0 |
jscn/django | tests/aggregation/tests.py | 31 | 45900 | from __future__ import unicode_literals
import datetime
import re
from decimal import Decimal
from django.core.exceptions import FieldError
from django.db import connection
from django.db.models import (
Avg, Count, DecimalField, DurationField, F, FloatField, Func, IntegerField,
Max, Min, Sum, Value,
)
from django.test import TestCase
from django.test.utils import Approximate, CaptureQueriesContext
from django.utils import timezone
from .models import Author, Book, Publisher, Store
class AggregateTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.a1 = Author.objects.create(name='Adrian Holovaty', age=34)
cls.a2 = Author.objects.create(name='Jacob Kaplan-Moss', age=35)
cls.a3 = Author.objects.create(name='Brad Dayley', age=45)
cls.a4 = Author.objects.create(name='James Bennett', age=29)
cls.a5 = Author.objects.create(name='Jeffrey Forcier', age=37)
cls.a6 = Author.objects.create(name='Paul Bissex', age=29)
cls.a7 = Author.objects.create(name='Wesley J. Chun', age=25)
cls.a8 = Author.objects.create(name='Peter Norvig', age=57)
cls.a9 = Author.objects.create(name='Stuart Russell', age=46)
cls.a1.friends.add(cls.a2, cls.a4)
cls.a2.friends.add(cls.a1, cls.a7)
cls.a4.friends.add(cls.a1)
cls.a5.friends.add(cls.a6, cls.a7)
cls.a6.friends.add(cls.a5, cls.a7)
cls.a7.friends.add(cls.a2, cls.a5, cls.a6)
cls.a8.friends.add(cls.a9)
cls.a9.friends.add(cls.a8)
cls.p1 = Publisher.objects.create(name='Apress', num_awards=3, duration=datetime.timedelta(days=1))
cls.p2 = Publisher.objects.create(name='Sams', num_awards=1, duration=datetime.timedelta(days=2))
cls.p3 = Publisher.objects.create(name='Prentice Hall', num_awards=7)
cls.p4 = Publisher.objects.create(name='Morgan Kaufmann', num_awards=9)
cls.p5 = Publisher.objects.create(name="Jonno's House of Books", num_awards=0)
cls.b1 = Book.objects.create(
isbn='159059725', name='The Definitive Guide to Django: Web Development Done Right',
pages=447, rating=4.5, price=Decimal('30.00'), contact=cls.a1, publisher=cls.p1,
pubdate=datetime.date(2007, 12, 6)
)
cls.b2 = Book.objects.create(
isbn='067232959', name='Sams Teach Yourself Django in 24 Hours',
pages=528, rating=3.0, price=Decimal('23.09'), contact=cls.a3, publisher=cls.p2,
pubdate=datetime.date(2008, 3, 3)
)
cls.b3 = Book.objects.create(
isbn='159059996', name='Practical Django Projects',
pages=300, rating=4.0, price=Decimal('29.69'), contact=cls.a4, publisher=cls.p1,
pubdate=datetime.date(2008, 6, 23)
)
cls.b4 = Book.objects.create(
isbn='013235613', name='Python Web Development with Django',
pages=350, rating=4.0, price=Decimal('29.69'), contact=cls.a5, publisher=cls.p3,
pubdate=datetime.date(2008, 11, 3)
)
cls.b5 = Book.objects.create(
isbn='013790395', name='Artificial Intelligence: A Modern Approach',
pages=1132, rating=4.0, price=Decimal('82.80'), contact=cls.a8, publisher=cls.p3,
pubdate=datetime.date(1995, 1, 15)
)
cls.b6 = Book.objects.create(
isbn='155860191', name='Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp',
pages=946, rating=5.0, price=Decimal('75.00'), contact=cls.a8, publisher=cls.p4,
pubdate=datetime.date(1991, 10, 15)
)
cls.b1.authors.add(cls.a1, cls.a2)
cls.b2.authors.add(cls.a3)
cls.b3.authors.add(cls.a4)
cls.b4.authors.add(cls.a5, cls.a6, cls.a7)
cls.b5.authors.add(cls.a8, cls.a9)
cls.b6.authors.add(cls.a8)
s1 = Store.objects.create(
name='Amazon.com',
original_opening=datetime.datetime(1994, 4, 23, 9, 17, 42),
friday_night_closing=datetime.time(23, 59, 59)
)
s2 = Store.objects.create(
name='Books.com',
original_opening=datetime.datetime(2001, 3, 15, 11, 23, 37),
friday_night_closing=datetime.time(23, 59, 59)
)
s3 = Store.objects.create(
name="Mamma and Pappa's Books",
original_opening=datetime.datetime(1945, 4, 25, 16, 24, 14),
friday_night_closing=datetime.time(21, 30)
)
s1.books.add(cls.b1, cls.b2, cls.b3, cls.b4, cls.b5, cls.b6)
s2.books.add(cls.b1, cls.b3, cls.b5, cls.b6)
s3.books.add(cls.b3, cls.b4, cls.b6)
def test_empty_aggregate(self):
self.assertEqual(Author.objects.all().aggregate(), {})
def test_aggregate_in_order_by(self):
msg = (
'Using an aggregate in order_by() without also including it in '
'annotate() is not allowed: Avg(F(book__rating)'
)
with self.assertRaisesMessage(FieldError, msg):
Author.objects.values('age').order_by(Avg('book__rating'))
def test_single_aggregate(self):
vals = Author.objects.aggregate(Avg("age"))
self.assertEqual(vals, {"age__avg": Approximate(37.4, places=1)})
def test_multiple_aggregates(self):
vals = Author.objects.aggregate(Sum("age"), Avg("age"))
self.assertEqual(vals, {"age__sum": 337, "age__avg": Approximate(37.4, places=1)})
def test_filter_aggregate(self):
vals = Author.objects.filter(age__gt=29).aggregate(Sum("age"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["age__sum"], 254)
def test_related_aggregate(self):
vals = Author.objects.aggregate(Avg("friends__age"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["friends__age__avg"], 34.07, places=2)
vals = Book.objects.filter(rating__lt=4.5).aggregate(Avg("authors__age"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["authors__age__avg"], 38.2857, places=2)
vals = Author.objects.all().filter(name__contains="a").aggregate(Avg("book__rating"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__rating__avg"], 4.0)
vals = Book.objects.aggregate(Sum("publisher__num_awards"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["publisher__num_awards__sum"], 30)
vals = Publisher.objects.aggregate(Sum("book__price"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__price__sum"], Decimal("270.27"))
def test_aggregate_multi_join(self):
vals = Store.objects.aggregate(Max("books__authors__age"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["books__authors__age__max"], 57)
vals = Author.objects.aggregate(Min("book__publisher__num_awards"))
self.assertEqual(len(vals), 1)
self.assertEqual(vals["book__publisher__num_awards__min"], 1)
def test_aggregate_alias(self):
vals = Store.objects.filter(name="Amazon.com").aggregate(amazon_mean=Avg("books__rating"))
self.assertEqual(len(vals), 1)
self.assertAlmostEqual(vals["amazon_mean"], 4.08, places=2)
def test_annotate_basic(self):
self.assertQuerysetEqual(
Book.objects.annotate().order_by('pk'), [
"The Definitive Guide to Django: Web Development Done Right",
"Sams Teach Yourself Django in 24 Hours",
"Practical Django Projects",
"Python Web Development with Django",
"Artificial Intelligence: A Modern Approach",
"Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp"
],
lambda b: b.name
)
books = Book.objects.annotate(mean_age=Avg("authors__age"))
b = books.get(pk=self.b1.pk)
self.assertEqual(
b.name,
'The Definitive Guide to Django: Web Development Done Right'
)
self.assertEqual(b.mean_age, 34.5)
def test_annotate_defer(self):
qs = Book.objects.annotate(
page_sum=Sum("pages")).defer('name').filter(pk=self.b1.pk)
rows = [
(1, "159059725", 447, "The Definitive Guide to Django: Web Development Done Right")
]
self.assertQuerysetEqual(
qs.order_by('pk'), rows,
lambda r: (r.id, r.isbn, r.page_sum, r.name)
)
def test_annotate_defer_select_related(self):
qs = Book.objects.select_related('contact').annotate(
page_sum=Sum("pages")).defer('name').filter(pk=self.b1.pk)
rows = [
(1, "159059725", 447, "Adrian Holovaty",
"The Definitive Guide to Django: Web Development Done Right")
]
self.assertQuerysetEqual(
qs.order_by('pk'), rows,
lambda r: (r.id, r.isbn, r.page_sum, r.contact.name, r.name)
)
def test_annotate_m2m(self):
books = Book.objects.filter(rating__lt=4.5).annotate(Avg("authors__age")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 51.5),
('Practical Django Projects', 29.0),
('Python Web Development with Django', Approximate(30.3, places=1)),
('Sams Teach Yourself Django in 24 Hours', 45.0)
],
lambda b: (b.name, b.authors__age__avg),
)
books = Book.objects.annotate(num_authors=Count("authors")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 2),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 1),
('Practical Django Projects', 1),
('Python Web Development with Django', 3),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 2)
],
lambda b: (b.name, b.num_authors)
)
def test_backwards_m2m_annotate(self):
authors = Author.objects.filter(name__contains="a").annotate(Avg("book__rating")).order_by("name")
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 4.5),
('Brad Dayley', 3.0),
('Jacob Kaplan-Moss', 4.5),
('James Bennett', 4.0),
('Paul Bissex', 4.0),
('Stuart Russell', 4.0)
],
lambda a: (a.name, a.book__rating__avg)
)
authors = Author.objects.annotate(num_books=Count("book")).order_by("name")
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 1),
('Brad Dayley', 1),
('Jacob Kaplan-Moss', 1),
('James Bennett', 1),
('Jeffrey Forcier', 1),
('Paul Bissex', 1),
('Peter Norvig', 2),
('Stuart Russell', 1),
('Wesley J. Chun', 1)
],
lambda a: (a.name, a.num_books)
)
def test_reverse_fkey_annotate(self):
books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name")
self.assertQuerysetEqual(
books, [
('Artificial Intelligence: A Modern Approach', 7),
('Paradigms of Artificial Intelligence Programming: Case Studies in Common Lisp', 9),
('Practical Django Projects', 3),
('Python Web Development with Django', 7),
('Sams Teach Yourself Django in 24 Hours', 1),
('The Definitive Guide to Django: Web Development Done Right', 3)
],
lambda b: (b.name, b.publisher__num_awards__sum)
)
publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name")
self.assertQuerysetEqual(
publishers, [
('Apress', Decimal("59.69")),
("Jonno's House of Books", None),
('Morgan Kaufmann', Decimal("75.00")),
('Prentice Hall', Decimal("112.49")),
('Sams', Decimal("23.09"))
],
lambda p: (p.name, p.book__price__sum)
)
def test_annotate_values(self):
books = list(Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values())
self.assertEqual(
books, [
{
"contact_id": 1,
"id": 1,
"isbn": "159059725",
"mean_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": 1,
"rating": 4.5,
}
]
)
books = (
Book.objects
.filter(pk=self.b1.pk)
.annotate(mean_age=Avg('authors__age'))
.values('pk', 'isbn', 'mean_age')
)
self.assertEqual(
list(books), [
{
"pk": 1,
"isbn": "159059725",
"mean_age": 34.5,
}
]
)
books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values("name")
self.assertEqual(
list(books), [
{
"name": "The Definitive Guide to Django: Web Development Done Right"
}
]
)
books = Book.objects.filter(pk=self.b1.pk).values().annotate(mean_age=Avg('authors__age'))
self.assertEqual(
list(books), [
{
"contact_id": 1,
"id": 1,
"isbn": "159059725",
"mean_age": 34.5,
"name": "The Definitive Guide to Django: Web Development Done Right",
"pages": 447,
"price": Approximate(Decimal("30")),
"pubdate": datetime.date(2007, 12, 6),
"publisher_id": 1,
"rating": 4.5,
}
]
)
books = (
Book.objects
.values("rating")
.annotate(n_authors=Count("authors__id"), mean_age=Avg("authors__age"))
.order_by("rating")
)
self.assertEqual(
list(books), [
{
"rating": 3.0,
"n_authors": 1,
"mean_age": 45.0,
},
{
"rating": 4.0,
"n_authors": 6,
"mean_age": Approximate(37.16, places=1)
},
{
"rating": 4.5,
"n_authors": 2,
"mean_age": 34.5,
},
{
"rating": 5.0,
"n_authors": 1,
"mean_age": 57.0,
}
]
)
authors = Author.objects.annotate(Avg("friends__age")).order_by("name")
self.assertEqual(len(authors), 9)
self.assertQuerysetEqual(
authors, [
('Adrian Holovaty', 32.0),
('Brad Dayley', None),
('Jacob Kaplan-Moss', 29.5),
('James Bennett', 34.0),
('Jeffrey Forcier', 27.0),
('Paul Bissex', 31.0),
('Peter Norvig', 46.0),
('Stuart Russell', 57.0),
('Wesley J. Chun', Approximate(33.66, places=1))
],
lambda a: (a.name, a.friends__age__avg)
)
def test_count(self):
vals = Book.objects.aggregate(Count("rating"))
self.assertEqual(vals, {"rating__count": 6})
vals = Book.objects.aggregate(Count("rating", distinct=True))
self.assertEqual(vals, {"rating__count": 4})
def test_count_star(self):
with self.assertNumQueries(1) as ctx:
Book.objects.aggregate(n=Count("*"))
sql = ctx.captured_queries[0]['sql']
self.assertIn('SELECT COUNT(*) ', sql)
def test_non_grouped_annotation_not_in_group_by(self):
"""
An annotation not included in values() before an aggregate should be
excluded from the group by clause.
"""
qs = (
Book.objects.annotate(xprice=F('price')).filter(rating=4.0).values('rating')
.annotate(count=Count('publisher_id', distinct=True)).values('count', 'rating').order_by('count')
)
self.assertEqual(
list(qs), [
{'rating': 4.0, 'count': 2},
]
)
def test_grouped_annotation_in_group_by(self):
"""
An annotation included in values() before an aggregate should be
included in the group by clause.
"""
qs = (
Book.objects.annotate(xprice=F('price')).filter(rating=4.0).values('rating', 'xprice')
.annotate(count=Count('publisher_id', distinct=True)).values('count', 'rating').order_by('count')
)
self.assertEqual(
list(qs), [
{'rating': 4.0, 'count': 1},
{'rating': 4.0, 'count': 2},
]
)
def test_fkey_aggregate(self):
explicit = list(Author.objects.annotate(Count('book__id')))
implicit = list(Author.objects.annotate(Count('book')))
self.assertEqual(explicit, implicit)
def test_annotate_ordering(self):
books = Book.objects.values('rating').annotate(oldest=Max('authors__age')).order_by('oldest', 'rating')
self.assertEqual(
list(books), [
{
"rating": 4.5,
"oldest": 35,
},
{
"rating": 3.0,
"oldest": 45
},
{
"rating": 4.0,
"oldest": 57,
},
{
"rating": 5.0,
"oldest": 57,
}
]
)
books = Book.objects.values("rating").annotate(oldest=Max("authors__age")).order_by("-oldest", "-rating")
self.assertEqual(
list(books), [
{
"rating": 5.0,
"oldest": 57,
},
{
"rating": 4.0,
"oldest": 57,
},
{
"rating": 3.0,
"oldest": 45,
},
{
"rating": 4.5,
"oldest": 35,
}
]
)
def test_aggregate_annotation(self):
vals = Book.objects.annotate(num_authors=Count("authors__id")).aggregate(Avg("num_authors"))
self.assertEqual(vals, {"num_authors__avg": Approximate(1.66, places=1)})
def test_avg_duration_field(self):
# Explicit `output_field`.
self.assertEqual(
Publisher.objects.aggregate(Avg('duration', output_field=DurationField())),
{'duration__avg': datetime.timedelta(days=1, hours=12)}
)
# Implicit `output_field`.
self.assertEqual(
Publisher.objects.aggregate(Avg('duration')),
{'duration__avg': datetime.timedelta(days=1, hours=12)}
)
def test_sum_duration_field(self):
self.assertEqual(
Publisher.objects.aggregate(Sum('duration', output_field=DurationField())),
{'duration__sum': datetime.timedelta(days=3)}
)
    def test_sum_distinct_aggregate(self):
        """
        Sum on a distinct() QuerySet should aggregate only the distinct items.
        """
        # One author wrote both books 5 and 6, so the plain join produces a
        # duplicate author row.
        authors = Author.objects.filter(book__in=[5, 6])
        self.assertEqual(authors.count(), 3)
        distinct_authors = authors.distinct()
        self.assertEqual(distinct_authors.count(), 2)
        # Selected author ages are 57 and 46
        age_sum = distinct_authors.aggregate(Sum('age'))
        self.assertEqual(age_sum['age__sum'], 103)
def test_filtering(self):
p = Publisher.objects.create(name='Expensive Publisher', num_awards=0)
Book.objects.create(
name='ExpensiveBook1',
pages=1,
isbn='111',
rating=3.5,
price=Decimal("1000"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008, 12, 1)
)
Book.objects.create(
name='ExpensiveBook2',
pages=1,
isbn='222',
rating=4.0,
price=Decimal("1000"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008, 12, 2)
)
Book.objects.create(
name='ExpensiveBook3',
pages=1,
isbn='333',
rating=4.5,
price=Decimal("35"),
publisher=p,
contact_id=1,
pubdate=datetime.date(2008, 12, 3)
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Apress",
"Sams",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name
)
publishers = (
Publisher.objects
.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1, book__price__lt=Decimal("40.0"))
.order_by("pk")
)
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = (
Publisher.objects
.filter(book__price__lt=Decimal("40.0"))
.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
.order_by("pk")
)
self.assertQuerysetEqual(
publishers, [
"Apress",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 3]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__range=[1, 2]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Sams",
"Prentice Hall",
"Morgan Kaufmann",
],
lambda p: p.name
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__in=[1, 3]).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Sams",
"Morgan Kaufmann",
"Expensive Publisher",
],
lambda p: p.name,
)
publishers = Publisher.objects.annotate(num_books=Count("book")).filter(num_books__isnull=True)
self.assertEqual(len(publishers), 0)
def test_annotation(self):
vals = Author.objects.filter(pk=self.a1.pk).aggregate(Count("friends__id"))
self.assertEqual(vals, {"friends__id__count": 2})
books = Book.objects.annotate(num_authors=Count("authors__name")).filter(num_authors__exact=2).order_by("pk")
self.assertQuerysetEqual(
books, [
"The Definitive Guide to Django: Web Development Done Right",
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name
)
authors = (
Author.objects
.annotate(num_friends=Count("friends__id", distinct=True))
.filter(num_friends=0)
.order_by("pk")
)
self.assertQuerysetEqual(
authors, [
"Brad Dayley",
],
lambda a: a.name
)
publishers = Publisher.objects.annotate(num_books=Count("book__id")).filter(num_books__gt=1).order_by("pk")
self.assertQuerysetEqual(
publishers, [
"Apress",
"Prentice Hall",
],
lambda p: p.name
)
publishers = (
Publisher.objects
.filter(book__price__lt=Decimal("40.0"))
.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
)
self.assertQuerysetEqual(
publishers, [
"Apress",
],
lambda p: p.name
)
books = (
Book.objects
.annotate(num_authors=Count("authors__id"))
.filter(authors__name__contains="Norvig", num_authors__gt=1)
)
self.assertQuerysetEqual(
books, [
"Artificial Intelligence: A Modern Approach",
],
lambda b: b.name
)
def test_more_aggregation(self):
a = Author.objects.get(name__contains='Norvig')
b = Book.objects.get(name__contains='Done Right')
b.authors.add(a)
b.save()
vals = (
Book.objects
.annotate(num_authors=Count("authors__id"))
.filter(authors__name__contains="Norvig", num_authors__gt=1)
.aggregate(Avg("rating"))
)
self.assertEqual(vals, {"rating__avg": 4.25})
def test_even_more_aggregate(self):
publishers = Publisher.objects.annotate(
earliest_book=Min("book__pubdate"),
).exclude(earliest_book=None).order_by("earliest_book").values(
'earliest_book',
'num_awards',
'id',
'name',
)
self.assertEqual(
list(publishers), [
{
'earliest_book': datetime.date(1991, 10, 15),
'num_awards': 9,
'id': 4,
'name': 'Morgan Kaufmann'
},
{
'earliest_book': datetime.date(1995, 1, 15),
'num_awards': 7,
'id': 3,
'name': 'Prentice Hall'
},
{
'earliest_book': datetime.date(2007, 12, 6),
'num_awards': 3,
'id': 1,
'name': 'Apress'
},
{
'earliest_book': datetime.date(2008, 3, 3),
'num_awards': 1,
'id': 2,
'name': 'Sams'
}
]
)
vals = Store.objects.aggregate(Max("friday_night_closing"), Min("original_opening"))
self.assertEqual(
vals,
{
"friday_night_closing__max": datetime.time(23, 59, 59),
"original_opening__min": datetime.datetime(1945, 4, 25, 16, 24, 14),
}
)
def test_annotate_values_list(self):
books = (
Book.objects
.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values_list("pk", "isbn", "mean_age")
)
self.assertEqual(
list(books), [
(1, "159059725", 34.5),
]
)
books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("isbn")
self.assertEqual(
list(books), [
('159059725',)
]
)
books = Book.objects.filter(pk=self.b1.pk).annotate(mean_age=Avg("authors__age")).values_list("mean_age")
self.assertEqual(
list(books), [
(34.5,)
]
)
books = (
Book.objects
.filter(pk=self.b1.pk)
.annotate(mean_age=Avg("authors__age"))
.values_list("mean_age", flat=True)
)
self.assertEqual(list(books), [34.5])
books = Book.objects.values_list("price").annotate(count=Count("price")).order_by("-count", "price")
self.assertEqual(
list(books), [
(Decimal("29.69"), 2),
(Decimal('23.09'), 1),
(Decimal('30'), 1),
(Decimal('75'), 1),
(Decimal('82.8'), 1),
]
)
def test_dates_with_aggregation(self):
"""
Test that .dates() returns a distinct set of dates when applied to a
QuerySet with aggregation.
Refs #18056. Previously, .dates() would return distinct (date_kind,
aggregation) sets, in this case (year, num_authors), so 2008 would be
returned twice because there are books from 2008 with a different
number of authors.
"""
dates = Book.objects.annotate(num_authors=Count("authors")).dates('pubdate', 'year')
self.assertQuerysetEqual(
dates, [
"datetime.date(1991, 1, 1)",
"datetime.date(1995, 1, 1)",
"datetime.date(2007, 1, 1)",
"datetime.date(2008, 1, 1)"
]
)
def test_values_aggregation(self):
# Refs #20782
max_rating = Book.objects.values('rating').aggregate(max_rating=Max('rating'))
self.assertEqual(max_rating['max_rating'], 5)
max_books_per_rating = Book.objects.values('rating').annotate(
books_per_rating=Count('id')
).aggregate(Max('books_per_rating'))
self.assertEqual(
max_books_per_rating,
{'books_per_rating__max': 3})
def test_ticket17424(self):
"""
Check that doing exclude() on a foreign model after annotate()
doesn't crash.
"""
all_books = list(Book.objects.values_list('pk', flat=True).order_by('pk'))
annotated_books = Book.objects.order_by('pk').annotate(one=Count("id"))
# The value doesn't matter, we just need any negative
# constraint on a related model that's a noop.
excluded_books = annotated_books.exclude(publisher__name="__UNLIKELY_VALUE__")
# Try to generate query tree
str(excluded_books.query)
self.assertQuerysetEqual(excluded_books, all_books, lambda x: x.pk)
# Check internal state
self.assertIsNone(annotated_books.query.alias_map["aggregation_book"].join_type)
self.assertIsNone(excluded_books.query.alias_map["aggregation_book"].join_type)
def test_ticket12886(self):
"""
Check that aggregation over sliced queryset works correctly.
"""
qs = Book.objects.all().order_by('-rating')[0:3]
vals = qs.aggregate(average_top3_rating=Avg('rating'))['average_top3_rating']
self.assertAlmostEqual(vals, 4.5, places=2)
def test_ticket11881(self):
"""
Check that subqueries do not needlessly contain ORDER BY, SELECT FOR UPDATE
or select_related() stuff.
"""
qs = Book.objects.all().select_for_update().order_by(
'pk').select_related('publisher').annotate(max_pk=Max('pk'))
with CaptureQueriesContext(connection) as captured_queries:
qs.aggregate(avg_pk=Avg('max_pk'))
self.assertEqual(len(captured_queries), 1)
qstr = captured_queries[0]['sql'].lower()
self.assertNotIn('for update', qstr)
forced_ordering = connection.ops.force_no_ordering()
if forced_ordering:
# If the backend needs to force an ordering we make sure it's
# the only "ORDER BY" clause present in the query.
self.assertEqual(
re.findall(r'order by (\w+)', qstr),
[', '.join(f[1][0] for f in forced_ordering).lower()]
)
else:
self.assertNotIn('order by', qstr)
self.assertEqual(qstr.count(' join '), 0)
def test_decimal_max_digits_has_no_effect(self):
Book.objects.all().delete()
a1 = Author.objects.first()
p1 = Publisher.objects.first()
thedate = timezone.now()
for i in range(10):
Book.objects.create(
isbn="abcde{}".format(i), name="none", pages=10, rating=4.0,
price=9999.98, contact=a1, publisher=p1, pubdate=thedate)
book = Book.objects.aggregate(price_sum=Sum('price'))
self.assertEqual(book['price_sum'], Decimal("99999.80"))
def test_nonaggregate_aggregation_throws(self):
with self.assertRaisesMessage(TypeError, 'fail is not an aggregate expression'):
Book.objects.aggregate(fail=F('price'))
    def test_nonfield_annotation(self):
        """Annotating Max over a literal works whether output_field is given
        on the Value, on the aggregate, or with a raw Python value."""
        book = Book.objects.annotate(val=Max(Value(2, output_field=IntegerField()))).first()
        self.assertEqual(book.val, 2)
        book = Book.objects.annotate(val=Max(Value(2), output_field=IntegerField())).first()
        self.assertEqual(book.val, 2)
        book = Book.objects.annotate(val=Max(2, output_field=IntegerField())).first()
        self.assertEqual(book.val, 2)
def test_missing_output_field_raises_error(self):
with self.assertRaisesMessage(FieldError, 'Cannot resolve expression type, unknown output_field'):
Book.objects.annotate(val=Max(2)).first()
def test_annotation_expressions(self):
authors = Author.objects.annotate(combined_ages=Sum(F('age') + F('friends__age'))).order_by('name')
authors2 = Author.objects.annotate(combined_ages=Sum('age') + Sum('friends__age')).order_by('name')
for qs in (authors, authors2):
self.assertEqual(len(qs), 9)
self.assertQuerysetEqual(
qs, [
('Adrian Holovaty', 132),
('Brad Dayley', None),
('Jacob Kaplan-Moss', 129),
('James Bennett', 63),
('Jeffrey Forcier', 128),
('Paul Bissex', 120),
('Peter Norvig', 103),
('Stuart Russell', 103),
('Wesley J. Chun', 176)
],
lambda a: (a.name, a.combined_ages)
)
def test_aggregation_expressions(self):
a1 = Author.objects.aggregate(av_age=Sum('age') / Count('*'))
a2 = Author.objects.aggregate(av_age=Sum('age') / Count('age'))
a3 = Author.objects.aggregate(av_age=Avg('age'))
self.assertEqual(a1, {'av_age': 37})
self.assertEqual(a2, {'av_age': 37})
self.assertEqual(a3, {'av_age': Approximate(37.4, places=1)})
def test_avg_decimal_field(self):
v = Book.objects.filter(rating=4).aggregate(avg_price=(Avg('price')))['avg_price']
self.assertIsInstance(v, float)
self.assertEqual(v, Approximate(47.39, places=2))
def test_order_of_precedence(self):
p1 = Book.objects.filter(rating=4).aggregate(avg_price=(Avg('price') + 2) * 3)
self.assertEqual(p1, {'avg_price': Approximate(148.18, places=2)})
p2 = Book.objects.filter(rating=4).aggregate(avg_price=Avg('price') + 2 * 3)
self.assertEqual(p2, {'avg_price': Approximate(53.39, places=2)})
def test_combine_different_types(self):
with self.assertRaisesMessage(FieldError, 'Expression contains mixed types. You must set output_field'):
Book.objects.annotate(sums=Sum('rating') + Sum('pages') + Sum('price')).get(pk=self.b4.pk)
b1 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
output_field=IntegerField())).get(pk=self.b4.pk)
self.assertEqual(b1.sums, 383)
b2 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
output_field=FloatField())).get(pk=self.b4.pk)
self.assertEqual(b2.sums, 383.69)
b3 = Book.objects.annotate(sums=Sum(F('rating') + F('pages') + F('price'),
output_field=DecimalField())).get(pk=self.b4.pk)
self.assertEqual(b3.sums, Approximate(Decimal("383.69"), places=2))
def test_complex_aggregations_require_kwarg(self):
with self.assertRaisesMessage(TypeError, 'Complex annotations require an alias'):
Author.objects.annotate(Sum(F('age') + F('friends__age')))
with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'):
Author.objects.aggregate(Sum('age') / Count('age'))
with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'):
Author.objects.aggregate(Sum(1))
def test_aggregate_over_complex_annotation(self):
qs = Author.objects.annotate(
combined_ages=Sum(F('age') + F('friends__age')))
age = qs.aggregate(max_combined_age=Max('combined_ages'))
self.assertEqual(age['max_combined_age'], 176)
age = qs.aggregate(max_combined_age_doubled=Max('combined_ages') * 2)
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
age = qs.aggregate(
max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'))
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
age = qs.aggregate(
max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'),
sum_combined_age=Sum('combined_ages'))
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
self.assertEqual(age['sum_combined_age'], 954)
age = qs.aggregate(
max_combined_age_doubled=Max('combined_ages') + Max('combined_ages'),
sum_combined_age_doubled=Sum('combined_ages') + Sum('combined_ages'))
self.assertEqual(age['max_combined_age_doubled'], 176 * 2)
self.assertEqual(age['sum_combined_age_doubled'], 954 * 2)
def test_values_annotation_with_expression(self):
# ensure the F() is promoted to the group by clause
qs = Author.objects.values('name').annotate(another_age=Sum('age') + F('age'))
a = qs.get(name="Adrian Holovaty")
self.assertEqual(a['another_age'], 68)
qs = qs.annotate(friend_count=Count('friends'))
a = qs.get(name="Adrian Holovaty")
self.assertEqual(a['friend_count'], 2)
qs = qs.annotate(combined_age=Sum('age') + F('friends__age')).filter(
name="Adrian Holovaty").order_by('-combined_age')
self.assertEqual(
list(qs), [
{
"name": 'Adrian Holovaty',
"another_age": 68,
"friend_count": 1,
"combined_age": 69
},
{
"name": 'Adrian Holovaty',
"another_age": 68,
"friend_count": 1,
"combined_age": 63
}
]
)
vals = qs.values('name', 'combined_age')
self.assertEqual(
list(vals), [
{
"name": 'Adrian Holovaty',
"combined_age": 69
},
{
"name": 'Adrian Holovaty',
"combined_age": 63
}
]
)
def test_annotate_values_aggregate(self):
alias_age = Author.objects.annotate(
age_alias=F('age')
).values(
'age_alias',
).aggregate(sum_age=Sum('age_alias'))
age = Author.objects.values('age').aggregate(sum_age=Sum('age'))
self.assertEqual(alias_age['sum_age'], age['sum_age'])
def test_annotate_over_annotate(self):
author = Author.objects.annotate(
age_alias=F('age')
).annotate(
sum_age=Sum('age_alias')
).get(name="Adrian Holovaty")
other_author = Author.objects.annotate(
sum_age=Sum('age')
).get(name="Adrian Holovaty")
self.assertEqual(author.sum_age, other_author.sum_age)
def test_annotated_aggregate_over_annotated_aggregate(self):
with self.assertRaisesMessage(FieldError, "Cannot compute Sum('id__max'): 'id__max' is an aggregate"):
Book.objects.annotate(Max('id')).annotate(Sum('id__max'))
class MyMax(Max):
def as_sql(self, compiler, connection):
self.set_source_expressions(self.get_source_expressions()[0:1])
return super(MyMax, self).as_sql(compiler, connection)
with self.assertRaisesMessage(FieldError, "Cannot compute Max('id__max'): 'id__max' is an aggregate"):
Book.objects.annotate(Max('id')).annotate(my_max=MyMax('id__max', 'price'))
def test_multi_arg_aggregate(self):
class MyMax(Max):
def as_sql(self, compiler, connection):
self.set_source_expressions(self.get_source_expressions()[0:1])
return super(MyMax, self).as_sql(compiler, connection)
with self.assertRaisesMessage(TypeError, 'Complex aggregates require an alias'):
Book.objects.aggregate(MyMax('pages', 'price'))
with self.assertRaisesMessage(TypeError, 'Complex annotations require an alias'):
Book.objects.annotate(MyMax('pages', 'price'))
Book.objects.aggregate(max_field=MyMax('pages', 'price'))
    def test_add_implementation(self):
        # Each sub-test patches a vendor-specific 'as_<vendor>' method onto
        # MySum and verifies both the rendered SQL and the computed value.
        class MySum(Sum):
            pass
        # test completely changing how the output is rendered
        def lower_case_function_override(self, compiler, connection):
            sql, params = compiler.compile(self.source_expressions[0])
            substitutions = dict(function=self.function.lower(), expressions=sql)
            substitutions.update(self.extra)
            return self.template % substitutions, params
        setattr(MySum, 'as_' + connection.vendor, lower_case_function_override)
        qs = Book.objects.annotate(
            sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField())
        )
        # Lower-cased function name appears exactly once in the SQL.
        self.assertEqual(str(qs.query).count('sum('), 1)
        b1 = qs.get(pk=self.b4.pk)
        self.assertEqual(b1.sums, 383)
        # test changing the dict and delegating
        def lower_case_function_super(self, compiler, connection):
            self.extra['function'] = self.function.lower()
            return super(MySum, self).as_sql(compiler, connection)
        setattr(MySum, 'as_' + connection.vendor, lower_case_function_super)
        qs = Book.objects.annotate(
            sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField())
        )
        self.assertEqual(str(qs.query).count('sum('), 1)
        b1 = qs.get(pk=self.b4.pk)
        self.assertEqual(b1.sums, 383)
        # test overriding all parts of the template
        def be_evil(self, compiler, connection):
            # Ignores the source expressions entirely: renders MAX(2).
            substitutions = dict(function='MAX', expressions='2')
            substitutions.update(self.extra)
            return self.template % substitutions, ()
        setattr(MySum, 'as_' + connection.vendor, be_evil)
        qs = Book.objects.annotate(
            sums=MySum(F('rating') + F('pages') + F('price'), output_field=IntegerField())
        )
        self.assertEqual(str(qs.query).count('MAX('), 1)
        b1 = qs.get(pk=self.b4.pk)
        self.assertEqual(b1.sums, 2)
def test_complex_values_aggregation(self):
max_rating = Book.objects.values('rating').aggregate(
double_max_rating=Max('rating') + Max('rating'))
self.assertEqual(max_rating['double_max_rating'], 5 * 2)
max_books_per_rating = Book.objects.values('rating').annotate(
books_per_rating=Count('id') + 5
).aggregate(Max('books_per_rating'))
self.assertEqual(
max_books_per_rating,
{'books_per_rating__max': 3 + 5})
    def test_expression_on_aggregation(self):
        # Create a plain expression
        class Greatest(Func):
            function = 'GREATEST'
            def as_sqlite(self, compiler, connection):
                # SQLite has no GREATEST(); its variadic MAX() is equivalent.
                return super(Greatest, self).as_sql(compiler, connection, function='MAX')

        # Filter on an expression built from two aggregates.
        qs = Publisher.objects.annotate(
            price_or_median=Greatest(Avg('book__rating'), Avg('book__price'))
        ).filter(price_or_median__gte=F('num_awards')).order_by('num_awards')
        self.assertQuerysetEqual(
            qs, [1, 3, 7, 9], lambda v: v.num_awards)

        # Mix an aggregate with a plain field reference; output_field is
        # required because the two argument types differ.
        qs2 = Publisher.objects.annotate(
            rating_or_num_awards=Greatest(Avg('book__rating'), F('num_awards'),
                                          output_field=FloatField())
        ).filter(rating_or_num_awards__gt=F('num_awards')).order_by('num_awards')
        self.assertQuerysetEqual(
            qs2, [1, 3], lambda v: v.num_awards)
| bsd-3-clause |
yadavsaroj/avro | lang/py3/avro/tests/av_bench.py | 21 | 3194 | #!/usr/bin/env python3
# -*- mode: python -*-
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import random
import string
import sys
import time
import avro.datafile
import avro.io
import avro.schema
TYPES = ('A', 'CNAME',)
FILENAME = 'datafile.avr'
def GenerateRandomName():
    """Return a random 15-character name of distinct lowercase letters."""
    letters = random.sample(string.ascii_lowercase, 15)
    return ''.join(letters)
def GenerateRandomIP():
    """Return a random dotted-quad IPv4 address string."""
    # Four randint() calls, left to right, mirroring the original tuple order.
    octets = [random.randint(0, 255) for _ in range(4)]
    return '.'.join(str(octet) for octet in octets)
def Write(nrecords):
    """Writes a data file with the specified number of random records.

    Writes to the module-level FILENAME using the inline 'Query' record
    schema (query, response, type).

    Args:
        nrecords: Number of records to write.
    """
    schema_s = """
{
  "type": "record",
  "name": "Query",
  "fields" : [
    {"name": "query", "type": "string"},
    {"name": "response", "type": "string"},
    {"name": "type", "type": "string", "default": "A"}
  ]
}
"""
    schema = avro.schema.Parse(schema_s)
    writer = avro.io.DatumWriter(schema)
    with open(FILENAME, 'wb') as out:
        with avro.datafile.DataFileWriter(
            out, writer, schema,
            # codec='deflate'
        ) as data_writer:
            for _ in range(nrecords):
                response = GenerateRandomIP()
                query = GenerateRandomName()
                # NOTE: 'type' shadows the builtin within this loop body.
                type = random.choice(TYPES)
                data_writer.append({
                    'query': query,
                    'response': response,
                    'type': type,
                })
def Read(expect_nrecords):
    """Reads the data file generated by Write() and checks the record count.

    Args:
        expect_nrecords: Number of records the file is expected to contain.

    Raises:
        AssertionError: If the number of records read differs from
            expect_nrecords.
    """
    with open(FILENAME, 'rb') as f:
        reader = avro.io.DatumReader()
        with avro.datafile.DataFileReader(f, reader) as file_reader:
            nrecords = sum(1 for _ in file_reader)
    # Bug fix: the failure message previously referenced the undefined name
    # 'expected_nrecords', so a count mismatch raised NameError instead of a
    # meaningful AssertionError.
    assert nrecords == expect_nrecords, (
        'Expecting %d records, got %d.' % (expect_nrecords, nrecords))
def Timing(f, *args):
    """Return the wall-clock time, in seconds, spent calling f(*args)."""
    start = time.time()
    f(*args)
    return time.time() - start
def Main(args):
    """Benchmark: time writing then reading args[1] random records."""
    # args[0] is the program name; args[1] is the record count.
    nrecords = int(args[1])
    print('Write %0.4f' % Timing(Write, nrecords))
    print('Read %0.4f' % Timing(Read, nrecords))
if __name__ == '__main__':
    # Route DEBUG-level log records to the console before running the bench.
    log_formatter = logging.Formatter(
        '%(asctime)s %(levelname)s %(filename)s:%(lineno)s : %(message)s')
    logging.root.setLevel(logging.DEBUG)
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(log_formatter)
    console_handler.setLevel(logging.DEBUG)
    logging.root.addHandler(console_handler)
    Main(sys.argv)
| apache-2.0 |
SatoshiNXSimudrone/sl4a-damon-clone | python/src/Lib/test/test_timeout.py | 55 | 6714 | """Unit tests for socket timeout feature."""
import unittest
from test import test_support
# This requires the 'network' resource as given on the regrtest command line.
skip_expected = not test_support.is_resource_enabled('network')
import time
import socket
class CreationTestCase(unittest.TestCase):
    """Test case for socket.gettimeout() and socket.settimeout()"""
    # NOTE: Python 2 code (0L long literals, u"" strings); kept as-is.

    def setUp(self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

    def tearDown(self):
        self.sock.close()

    def testObjectCreation(self):
        # Test Socket creation
        self.assertEqual(self.sock.gettimeout(), None,
                         "timeout not disabled by default")

    def testFloatReturnValue(self):
        # Test return value of gettimeout()
        self.sock.settimeout(7.345)
        self.assertEqual(self.sock.gettimeout(), 7.345)
        self.sock.settimeout(3)
        self.assertEqual(self.sock.gettimeout(), 3)
        self.sock.settimeout(None)
        self.assertEqual(self.sock.gettimeout(), None)

    def testReturnType(self):
        # Test return type of gettimeout(): always a float, even when the
        # timeout was set from an int.
        self.sock.settimeout(1)
        self.assertEqual(type(self.sock.gettimeout()), type(1.0))
        self.sock.settimeout(3.9)
        self.assertEqual(type(self.sock.gettimeout()), type(1.0))

    def testTypeCheck(self):
        # Test type checking by settimeout(): ints, longs, floats and None
        # are accepted; everything else raises TypeError.
        self.sock.settimeout(0)
        self.sock.settimeout(0L)
        self.sock.settimeout(0.0)
        self.sock.settimeout(None)
        self.assertRaises(TypeError, self.sock.settimeout, "")
        self.assertRaises(TypeError, self.sock.settimeout, u"")
        self.assertRaises(TypeError, self.sock.settimeout, ())
        self.assertRaises(TypeError, self.sock.settimeout, [])
        self.assertRaises(TypeError, self.sock.settimeout, {})
        self.assertRaises(TypeError, self.sock.settimeout, 0j)

    def testRangeCheck(self):
        # Test range checking by settimeout(): negative values are rejected.
        self.assertRaises(ValueError, self.sock.settimeout, -1)
        self.assertRaises(ValueError, self.sock.settimeout, -1L)
        self.assertRaises(ValueError, self.sock.settimeout, -1.0)

    def testTimeoutThenBlocking(self):
        # Test settimeout() followed by setblocking(): setblocking resets the
        # timeout to None (blocking) or 0.0 (non-blocking).
        self.sock.settimeout(10)
        self.sock.setblocking(1)
        self.assertEqual(self.sock.gettimeout(), None)
        self.sock.setblocking(0)
        self.assertEqual(self.sock.gettimeout(), 0.0)
        self.sock.settimeout(10)
        self.sock.setblocking(0)
        self.assertEqual(self.sock.gettimeout(), 0.0)
        self.sock.setblocking(1)
        self.assertEqual(self.sock.gettimeout(), None)

    def testBlockingThenTimeout(self):
        # Test setblocking() followed by settimeout(): settimeout wins
        # regardless of the prior blocking mode.
        self.sock.setblocking(0)
        self.sock.settimeout(1)
        self.assertEqual(self.sock.gettimeout(), 1)
        self.sock.setblocking(1)
        self.sock.settimeout(1)
        self.assertEqual(self.sock.gettimeout(), 1)
class TimeoutTestCase(unittest.TestCase):
    """Test case for socket.socket() timeout functions"""

    # There are a number of tests here trying to make sure that an operation
    # doesn't take too much longer than expected.  But competing machine
    # activity makes it inevitable that such tests will fail at times.
    # When fuzz was at 1.0, I (tim) routinely saw bogus failures on Win2K
    # and Win98SE.  Boosting it to 2.0 helped a lot, but isn't a real
    # solution.
    fuzz = 2.0

    def setUp(self):
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Remote address used for connect/recv; local one for bind tests.
        self.addr_remote = ('www.python.org.', 80)
        self.addr_local = ('127.0.0.1', 25339)

    def tearDown(self):
        self.sock.close()

    def testConnectTimeout(self):
        # Choose a private address that is unlikely to exist to prevent
        # failures due to the connect succeeding before the timeout.
        # Use a dotted IP address to avoid including the DNS lookup time
        # with the connect time.  This avoids failing the assertion that
        # the timeout occurred fast enough.
        addr = ('10.0.0.0', 12345)
        # Test connect() timeout
        _timeout = 0.001
        self.sock.settimeout(_timeout)
        _t1 = time.time()
        self.failUnlessRaises(socket.error, self.sock.connect, addr)
        _t2 = time.time()
        _delta = abs(_t1 - _t2)
        self.assert_(_delta < _timeout + self.fuzz,
                     "timeout (%g) is more than %g seconds more than expected (%g)"
                     %(_delta, self.fuzz, _timeout))

    def testRecvTimeout(self):
        # Test recv() timeout
        _timeout = 0.02
        self.sock.connect(self.addr_remote)
        self.sock.settimeout(_timeout)
        _t1 = time.time()
        self.failUnlessRaises(socket.error, self.sock.recv, 1024)
        _t2 = time.time()
        _delta = abs(_t1 - _t2)
        self.assert_(_delta < _timeout + self.fuzz,
                     "timeout (%g) is %g seconds more than expected (%g)"
                     %(_delta, self.fuzz, _timeout))

    def testAcceptTimeout(self):
        # Test accept() timeout
        _timeout = 2
        self.sock.settimeout(_timeout)
        self.sock.bind(self.addr_local)
        self.sock.listen(5)
        _t1 = time.time()
        self.failUnlessRaises(socket.error, self.sock.accept)
        _t2 = time.time()
        _delta = abs(_t1 - _t2)
        self.assert_(_delta < _timeout + self.fuzz,
                     "timeout (%g) is %g seconds more than expected (%g)"
                     %(_delta, self.fuzz, _timeout))

    def testRecvfromTimeout(self):
        # Test recvfrom() timeout; uses a fresh UDP socket.
        _timeout = 2
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.sock.settimeout(_timeout)
        self.sock.bind(self.addr_local)
        _t1 = time.time()
        self.failUnlessRaises(socket.error, self.sock.recvfrom, 8192)
        _t2 = time.time()
        _delta = abs(_t1 - _t2)
        self.assert_(_delta < _timeout + self.fuzz,
                     "timeout (%g) is %g seconds more than expected (%g)"
                     %(_delta, self.fuzz, _timeout))

    def testSend(self):
        # Test send() timeout
        # couldn't figure out how to test it
        pass

    def testSendto(self):
        # Test sendto() timeout
        # couldn't figure out how to test it
        pass

    def testSendall(self):
        # Test sendall() timeout
        # couldn't figure out how to test it
        pass
def test_main():
    # The suite needs outbound network access (see skip_expected above).
    test_support.requires('network')
    test_support.run_unittest(CreationTestCase, TimeoutTestCase)

if __name__ == "__main__":
    test_main()
| apache-2.0 |
ychen820/microblog | y/google-cloud-sdk/platform/google_appengine/lib/django-1.2/django/contrib/flatpages/views.py | 295 | 2613 | from django.contrib.flatpages.models import FlatPage
from django.template import loader, RequestContext
from django.shortcuts import get_object_or_404
from django.http import HttpResponse, HttpResponseRedirect
from django.conf import settings
from django.core.xheaders import populate_xheaders
from django.utils.safestring import mark_safe
from django.views.decorators.csrf import csrf_protect
DEFAULT_TEMPLATE = 'flatpages/default.html'
# This view is called from FlatpageFallbackMiddleware.process_response
# when a 404 is raised, which often means CsrfViewMiddleware.process_view
# has not been called even if CsrfViewMiddleware is installed. So we need
# to use @csrf_protect, in case the template needs {% csrf_token %}.
# However, we can't just wrap this view; if no matching flatpage exists,
# or a redirect is required for authentication, the 404 needs to be returned
# without any CSRF checks. Therefore, we only
# CSRF protect the internal implementation.
def flatpage(request, url):
    """
    Public interface to the flat page view.

    Normalizes ``url`` (slash handling), looks up the matching FlatPage for
    the current site, and renders it.

    Models: `flatpages.flatpages`
    Templates: Uses the template defined by the ``template_name`` field,
        or `flatpages/default.html` if template_name is not defined.
    Context:
        flatpage
            `flatpages.flatpages` object
    """
    # Redirect to the trailing-slash form when APPEND_SLASH is enabled.
    if settings.APPEND_SLASH and not url.endswith('/'):
        return HttpResponseRedirect("%s/" % request.path)
    if not url.startswith('/'):
        url = '/' + url
    page = get_object_or_404(
        FlatPage, url__exact=url, sites__id__exact=settings.SITE_ID)
    return render_flatpage(request, page)
@csrf_protect
def render_flatpage(request, f):
    """
    Internal interface to the flat page view.

    Renders FlatPage ``f`` for ``request``, enforcing the page's
    registration_required flag and adding object headers via
    populate_xheaders().
    """
    # If registration is required for accessing this page, and the user isn't
    # logged in, redirect to the login page.
    if f.registration_required and not request.user.is_authenticated():
        from django.contrib.auth.views import redirect_to_login
        return redirect_to_login(request.path)
    if f.template_name:
        # Fall back to the default template if the custom one is missing.
        t = loader.select_template((f.template_name, DEFAULT_TEMPLATE))
    else:
        t = loader.get_template(DEFAULT_TEMPLATE)

    # To avoid having to always use the "|safe" filter in flatpage templates,
    # mark the title and content as already safe (since they are raw HTML
    # content in the first place).
    f.title = mark_safe(f.title)
    f.content = mark_safe(f.content)

    c = RequestContext(request, {
        'flatpage': f,
    })
    response = HttpResponse(t.render(c))
    populate_xheaders(request, response, FlatPage, f.id)
    return response
| bsd-3-clause |
trast/git | contrib/hg-to-git/hg-to-git.py | 47 | 7867 | #!/usr/bin/env python
""" hg-to-git.py - A Mercurial to GIT converter
Copyright (C)2007 Stelian Pop <stelian@popies.net>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
"""
import os, os.path, sys
import tempfile, pickle, getopt
import re
# Maps hg version -> git version
hgvers = {}
# List of children for each hg revision
hgchildren = {}
# List of parents for each hg revision
hgparents = {}
# Current branch for each hg revision
hgbranch = {}
# Number of new changesets converted from hg
hgnewcsets = 0
#------------------------------------------------------------------------------
def usage():
    # Print the command-line help to stdout (Python 2 print statement).
    print """\
%s: [OPTIONS] <hgprj>

options:
    -s, --gitstate=FILE: name of the state to be saved/read
                         for incrementals
    -n, --nrepack=INT:   number of changesets that will trigger
                         a repack (default=0, -1 to deactivate)
    -v, --verbose:       be verbose

required:
    hgprj:  name of the HG project to import (directory)
""" % sys.argv[0]
#------------------------------------------------------------------------------
def getgitenv(user, date):
    """Build a shell prefix exporting git author/committer identity and dates.

    If ``user`` looks like 'Name <email>', both parts are exported; otherwise
    the whole string becomes the name and the e-mail variables are cleared.
    """
    match = re.compile('(.*?)\s+<(.*)>').match(user)
    if match:
        name, email = match.group(1), match.group(2)
        parts = [
            'export GIT_AUTHOR_NAME="%s" ;' % name,
            'export GIT_COMMITTER_NAME="%s" ;' % name,
            'export GIT_AUTHOR_EMAIL="%s" ;' % email,
            'export GIT_COMMITTER_EMAIL="%s" ;' % email,
        ]
    else:
        parts = [
            'export GIT_AUTHOR_NAME="%s" ;' % user,
            'export GIT_COMMITTER_NAME="%s" ;' % user,
            'export GIT_AUTHOR_EMAIL= ;',
            'export GIT_COMMITTER_EMAIL= ;',
        ]
    parts.append('export GIT_AUTHOR_DATE="%s" ;' % date)
    parts.append('export GIT_COMMITTER_DATE="%s" ;' % date)
    return ''.join(parts)
#------------------------------------------------------------------------------
# --- Script body: option parsing, state loading and branch analysis. ---
state = ''
opt_nrepack = 0
verbose = False

try:
    opts, args = getopt.getopt(sys.argv[1:], 's:t:n:v', ['gitstate=', 'tempdir=', 'nrepack=', 'verbose'])
    for o, a in opts:
        if o in ('-s', '--gitstate'):
            state = a
            state = os.path.abspath(state)
        if o in ('-n', '--nrepack'):
            opt_nrepack = int(a)
        if o in ('-v', '--verbose'):
            verbose = True
    if len(args) != 1:
        raise Exception('params')
except:
    # Any option/argument problem falls through to the usage message.
    usage()
    sys.exit(1)

hgprj = args[0]
os.chdir(hgprj)

# On incremental runs, reload the hg-rev -> git-sha mapping saved last time.
if state:
    if os.path.exists(state):
        if verbose:
            print 'State does exist, reading'
        f = open(state, 'r')
        hgvers = pickle.load(f)
    else:
        print 'State does not exist, first run'

sock = os.popen('hg tip --template "{rev}"')
tip = sock.read()
if sock.close():
    sys.exit(1)
if verbose:
    print 'tip is', tip

# Calculate the branches
if verbose:
    print 'analysing the branches...'
hgchildren["0"] = ()
hgparents["0"] = (None, None)
hgbranch["0"] = "master"
for cset in range(1, int(tip) + 1):
    hgchildren[str(cset)] = ()
    prnts = os.popen('hg log -r %d --template "{parents}"' % cset).read().strip().split(' ')
    prnts = map(lambda x: x[:x.find(':')], prnts)
    if prnts[0] != '':
        parent = prnts[0].strip()
    else:
        # No explicit parent listed: hg implies the previous changeset.
        parent = str(cset - 1)
    hgchildren[parent] += ( str(cset), )
    if len(prnts) > 1:
        mparent = prnts[1].strip()
        hgchildren[mparent] += ( str(cset), )
    else:
        mparent = None

    hgparents[str(cset)] = (parent, mparent)

    if mparent:
        # For merge changesets, take either one, preferably the 'master' branch
        if hgbranch[mparent] == 'master':
            hgbranch[str(cset)] = 'master'
        else:
            hgbranch[str(cset)] = hgbranch[parent]
    else:
        # Normal changesets
        # For first children, take the parent branch, for the others create a new branch
        if hgchildren[parent][0] == str(cset):
            hgbranch[str(cset)] = hgbranch[parent]
        else:
            hgbranch[str(cset)] = "branch-" + str(cset)

# First run: no mapping for rev 0 yet, so the git repo must be created.
if not hgvers.has_key("0"):
    print 'creating repository'
    os.system('git init')
# loop through every hg changeset
for cset in range(int(tip) + 1):

    # incremental, already seen
    if hgvers.has_key(str(cset)):
        continue
    hgnewcsets += 1

    # get info
    log_data = os.popen('hg log -r %d --template "{tags}\n{date|date}\n{author}\n"' % cset).readlines()
    tag = log_data[0].strip()
    date = log_data[1].strip()
    user = log_data[2].strip()
    parent = hgparents[str(cset)][0]
    mparent = hgparents[str(cset)][1]

    # get comment: written to a temp file so git commit can read it via -F.
    (fdcomment, filecomment) = tempfile.mkstemp()
    csetcomment = os.popen('hg log -r %d --template "{desc}"' % cset).read().strip()
    os.write(fdcomment, csetcomment)
    os.close(fdcomment)

    print '-----------------------------------------'
    print 'cset:', cset
    print 'branch:', hgbranch[str(cset)]
    print 'user:', user
    print 'date:', date
    print 'comment:', csetcomment
    if parent:
        print 'parent:', parent
    if mparent:
        print 'mparent:', mparent
    if tag:
        print 'tag:', tag
    print '-----------------------------------------'

    # checkout the parent if necessary
    if cset != 0:
        if hgbranch[str(cset)] == "branch-" + str(cset):
            print 'creating new branch', hgbranch[str(cset)]
            os.system('git checkout -b %s %s' % (hgbranch[str(cset)], hgvers[parent]))
        else:
            print 'checking out branch', hgbranch[str(cset)]
            os.system('git checkout %s' % hgbranch[str(cset)])

    # merge
    if mparent:
        if hgbranch[parent] == hgbranch[str(cset)]:
            otherbranch = hgbranch[mparent]
        else:
            otherbranch = hgbranch[parent]
        print 'merging', otherbranch, 'into', hgbranch[str(cset)]
        os.system(getgitenv(user, date) + 'git merge --no-commit -s ours "" %s %s' % (hgbranch[str(cset)], otherbranch))

    # remove everything except .git and .hg directories
    os.system('find . \( -path "./.hg" -o -path "./.git" \) -prune -o ! -name "." -print | xargs rm -rf')

    # repopulate with checkouted files
    os.system('hg update -C %d' % cset)

    # add new files
    os.system('git ls-files -x .hg --others | git update-index --add --stdin')
    # delete removed files
    os.system('git ls-files -x .hg --deleted | git update-index --remove --stdin')

    # commit
    os.system(getgitenv(user, date) + 'git commit --allow-empty -a -F %s' % filecomment)
    os.unlink(filecomment)

    # tag
    if tag and tag != 'tip':
        os.system(getgitenv(user, date) + 'git tag %s' % tag)

    # delete branch if not used anymore...
    if mparent and len(hgchildren[str(cset)]):
        print "Deleting unused branch:", otherbranch
        os.system('git branch -d %s' % otherbranch)

    # retrieve and record the version
    vvv = os.popen('git show --quiet --pretty=format:%H').read()
    print 'record', cset, '->', vvv
    hgvers[str(cset)] = vvv

# repack only after enough new changesets, unless disabled with -n -1
if hgnewcsets >= opt_nrepack and opt_nrepack != -1:
    os.system('git repack -a -d')

# write the state for incrementals
if state:
    if verbose:
        print 'Writing state'
    f = open(state, 'w')
    pickle.dump(hgvers, f)
# vim: et ts=8 sw=4 sts=4
| gpl-2.0 |
tragiclifestories/django | tests/known_related_objects/tests.py | 363 | 6425 | from __future__ import unicode_literals
from django.test import TestCase
from .models import Organiser, Pool, PoolStyle, Tournament
class ExistingRelatedInstancesTests(TestCase):
    """
    Checks that the ORM reuses already-fetched related instances: accessing
    the relation returns the identical object (assertIs), and the query
    counts confirm no extra database round-trips happen.
    """

    @classmethod
    def setUpTestData(cls):
        cls.t1 = Tournament.objects.create(name='Tourney 1')
        cls.t2 = Tournament.objects.create(name='Tourney 2')
        cls.o1 = Organiser.objects.create(name='Organiser 1')
        cls.p1 = Pool.objects.create(name='T1 Pool 1', tournament=cls.t1, organiser=cls.o1)
        cls.p2 = Pool.objects.create(name='T1 Pool 2', tournament=cls.t1, organiser=cls.o1)
        cls.p3 = Pool.objects.create(name='T2 Pool 1', tournament=cls.t2, organiser=cls.o1)
        cls.p4 = Pool.objects.create(name='T2 Pool 2', tournament=cls.t2, organiser=cls.o1)
        cls.ps1 = PoolStyle.objects.create(name='T1 Pool 2 Style', pool=cls.p2)
        cls.ps2 = PoolStyle.objects.create(name='T2 Pool 1 Style', pool=cls.p3)

    def test_foreign_key(self):
        with self.assertNumQueries(2):
            tournament = Tournament.objects.get(pk=self.t1.pk)
            pool = tournament.pool_set.all()[0]
            self.assertIs(tournament, pool.tournament)

    def test_foreign_key_prefetch_related(self):
        with self.assertNumQueries(2):
            tournament = (Tournament.objects.prefetch_related('pool_set').get(pk=self.t1.pk))
            pool = tournament.pool_set.all()[0]
            self.assertIs(tournament, pool.tournament)

    def test_foreign_key_multiple_prefetch(self):
        with self.assertNumQueries(2):
            tournaments = list(Tournament.objects.prefetch_related('pool_set').order_by('pk'))
            pool1 = tournaments[0].pool_set.all()[0]
            self.assertIs(tournaments[0], pool1.tournament)
            pool2 = tournaments[1].pool_set.all()[0]
            self.assertIs(tournaments[1], pool2.tournament)

    def test_queryset_or(self):
        tournament_1 = self.t1
        tournament_2 = self.t2
        with self.assertNumQueries(1):
            pools = tournament_1.pool_set.all() | tournament_2.pool_set.all()
            related_objects = set(pool.tournament for pool in pools)
            self.assertEqual(related_objects, {tournament_1, tournament_2})

    def test_queryset_or_different_cached_items(self):
        tournament = self.t1
        organiser = self.o1
        with self.assertNumQueries(1):
            pools = tournament.pool_set.all() | organiser.pool_set.all()
            first = pools.filter(pk=self.p1.pk)[0]
            self.assertIs(first.tournament, tournament)
            self.assertIs(first.organiser, organiser)

    def test_queryset_or_only_one_with_precache(self):
        tournament_1 = self.t1
        tournament_2 = self.t2
        # 2 queries here as pool 3 has tournament 2, which is not cached
        with self.assertNumQueries(2):
            pools = tournament_1.pool_set.all() | Pool.objects.filter(pk=self.p3.pk)
            related_objects = set(pool.tournament for pool in pools)
            self.assertEqual(related_objects, {tournament_1, tournament_2})
        # and the other direction
        with self.assertNumQueries(2):
            pools = Pool.objects.filter(pk=self.p3.pk) | tournament_1.pool_set.all()
            related_objects = set(pool.tournament for pool in pools)
            self.assertEqual(related_objects, {tournament_1, tournament_2})

    def test_queryset_and(self):
        tournament = self.t1
        organiser = self.o1
        with self.assertNumQueries(1):
            pools = tournament.pool_set.all() & organiser.pool_set.all()
            first = pools.filter(pk=self.p1.pk)[0]
            self.assertIs(first.tournament, tournament)
            self.assertIs(first.organiser, organiser)

    def test_one_to_one(self):
        with self.assertNumQueries(2):
            style = PoolStyle.objects.get(pk=self.ps1.pk)
            pool = style.pool
            self.assertIs(style, pool.poolstyle)

    def test_one_to_one_select_related(self):
        with self.assertNumQueries(1):
            style = PoolStyle.objects.select_related('pool').get(pk=self.ps1.pk)
            pool = style.pool
            self.assertIs(style, pool.poolstyle)

    def test_one_to_one_multi_select_related(self):
        with self.assertNumQueries(1):
            poolstyles = list(PoolStyle.objects.select_related('pool').order_by('pk'))
            self.assertIs(poolstyles[0], poolstyles[0].pool.poolstyle)
            self.assertIs(poolstyles[1], poolstyles[1].pool.poolstyle)

    def test_one_to_one_prefetch_related(self):
        with self.assertNumQueries(2):
            style = PoolStyle.objects.prefetch_related('pool').get(pk=self.ps1.pk)
            pool = style.pool
            self.assertIs(style, pool.poolstyle)

    def test_one_to_one_multi_prefetch_related(self):
        with self.assertNumQueries(2):
            poolstyles = list(PoolStyle.objects.prefetch_related('pool').order_by('pk'))
            self.assertIs(poolstyles[0], poolstyles[0].pool.poolstyle)
            self.assertIs(poolstyles[1], poolstyles[1].pool.poolstyle)

    def test_reverse_one_to_one(self):
        with self.assertNumQueries(2):
            pool = Pool.objects.get(pk=self.p2.pk)
            style = pool.poolstyle
            self.assertIs(pool, style.pool)

    def test_reverse_one_to_one_select_related(self):
        with self.assertNumQueries(1):
            pool = Pool.objects.select_related('poolstyle').get(pk=self.p2.pk)
            style = pool.poolstyle
            self.assertIs(pool, style.pool)

    def test_reverse_one_to_one_prefetch_related(self):
        with self.assertNumQueries(2):
            pool = Pool.objects.prefetch_related('poolstyle').get(pk=self.p2.pk)
            style = pool.poolstyle
            self.assertIs(pool, style.pool)

    def test_reverse_one_to_one_multi_select_related(self):
        with self.assertNumQueries(1):
            pools = list(Pool.objects.select_related('poolstyle').order_by('pk'))
            self.assertIs(pools[1], pools[1].poolstyle.pool)
            self.assertIs(pools[2], pools[2].poolstyle.pool)

    def test_reverse_one_to_one_multi_prefetch_related(self):
        with self.assertNumQueries(2):
            pools = list(Pool.objects.prefetch_related('poolstyle').order_by('pk'))
            self.assertIs(pools[1], pools[1].poolstyle.pool)
            self.assertIs(pools[2], pools[2].poolstyle.pool)
| bsd-3-clause |
johnkit/vtk-dev | Filters/Core/Testing/Python/TestContourCases.py | 11 | 4312 | # This test requires Numpy.
import sys
import vtk
from vtk.test import Testing
try:
import numpy as np
except ImportError:
print "WARNING: This test requires Numeric Python: http://numpy.sf.net"
sys.exit(0)
def GenerateCell(cellType, points):
    # Build a vtkUnstructuredGrid holding a single cell of the given type.
    # cellType: a VTK cell type constant (e.g. vtk.VTK_TETRA);
    # points: sequence of (x, y, z) tuples, one per cell point.
    cell = vtk.vtkUnstructuredGrid()
    pts = vtk.vtkPoints()
    for p in points:
        pts.InsertNextPoint(p)
    cell.SetPoints(pts)
    cell.Allocate(1,1)
    ids = vtk.vtkIdList()
    # Point ids are simply 0..n-1, matching the insertion order above.
    for i in range(len(points)):
        ids.InsertId(i,i)
    cell.InsertNextCell(cellType, ids)
    return cell
def Combination(sz, n):
    """Return the binary digits of n (least-significant first) as a float
    array of length sz; higher positions stay zero."""
    bits = np.zeros(sz)
    index = 0
    while n > 0:
        bits[index] = n & 1
        n >>= 1
        index += 1
    return bits
class CellTestBase:
    # Mixin: subclasses set self.Cell in setUp(); test_contours() then checks
    # every non-trivial 0/1 corner-scalar labelling of that single cell.
    def test_contours(self):
        cell = vtk.vtkUnstructuredGrid()
        cell.ShallowCopy(self.Cell)
        # NOTE: local 'np' shadows the module-level numpy alias here; it is
        # the number of points of the cell.
        np = self.Cell.GetNumberOfPoints()
        ncomb = pow(2, np)
        scalar = vtk.vtkDoubleArray()
        scalar.SetName("scalar")
        scalar.SetNumberOfTuples(np)
        cell.GetPointData().SetScalars(scalar)
        incorrectCases = []
        # Skip the all-zero (0) and all-one (ncomb-1) labellings: they
        # produce no contour.
        for i in range(1,ncomb-1):
            c = Combination(np, i)
            for p in range(np):
                scalar.SetTuple1(p, c[p])
            # Pipeline: scalar gradient -> 0.5 isocontour -> surface normals
            # -> per-point dot(grad, Normals) stored as 'dir'.
            gradientFilter = vtk.vtkGradientFilter()
            gradientFilter.SetInputData(cell)
            gradientFilter.SetInputArrayToProcess(0,0,0,0,'scalar')
            gradientFilter.SetResultArrayName('grad')
            gradientFilter.Update()
            contourFilter = vtk.vtkContourFilter()
            contourFilter.SetInputConnection(gradientFilter.GetOutputPort())
            contourFilter.SetNumberOfContours(1)
            contourFilter.SetValue(0, 0.5)
            contourFilter.Update()
            normalsFilter = vtk.vtkPolyDataNormals()
            normalsFilter.SetInputConnection(contourFilter.GetOutputPort())
            normalsFilter.SetConsistency(0)
            normalsFilter.SetFlipNormals(0)
            normalsFilter.SetSplitting(0)
            calcFilter = vtk.vtkArrayCalculator()
            calcFilter.SetInputConnection(normalsFilter.GetOutputPort())
            calcFilter.SetAttributeModeToUsePointData()
            calcFilter.AddVectorArrayName('grad')
            calcFilter.AddVectorArrayName('Normals')
            calcFilter.SetResultArrayName('dir')
            calcFilter.SetFunction('grad.Normals')
            calcFilter.Update()
            out = vtk.vtkUnstructuredGrid()
            out.ShallowCopy(calcFilter.GetOutput())
            numPts = out.GetNumberOfPoints()
            if numPts > 0:
                dirArray = out.GetPointData().GetArray('dir')
                for p in range(numPts):
                    if(dirArray.GetTuple1(p) > 0.0): # all normals are reversed
                        incorrectCases.append(i)
                        break
        # Joining the failing case ids gives an empty string when all pass.
        self.assertEquals(','.join([str(i) for i in incorrectCases]), '')
class TestTetra(Testing.vtkTest, CellTestBase):
    # Tetrahedron: 4 corner points.
    def setUp(self):
        self.Cell = GenerateCell(vtk.VTK_TETRA,
                                 [ ( 1.0, -1.0, -1.0),
                                   ( 1.0, 1.0, 1.0),
                                   (-1.0, 1.0, -1.0),
                                   (-1.0, -1.0, 1.0) ])
class TestHexahedron(Testing.vtkTest, CellTestBase):
    # Hexahedron: 8 corner points of a cube.
    def setUp(self):
        self.Cell = GenerateCell(vtk.VTK_HEXAHEDRON,
                                 [ (-1.0, -1.0, -1.0),
                                   ( 1.0, -1.0, -1.0),
                                   ( 1.0, 1.0, -1.0),
                                   (-1.0, 1.0, -1.0),
                                   (-1.0, -1.0, 1.0),
                                   ( 1.0, -1.0, 1.0),
                                   ( 1.0, 1.0, 1.0),
                                   (-1.0, 1.0, 1.0) ])
class TestWedge(Testing.vtkTest, CellTestBase):
    # Wedge: two triangular faces, 6 points.
    def setUp(self):
        self.Cell = GenerateCell(vtk.VTK_WEDGE,
                                 [ (-1.0, -1.0, -1.0),
                                   ( 1.0, -1.0, -1.0),
                                   ( 0.0, -1.0, 1.0),
                                   (-1.0, 1.0, -1.0),
                                   ( 1.0, 1.0, -1.0),
                                   ( 0.0, 1.0, 0.0) ])
class TestPyramid(Testing.vtkTest, CellTestBase):
    # Pyramid: square base plus apex, 5 points.
    def setUp(self):
        self.Cell = GenerateCell(vtk.VTK_PYRAMID,
                                 [ (-1.0, -1.0, -1.0),
                                   ( 1.0, -1.0, -1.0),
                                   ( 1.0, 1.0, -1.0),
                                   (-1.0, 1.0, -1.0),
                                   ( 0.0, 0.0, 1.0) ])
if __name__ == '__main__':
    # Run all four cell-type suites through the VTK test harness.
    Testing.main([(TestPyramid,'test'),(TestWedge,'test'),(TestTetra, 'test'),(TestHexahedron,'test')])
| bsd-3-clause |
BMJHayward/django | django/core/management/commands/shell.py | 492 | 3951 | import os
from django.core.management.base import BaseCommand
class Command(BaseCommand):
    help = "Runs a Python interactive interpreter. Tries to use IPython or bpython, if one of them is available."
    requires_system_checks = False
    # Preference order tried by run_shell() when no interface is forced.
    shells = ['ipython', 'bpython']

    def add_arguments(self, parser):
        parser.add_argument('--plain', action='store_true', dest='plain',
            help='Tells Django to use plain Python, not IPython or bpython.')
        parser.add_argument('--no-startup', action='store_true', dest='no_startup',
            help='When using plain Python, ignore the PYTHONSTARTUP environment variable and ~/.pythonrc.py script.')
        parser.add_argument('-i', '--interface', choices=self.shells, dest='interface',
            help='Specify an interactive interpreter interface. Available options: "ipython" and "bpython"')

    def _ipython_pre_011(self):
        """Start IPython pre-0.11"""
        from IPython.Shell import IPShell
        shell = IPShell(argv=[])
        shell.mainloop()

    def _ipython_pre_100(self):
        """Start IPython pre-1.0.0"""
        from IPython.frontend.terminal.ipapp import TerminalIPythonApp
        app = TerminalIPythonApp.instance()
        app.initialize(argv=[])
        app.start()

    def _ipython(self):
        """Start IPython >= 1.0"""
        from IPython import start_ipython
        start_ipython(argv=[])

    def ipython(self):
        """Start any version of IPython"""
        # Try newest first; each helper raises ImportError if its IPython
        # entry point is missing.
        for ip in (self._ipython, self._ipython_pre_100, self._ipython_pre_011):
            try:
                ip()
            except ImportError:
                pass
            else:
                return
        # no IPython, raise ImportError
        raise ImportError("No IPython")

    def bpython(self):
        import bpython
        bpython.embed()

    def run_shell(self, shell=None):
        """Start the named shell, or the first available one from self.shells.

        Raises ImportError if none can be imported.
        """
        available_shells = [shell] if shell else self.shells

        for shell in available_shells:
            try:
                return getattr(self, shell)()
            except ImportError:
                pass
        raise ImportError

    def handle(self, **options):
        try:
            if options['plain']:
                # Don't bother loading IPython, because the user wants plain Python.
                raise ImportError

            self.run_shell(shell=options['interface'])
        except ImportError:
            # Fall back to the built-in interactive interpreter.
            import code
            # Set up a dictionary to serve as the environment for the shell, so
            # that tab completion works on objects that are imported at runtime.
            # See ticket 5082.
            imported_objects = {}
            try:  # Try activating rlcompleter, because it's handy.
                import readline
            except ImportError:
                pass
            else:
                # We don't have to wrap the following import in a 'try', because
                # we already know 'readline' was imported successfully.
                import rlcompleter
                readline.set_completer(rlcompleter.Completer(imported_objects).complete)
                readline.parse_and_bind("tab:complete")

            # We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system
            # conventions and get $PYTHONSTARTUP first then .pythonrc.py.
            if not options['no_startup']:
                for pythonrc in (os.environ.get("PYTHONSTARTUP"), '~/.pythonrc.py'):
                    if not pythonrc:
                        continue
                    pythonrc = os.path.expanduser(pythonrc)
                    if not os.path.isfile(pythonrc):
                        continue
                    try:
                        with open(pythonrc) as handle:
                            exec(compile(handle.read(), pythonrc, 'exec'), imported_objects)
                    except NameError:
                        pass
            code.interact(local=imported_objects)
| bsd-3-clause |
KosiehBarter/anaconda | widgets/python/AnacondaWidgets.py | 19 | 3138 | #
# Copyright (C) 2011-2013 Red Hat, Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Author: Chris Lumens <clumens@redhat.com>
#
"""
These classes and methods wrap the bindings automatically created by
gobject-introspection. They allow for creating more pythonic bindings
where necessary. For instance instead of creating a class and then
setting a bunch of properties, these classes allow passing the properties
at creation time.
"""
from gi.importer import modules
from gi.overrides import override
Anaconda = modules['AnacondaWidgets']._introspection_module
__all__ = []
class MountpointSelector(Anaconda.MountpointSelector):
    """Pythonic wrapper for the introspected MountpointSelector widget.

    Display properties may be passed at construction time instead of via
    individual set_property() calls. Two extra attributes, ``device`` and
    ``root``, are carried along but do not affect the widget's appearance.
    """

    def __init__(self, name=None, size=None, mountpoint=None):
        Anaconda.MountpointSelector.__init__(self)
        for prop, value in (("name", name),
                            ("size", size),
                            ("mountpoint", mountpoint)):
            if value:
                self.set_property(prop, value)

        # Extensions unrelated to the look of the widget.
        self._device = None
        self._root = None

    @property
    def size(self):
        return self.get_property("size")

    @size.setter
    def size(self, value):
        self.set_property("size", value)

    @property
    def device(self):
        return self._device

    @device.setter
    def device(self, value):
        self._device = value

    @property
    def root(self):
        return self._root

    @root.setter
    def root(self, value):
        self._root = value
MountpointSelector = override(MountpointSelector)
__all__.append('MountpointSelector')
class SpokeSelector(Anaconda.SpokeSelector):
    """Pythonic wrapper for the introspected SpokeSelector widget.

    Any of title/icon/status may be supplied at construction time; only
    the properties actually given are forwarded to set_property().
    """

    def __init__(self, title=None, icon=None, status=None):
        Anaconda.SpokeSelector.__init__(self)
        for prop, value in (("title", title),
                            ("icon", icon),
                            ("status", status)):
            if value:
                self.set_property(prop, value)
SpokeSelector = override(SpokeSelector)
__all__.append('SpokeSelector')
class DiskOverview(Anaconda.DiskOverview):
    """Pythonic wrapper for the introspected DiskOverview widget.

    All display properties are required at construction time except the
    optional popup text.
    """

    def __init__(self, description, kind, capacity, free, name, popup=None):
        Anaconda.DiskOverview.__init__(self)
        # Same property order as before, in case the widget cares.
        for prop, value in (("description", description),
                            ("kind", kind),
                            ("free", free),
                            ("capacity", capacity),
                            ("name", name)):
            self.set_property(prop, value)
        if popup:
            self.set_property("popup-info", popup)
DiskOverview = override(DiskOverview)
__all__.append('DiskOverview')
| gpl-2.0 |
kevinlondon/flexx | flexx/app/tests/test_serializer.py | 21 | 1468 |
from pytest import raises
from flexx.util.testing import run_tests_if_main
from flexx.pyscript import py2js, evaljs
from flexx.app.serialize import Serializer, serializer
class Foo:
    """Serializable test type.

    Implements the two hooks the flexx Serializer looks for: ``__json__``
    (instance -> tagged plain dict) and ``__from_json__`` (the reviver
    registered under the name 'Foo').

    NOTE(review): this class is also transpiled to JavaScript with py2js
    in test_js below, so it must stay within the PyScript subset.
    """
    def __init__(self, val):
        self.val = val
    def __json__(self):
        # Tagged dict understood by Serializer.saves().
        return {'__type__': 'Foo', 'val': self.val}
    def __from_json__(obj):
        # Deliberately has no 'self': used as a plain reviver function via
        # serializer.add_reviver('Foo', Foo.__from_json__).
        return Foo(obj['val'])
    def __eq__(self, other):
        # Value equality only; defining __eq__ leaves Foo unhashable.
        return self.val == other.val
class Bar:
    """Non-serializable type: lacks the __json__ hook, so Serializer.saves
    must raise TypeError for it (exercised by test_python_wrong)."""
    pass
foo1, foo2, foo3 = Foo(42), Foo(7), Foo(None)
s1 = {'a': foo1, 'b': [foo2, foo3]}
def test_python():
    """Round-trip the fixture through the Python-side serializer."""
    serializer.add_reviver('Foo', Foo.__from_json__)
    roundtripped = serializer.loads(serializer.saves(s1))
    assert roundtripped['a'].val + roundtripped['b'][0].val == 49
    assert s1 == roundtripped
def test_python_wrong():
    """Objects without a __json__ hook must be rejected."""
    with raises(TypeError):
        serializer.saves(Bar())
def test_js():
    """Round-trip the fixture through the JS-side serializer and check
    that the Python side can read the JS-produced text back."""
    fragments = [
        py2js(Serializer),
        py2js(Foo),
        'var serializer = new Serializer();\n',
        'var foo1 = new Foo(42), foo2 = new Foo(7), foo3 = new Foo(null);\n',
        'var s1 = {"a": foo1, "b": [foo2, foo3]};\n',
        'var text = serializer.saves(s1);\n',
        'var s2 = serializer.loads(text);\n',
        'text + "|" + (s2.a.val + s2.b[0].val);\n',
    ]
    result = evaljs(''.join(fragments))
    text, res = result.split('|')
    assert serializer.loads(text) == s1
    assert res == '49'
run_tests_if_main()
| bsd-2-clause |
Abjad/abjad | abjad/attach.py | 1 | 40809 | import copy
import importlib
import typing
from . import _inspect, exceptions
from . import tag as _tag
from .duration import Multiplier, Offset
from .score import AfterGraceContainer, BeforeGraceContainer, Component, Container, Leaf
from .storage import FormatSpecification, StorageFormatManager, storage
class Wrapper:
r"""
Wrapper.
.. container:: example
>>> component = abjad.Note("c'4")
>>> articulation = abjad.Articulation('accent', direction=abjad.Up)
>>> abjad.attach(articulation, component)
>>> wrapper = abjad.get.wrapper(component)
>>> string = abjad.storage(wrapper)
>>> print(string)
abjad.Wrapper(
indicator=abjad.Articulation('accent', Up),
tag=abjad.Tag(),
)
.. container:: example
Duplicate indicator warnings take two forms.
>>> voice_1 = abjad.Voice("c''4 d'' e'' f''", name='VoiceI')
>>> voice_2 = abjad.Voice("c'4 d' e' f'", name='VoiceII')
>>> abjad.attach(abjad.Clef('alto'), voice_2[0])
>>> staff = abjad.Staff([voice_1, voice_2], simultaneous=True)
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
<<
\context Voice = "VoiceI"
{
c''4
d''4
e''4
f''4
}
\context Voice = "VoiceII"
{
\clef "alto"
c'4
d'4
e'4
f'4
}
>>
First form when attempting to attach a contexted indicator to a leaf
that already carries a contexted indicator of the same type:
>>> abjad.attach(abjad.Clef('treble'), voice_2[0])
Traceback (most recent call last):
...
abjad...PersistentIndicatorError:
<BLANKLINE>
Can not attach ...
<BLANKLINE>
abjad.Wrapper(
context='Staff',
indicator=abjad.Clef('treble'),
tag=abjad.Tag(),
)
<BLANKLINE>
... to Note("c'4") in VoiceII because ...
<BLANKLINE>
abjad.Wrapper(
context='Staff',
indicator=abjad.Clef('alto'),
tag=abjad.Tag(),
)
<BLANKLINE>
... is already attached to the same leaf.
<BLANKLINE>
Second form when attempting to attach a contexted indicator to a leaf
governed by some other component carrying a contexted indicator of the
same type.
>>> abjad.attach(abjad.Clef('treble'), voice_1[0])
Traceback (most recent call last):
...
abjad...PersistentIndicatorError:
<BLANKLINE>
Can not attach ...
<BLANKLINE>
abjad.Wrapper(
context='Staff',
indicator=abjad.Clef('treble'),
tag=abjad.Tag(),
)
<BLANKLINE>
... to Note("c''4") in VoiceI because ...
<BLANKLINE>
abjad.Wrapper(
context='Staff',
indicator=abjad.Clef('alto'),
tag=abjad.Tag(),
)
<BLANKLINE>
... is already attached to Note("c'4") in VoiceII.
<BLANKLINE>
"""
### CLASS VARIABLES ###
__documentation_section__ = "Internals"
__slots__ = (
"_annotation",
"_component",
"_context",
"_deactivate",
"_effective_context",
"_indicator",
"_synthetic_offset",
"_tag",
)
### INITIALIZER ###
def __init__(
self,
annotation: str = None,
component=None,
context: str = None,
deactivate: bool = None,
indicator: typing.Any = None,
synthetic_offset: int = None,
tag: typing.Union[str, _tag.Tag] = None,
) -> None:
assert not isinstance(indicator, type(self)), repr(indicator)
if annotation is not None:
assert isinstance(annotation, str), repr(annotation)
self._annotation = annotation
if component is not None:
assert isinstance(component, Component), repr(component)
self._component = component
if deactivate is not None:
deactivate = bool(deactivate)
if context is not None:
assert isinstance(context, str), repr(context)
self._context = context
if deactivate is not None:
deactivate = bool(deactivate)
self._deactivate = deactivate
self._effective_context = None
self._indicator = indicator
if synthetic_offset is not None:
synthetic_offset = Offset(synthetic_offset)
self._synthetic_offset = synthetic_offset
if tag is not None:
assert isinstance(tag, (str, _tag.Tag))
tag = _tag.Tag(tag)
self._tag: _tag.Tag = tag
if component is not None:
self._bind_component(component)
### SPECIAL METHODS ###
def __copy__(self, *arguments) -> "Wrapper":
r"""
Copies wrapper.
.. container:: example
Preserves annotation flag:
>>> old_staff = abjad.Staff("c'4 d'4 e'4 f'4")
>>> abjad.annotate(old_staff[0], 'bow_direction', abjad.Down)
>>> string = abjad.lilypond(old_staff)
>>> print(string)
\new Staff {
c'4
d'4
e'4
f'4
}
>>> leaf = old_staff[0]
>>> abjad.get.annotation(leaf, 'bow_direction')
Down
>>> new_staff = abjad.mutate.copy(old_staff)
>>> string = abjad.lilypond(new_staff)
>>> print(string)
\new Staff {
c'4
d'4
e'4
f'4
}
>>> leaf = new_staff[0]
>>> abjad.get.annotation(leaf, "bow_direction")
Down
.. container:: example
Preserves tag:
>>> old_staff = abjad.Staff("c'4 d'4 e'4 f'4")
>>> clef = abjad.Clef("alto")
>>> abjad.attach(clef, old_staff[0], tag=abjad.Tag("RED:M1"))
>>> string = abjad.lilypond(old_staff, tags=True)
>>> print(string)
\new Staff
{
%! RED
%! M1
\clef "alto"
c'4
d'4
e'4
f'4
}
>>> leaf = old_staff[0]
>>> wrapper = abjad.get.wrapper(leaf)
>>> string = abjad.storage(wrapper)
>>> print(string)
abjad.Wrapper(
context='Staff',
indicator=abjad.Clef('alto'),
tag=abjad.Tag('RED:M1'),
)
>>> new_staff = abjad.mutate.copy(old_staff)
>>> string = abjad.lilypond(new_staff, tags=True)
>>> print(string)
\new Staff
{
%! RED
%! M1
\clef "alto"
c'4
d'4
e'4
f'4
}
>>> leaf = new_staff[0]
>>> wrapper = abjad.get.wrapper(leaf)
>>> string = abjad.storage(wrapper)
>>> print(string)
abjad.Wrapper(
context='Staff',
indicator=abjad.Clef('alto'),
tag=abjad.Tag('RED:M1'),
)
.. container:: example
Preserves deactivate flag:
>>> old_staff = abjad.Staff("c'4 d'4 e'4 f'4")
>>> abjad.attach(
... abjad.Clef('alto'),
... old_staff[0],
... deactivate=True,
... tag=abjad.Tag("RED:M1"),
... )
>>> string = abjad.lilypond(old_staff, tags=True)
>>> print(string)
\new Staff
{
%! RED
%! M1
%@% \clef "alto"
c'4
d'4
e'4
f'4
}
>>> leaf = old_staff[0]
>>> wrapper = abjad.get.wrapper(leaf)
>>> string = abjad.storage(wrapper)
>>> print(string)
abjad.Wrapper(
context='Staff',
deactivate=True,
indicator=abjad.Clef('alto'),
tag=abjad.Tag('RED:M1'),
)
>>> new_staff = abjad.mutate.copy(old_staff)
>>> string = abjad.lilypond(new_staff, tags=True)
>>> print(string)
\new Staff
{
%! RED
%! M1
%@% \clef "alto"
c'4
d'4
e'4
f'4
}
>>> leaf = new_staff[0]
>>> wrapper = abjad.get.wrapper(leaf)
>>> string = abjad.storage(wrapper)
>>> print(string)
abjad.Wrapper(
context='Staff',
deactivate=True,
indicator=abjad.Clef('alto'),
tag=abjad.Tag('RED:M1'),
)
Copies all properties except component.
Copy operations must supply component after wrapper copy.
"""
new = type(self)(
annotation=self.annotation,
component=None,
context=self.context,
deactivate=self.deactivate,
indicator=copy.copy(self.indicator),
synthetic_offset=self.synthetic_offset,
tag=self.tag,
)
return new
def __eq__(self, argument) -> bool:
"""
Is true when all initialization values of Abjad value object equal
the initialization values of ``argument``.
"""
return StorageFormatManager.compare_objects(self, argument)
def __hash__(self) -> int:
"""
Hashes Abjad value object.
"""
hash_values = StorageFormatManager(self).get_hash_values()
try:
result = hash(hash_values)
except TypeError:
raise TypeError(f"unhashable type: {self}")
return result
def __repr__(self) -> str:
"""
Gets interpreter representation.
"""
return StorageFormatManager(self).get_repr_format()
### PRIVATE METHODS ###
def _bind_component(self, component):
if getattr(self.indicator, "context", None) is not None:
self._warn_duplicate_indicator(component)
self._unbind_component()
self._component = component
self._update_effective_context()
if getattr(self.indicator, "_mutates_offsets_in_seconds", False):
self._component._update_later(offsets_in_seconds=True)
component._wrappers.append(self)
def _bind_effective_context(self, correct_effective_context):
self._unbind_effective_context()
if correct_effective_context is not None:
correct_effective_context._dependent_wrappers.append(self)
self._effective_context = correct_effective_context
self._update_effective_context()
if getattr(self.indicator, "_mutates_offsets_in_seconds", False):
correct_effective_context._update_later(offsets_in_seconds=True)
def _detach(self):
self._unbind_component()
self._unbind_effective_context()
return self
def _find_correct_effective_context(self):
    """Find the context component that should carry this wrapper's indicator.

    Resolves ``self.context`` (an abjad class name or a literal
    context/voice name) against the wrapped component's parentage.
    Returns the matching context component, or none when ``self.context``
    is unset or nothing in the parentage matches.
    """
    abjad = importlib.import_module("abjad")
    if self.context is None:
        return None
    # ``self.context`` may name an abjad class (e.g. "Staff"); otherwise
    # fall back to treating it as a literal context/voice name.
    context = getattr(abjad, self.context, self.context)
    candidate = None
    parentage = self.component._get_parentage()
    if isinstance(context, type):
        for component in parentage:
            if not hasattr(component, "_lilypond_type"):
                continue
            if isinstance(component, context):
                candidate = component
                break
    elif isinstance(context, str):
        for component in parentage:
            if not hasattr(component, "_lilypond_type"):
                continue
            if component.name == context or component.lilypond_type == context:
                candidate = component
                break
    else:
        # BUG FIX: the message lacked its f-prefix, so the literal text
        # "{context!r}" was raised instead of the offending value.
        raise TypeError(f"must be context or string: {context!r}.")
    # When the candidate is a voice, prefer the outermost voice in the
    # parentage that shares its name (nested voices may share a name).
    if candidate.__class__.__name__ == "Voice":
        for component in reversed(parentage):
            if component.__class__.__name__ != "Voice":
                continue
            if component.name == candidate.name:
                candidate = component
                break
    return candidate
def _get_effective_context(self):
if self.component is not None:
self.component._update_now(indicators=True)
return self._effective_context
def _get_format_pieces(self):
result = []
if self.annotation:
return result
if hasattr(self.indicator, "_get_lilypond_format_bundle"):
bundle = self.indicator._get_lilypond_format_bundle(self.component)
bundle.tag_format_contributions(self.tag, deactivate=self.deactivate)
return bundle
try:
context = self._get_effective_context()
lilypond_format = self.indicator._get_lilypond_format(context=context)
except TypeError:
lilypond_format = self.indicator._get_lilypond_format()
if isinstance(lilypond_format, str):
lilypond_format = [lilypond_format]
assert isinstance(lilypond_format, (tuple, list))
lilypond_format = _tag.double_tag(
lilypond_format, self.tag, deactivate=self.deactivate
)
result.extend(lilypond_format)
if self._get_effective_context() is not None:
return result
result = [rf"%%% {_} %%%" for _ in result]
return result
def _get_format_specification(self):
keywords = [
"annotation",
"context",
"deactivate",
"indicator",
"synthetic_offset",
"tag",
]
return FormatSpecification(
client=self,
storage_format_args_values=None,
storage_format_keyword_names=keywords,
)
def _unbind_component(self):
if self._component is not None and self in self._component._wrappers:
self._component._wrappers.remove(self)
self._component = None
def _unbind_effective_context(self):
if (
self._effective_context is not None
and self in self._effective_context._dependent_wrappers
):
self._effective_context._dependent_wrappers.remove(self)
self._effective_context = None
def _update_effective_context(self):
correct_effective_context = self._find_correct_effective_context()
# print(correct_effective_context)
if self._effective_context is not correct_effective_context:
self._bind_effective_context(correct_effective_context)
def _warn_duplicate_indicator(self, component):
if self.deactivate is True:
return
prototype = type(self.indicator)
command = getattr(self.indicator, "command", None)
wrapper = _inspect._get_effective(
component,
prototype,
attributes={"command": command},
unwrap=False,
)
wrapper_format_slot = None
if wrapper is not None:
wrapper_format_slot = getattr(wrapper.indicator, "format_slot", None)
my_format_slot = getattr(self.indicator, "format_slot", None)
if (
wrapper is None
or wrapper.context is None
or wrapper.deactivate is True
or wrapper.start_offset != self.start_offset
or wrapper_format_slot != my_format_slot
):
return
my_leak = getattr(self.indicator, "leak", None)
if getattr(wrapper.indicator, "leak", None) != my_leak:
return
context = None
for parent in component._get_parentage():
if hasattr(parent, "_lilypond_type"):
context = parent
break
wrapper_context = None
for parent in wrapper.component._get_parentage():
if hasattr(parent, "_lilypond_type"):
wrapper_context = parent
break
if wrapper.indicator == self.indicator and context is not wrapper_context:
return
message = f"\n\nCan not attach ...\n\n{storage(self)}\n\n..."
message += f" to {repr(component)}"
message += f" in {getattr(context, 'name', None)} because ..."
message += f"\n\n{storage(wrapper)}\n\n"
message += "... is already attached"
if component is wrapper.component:
message += " to the same leaf."
else:
message += f" to {repr(wrapper.component)}"
message += f" in {wrapper_context.name}."
message += "\n"
raise exceptions.PersistentIndicatorError(message)
### PUBLIC PROPERTIES ###
@property
def annotation(self) -> typing.Optional[str]:
"""
Gets wrapper annotation.
.. container:: example
>>> note = abjad.Note("c'4")
>>> articulation = abjad.Articulation('accent', direction=abjad.Up)
>>> abjad.attach(articulation, note)
>>> wrapper = abjad.get.wrapper(note)
>>> wrapper.annotation is None
True
.. container:: example
>>> note = abjad.Note("c'4")
>>> articulation = abjad.Articulation('accent', direction=abjad.Up)
>>> abjad.annotate(note, 'foo', articulation)
>>> abjad.get.annotation(note, 'foo')
Articulation('accent', Up)
"""
return self._annotation
@property
def component(self):
"""
Gets start component.
Returns component or none.
"""
return self._component
@property
def context(self) -> typing.Optional[str]:
"""
Gets context (name).
"""
return self._context
@property
def deactivate(self) -> typing.Optional[bool]:
"""
Is true when wrapper deactivates tag.
"""
assert self._deactivate in (True, False, None)
return self._deactivate
@deactivate.setter
def deactivate(self, argument):
assert argument in (True, False, None)
self._deactivate: typing.Optional[bool] = argument
@property
def indicator(self) -> typing.Any:
"""
Gets indicator.
"""
return self._indicator
@property
def leaked_start_offset(self) -> Offset:
r"""
Gets start offset and checks to see whether indicator leaks to the
right.
This is either the wrapper's synthetic offset (if set); or the START
offset of the wrapper's component (if indicator DOES NOT leak); or else
the STOP offset of the wrapper's component (if indicator DOES leak).
.. container:: example
Start- and stop-text-spans attach to the same leaf. But
stop-text-span leaks to the right:
>>> voice = abjad.Voice("c'2 d'2")
>>> start_text_span = abjad.StartTextSpan()
>>> abjad.attach(start_text_span, voice[0])
>>> stop_text_span = abjad.StopTextSpan(leak=True)
>>> abjad.attach(stop_text_span, voice[0])
>>> abjad.show(voice) # doctest: +SKIP
>>> string = abjad.lilypond(voice)
>>> print(string)
\new Voice
{
c'2
\startTextSpan
<> \stopTextSpan
d'2
}
Start offset and leaked start offset are the same for
start-text-span:
>>> wrapper = abjad.get.wrapper(voice[0], abjad.StartTextSpan)
>>> wrapper.start_offset, wrapper.leaked_start_offset
(Offset((0, 1)), Offset((0, 1)))
Start offset and leaked start offset differ for stop-text-span:
>>> wrapper = abjad.get.wrapper(voice[0], abjad.StopTextSpan)
>>> wrapper.start_offset, wrapper.leaked_start_offset
(Offset((0, 1)), Offset((1, 2)))
Returns offset.
"""
if self._synthetic_offset is not None:
return self._synthetic_offset
if not getattr(self.indicator, "leak", False):
return self._component._get_timespan().start_offset
else:
return self._component._get_timespan().stop_offset
@property
def start_offset(self) -> Offset:
"""
Gets start offset.
This is either the wrapper's synthetic offset or the start offset of
the wrapper's component.
"""
if self._synthetic_offset is not None:
return self._synthetic_offset
return self._component._get_timespan().start_offset
@property
def synthetic_offset(self):
"""
Gets synthetic offset.
Returns offset or none.
"""
return self._synthetic_offset
@property
def tag(self) -> _tag.Tag:
"""
Gets and sets tag.
"""
assert isinstance(self._tag, _tag.Tag), repr(self._tag)
return self._tag
@tag.setter
def tag(self, argument):
if not isinstance(argument, (str, _tag.Tag)):
raise Exception(f"string or tag: {argument!r}.")
tag = _tag.Tag(argument)
self._tag = tag
### FUNCTIONS ###
def annotate(component, annotation, indicator) -> None:
r"""
Annotates ``component`` with ``indicator``.
.. container:: example
Annotates first note in staff:
>>> staff = abjad.Staff("c'4 d' e' f'")
>>> abjad.annotate(staff[0], 'bow_direction', abjad.Down)
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
c'4
d'4
e'4
f'4
}
>>> abjad.get.annotation(staff[0], 'bow_direction')
Down
>>> abjad.get.annotation(staff[0], 'bow_fraction') is None
True
>>> abjad.get.annotation(staff[0], 'bow_fraction', 99)
99
"""
if isinstance(annotation, _tag.Tag):
message = "use the tag=None keyword instead of annotate():\n"
message += f" {repr(annotation)}"
raise Exception(message)
assert isinstance(annotation, str), repr(annotation)
Wrapper(annotation=annotation, component=component, indicator=indicator)
def attach( # noqa: 302
attachable,
target,
context=None,
deactivate=None,
do_not_test=None,
synthetic_offset=None,
tag=None,
wrapper=None,
):
r"""
Attaches ``attachable`` to ``target``.
First form attaches indicator ``attachable`` to single leaf ``target``.
Second for attaches grace container ``attachable`` to leaf ``target``.
Third form attaches wrapper ``attachable`` to unknown (?) ``target``.
.. container:: example
Attaches clef to first note in staff:
>>> staff = abjad.Staff("c'4 d' e' f'")
>>> abjad.attach(abjad.Clef('alto'), staff[0])
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\clef "alto"
c'4
d'4
e'4
f'4
}
.. container:: example
Attaches accent to last note in staff:
>>> staff = abjad.Staff("c'4 d' e' f'")
>>> abjad.attach(abjad.Articulation('>'), staff[-1])
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
c'4
d'4
e'4
f'4
- \accent
}
.. container:: example
Works with context names:
>>> voice = abjad.Voice("c'4 d' e' f'", name='MusicVoice')
>>> staff = abjad.Staff([voice], name='MusicStaff')
>>> abjad.attach(abjad.Clef('alto'), voice[0], context='MusicStaff')
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\context Staff = "MusicStaff"
{
\context Voice = "MusicVoice"
{
\clef "alto"
c'4
d'4
e'4
f'4
}
}
>>> for leaf in abjad.select(staff).leaves():
... leaf, abjad.get.effective(leaf, abjad.Clef)
...
(Note("c'4"), Clef('alto'))
(Note("d'4"), Clef('alto'))
(Note("e'4"), Clef('alto'))
(Note("f'4"), Clef('alto'))
Derives context from default ``attachable`` context when ``context`` is
none.
.. container:: example
Two contexted indicators can not be attached at the same offset if both
indicators are active:
>>> staff = abjad.Staff("c'4 d' e' f'")
>>> abjad.attach(abjad.Clef('treble'), staff[0])
>>> abjad.attach(abjad.Clef('alto'), staff[0])
Traceback (most recent call last):
...
abjad...PersistentIndicatorError: Can not attach ...
But simultaneous contexted indicators are allowed if only one is active
(and all others are inactive):
>>> staff = abjad.Staff("c'4 d' e' f'")
>>> abjad.attach(abjad.Clef('treble'), staff[0])
>>> abjad.attach(
... abjad.Clef('alto'),
... staff[0],
... deactivate=True,
... tag=abjad.Tag("+PARTS"),
... )
>>> abjad.attach(
... abjad.Clef('tenor'),
... staff[0],
... deactivate=True,
... tag=abjad.Tag("+PARTS"),
... )
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff, tags=True)
>>> print(string)
\new Staff
{
\clef "treble"
%! +PARTS
%@% \clef "alto"
%! +PARTS
%@% \clef "tenor"
c'4
d'4
e'4
f'4
}
Active indicator is always effective when competing inactive indicators
are present:
>>> for note in staff:
... clef = abjad.get.effective(staff[0], abjad.Clef)
... note, clef
...
(Note("c'4"), Clef('treble'))
(Note("d'4"), Clef('treble'))
(Note("e'4"), Clef('treble'))
(Note("f'4"), Clef('treble'))
But a lone inactivate indicator is effective when no active indicator
is present:
>>> staff = abjad.Staff("c'4 d' e' f'")
>>> abjad.attach(
... abjad.Clef('alto'),
... staff[0],
... deactivate=True,
... tag=abjad.Tag("+PARTS"),
... )
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff, tags=True)
>>> print(string)
\new Staff
{
%! +PARTS
%@% \clef "alto"
c'4
d'4
e'4
f'4
}
>>> for note in staff:
... clef = abjad.get.effective(staff[0], abjad.Clef)
... note, clef
...
(Note("c'4"), Clef('alto'))
(Note("d'4"), Clef('alto'))
(Note("e'4"), Clef('alto'))
(Note("f'4"), Clef('alto'))
.. container:: example
Tag must exist when ``deactivate`` is true:
>>> staff = abjad.Staff("c'4 d' e' f'")
>>> abjad.attach(abjad.Clef('alto'), staff[0], deactivate=True)
Traceback (most recent call last):
...
Exception: tag must exist when deactivate is true.
.. container:: example
Returns wrapper when ``wrapper`` is true:
>>> staff = abjad.Staff("c'4 d' e' f'")
>>> wrapper = abjad.attach(abjad.Clef('alto'), staff[0], wrapper=True)
>>> string = abjad.storage(wrapper)
>>> print(string)
abjad.Wrapper(
context='Staff',
indicator=abjad.Clef('alto'),
tag=abjad.Tag(),
)
Otherwise returns none.
"""
if isinstance(attachable, _tag.Tag):
message = "use the tag=None keyword instead of attach():\n"
message += f" {repr(attachable)}"
raise Exception(message)
if tag is not None and not isinstance(tag, _tag.Tag):
raise Exception(f"must be be tag: {repr(tag)}")
if isinstance(attachable, Multiplier):
message = "use the Leaf.multiplier property to multiply leaf duration."
raise Exception(message)
assert attachable is not None, repr(attachable)
assert target is not None, repr(target)
if context is not None and hasattr(attachable, "_main_leaf"):
raise Exception(f"set context only for indicators, not {attachable!r}.")
if deactivate is True and tag is None:
raise Exception("tag must exist when deactivate is true.")
if hasattr(attachable, "_before_attach"):
attachable._before_attach(target)
if hasattr(attachable, "_attachment_test_all") and not do_not_test:
result = attachable._attachment_test_all(target)
if result is not True:
assert isinstance(result, list), repr(result)
result = [" " + _ for _ in result]
message = f"{attachable!r}._attachment_test_all():"
result.insert(0, message)
message = "\n".join(result)
raise Exception(message)
prototype = (AfterGraceContainer, BeforeGraceContainer)
if isinstance(attachable, prototype):
if not hasattr(target, "written_duration"):
raise Exception("grace containers attach to single leaf only.")
attachable._attach(target)
return
assert isinstance(target, Component), repr(target)
if isinstance(target, Container):
acceptable = False
if isinstance(attachable, (dict, str, _tag.Tag, Wrapper)):
acceptable = True
if getattr(attachable, "_can_attach_to_containers", False):
acceptable = True
if not acceptable:
message = f"can not attach {attachable!r} to containers: {target!r}"
raise Exception(message)
elif not isinstance(target, Leaf):
message = f"indicator {attachable!r} must attach to leaf, not {target!r}."
raise Exception(message)
component = target
assert isinstance(component, Component), repr(component)
annotation = None
if isinstance(attachable, Wrapper):
annotation = attachable.annotation
context = context or attachable.context
deactivate = deactivate or attachable.deactivate
synthetic_offset = synthetic_offset or attachable.synthetic_offset
tag = tag or attachable.tag
attachable._detach()
attachable = attachable.indicator
if hasattr(attachable, "context"):
context = context or attachable.context
wrapper_ = Wrapper(
annotation=annotation,
component=component,
context=context,
deactivate=deactivate,
indicator=attachable,
synthetic_offset=synthetic_offset,
tag=tag,
)
if wrapper is True:
return wrapper_
def detach(argument, target=None, by_id=False):
r"""
Detaches indicators-equal-to-``argument`` from ``target``.
Set ``by_id`` to true to detach exact ``argument`` from ``target`` (rather
than detaching all indicators-equal-to-``argument``).
.. container:: example
Detaches articulations from first note in staff:
>>> staff = abjad.Staff("c'4 d' e' f'")
>>> abjad.attach(abjad.Articulation('>'), staff[0])
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
c'4
- \accent
d'4
e'4
f'4
}
>>> abjad.detach(abjad.Articulation, staff[0])
(Articulation('>'),)
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
c'4
d'4
e'4
f'4
}
.. container:: example
The use of ``by_id`` is motivated by the following.
Consider the three document-specifier markups below:
>>> markup_1 = abjad.Markup('tutti', direction=abjad.Up)
>>> markup_2 = abjad.Markup('with the others', direction=abjad.Up)
>>> markup_3 = abjad.Markup('with the others', direction=abjad.Up)
Markups two and three compare equal:
>>> markup_2 == markup_3
True
But document-tagging like this makes sense for score and two diferent
parts:
>>> staff = abjad.Staff("c'4 d' e' f'")
>>> abjad.attach(markup_1, staff[0], tag=abjad.Tag("+SCORE"))
>>> abjad.attach(
... markup_2,
... staff[0],
... deactivate=True,
... tag=abjad.Tag("+PARTS_VIOLIN_1"),
... )
>>> abjad.attach(
... markup_3,
... staff[0],
... deactivate=True,
... tag=abjad.Tag("+PARTS_VIOLIN_2"),
... )
>>> abjad.show(staff) # doctest: +SKIP
>>> string = abjad.lilypond(staff, tags=True)
>>> print(string)
\new Staff
{
c'4
%! +SCORE
^ \markup { tutti }
%! +PARTS_VIOLIN_1
%@% ^ \markup { with the others }
%! +PARTS_VIOLIN_2
%@% ^ \markup { with the others }
d'4
e'4
f'4
}
The question is then how to detach just one of the two markups that
compare equal to each other?
Passing in one of the markup objects directory doesn't work. This is
because detach tests for equality to input argument:
>>> abjad.detach(markup_2, staff[0])
(Markup(contents=['with the others'], direction=Up), Markup(contents=['with the others'], direction=Up))
>>> abjad.show(staff) # doctest: +SKIP
>>> string = abjad.lilypond(staff, tags=True)
>>> print(string)
\new Staff
{
c'4
%! +SCORE
^ \markup { tutti }
d'4
e'4
f'4
}
We start again:
>>> staff = abjad.Staff("c'4 d' e' f'")
>>> abjad.attach(markup_1, staff[0], tag=abjad.Tag("+SCORE"))
>>> abjad.attach(
... markup_2,
... staff[0],
... deactivate=True,
... tag=abjad.Tag("+PARTS_VIOLIN_1"),
... )
>>> abjad.attach(
... markup_3,
... staff[0],
... deactivate=True,
... tag=abjad.Tag("+PARTS_VIOLIN_2"),
... )
>>> abjad.show(staff) # doctest: +SKIP
>>> string = abjad.lilypond(staff, tags=True)
>>> print(string)
\new Staff
{
c'4
%! +SCORE
^ \markup { tutti }
%! +PARTS_VIOLIN_1
%@% ^ \markup { with the others }
%! +PARTS_VIOLIN_2
%@% ^ \markup { with the others }
d'4
e'4
f'4
}
This time we set ``by_id`` to true. Now detach checks the exact id of
its input argument (rather than just testing for equality). This gives
us what we want:
>>> abjad.detach(markup_2, staff[0], by_id=True)
(Markup(contents=['with the others'], direction=Up),)
>>> abjad.show(staff) # doctest: +SKIP
>>> string = abjad.lilypond(staff, tags=True)
>>> print(string)
\new Staff
{
c'4
%! +SCORE
^ \markup { tutti }
%! +PARTS_VIOLIN_2
%@% ^ \markup { with the others }
d'4
e'4
f'4
}
.. container:: example
REGRESSION. Attach-detach-attach pattern works correctly when detaching
wrappers:
>>> staff = abjad.Staff("c'4 d' e' f'")
>>> abjad.attach(abjad.Clef('alto'), staff[0])
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\clef "alto"
c'4
d'4
e'4
f'4
}
>>> wrapper = abjad.get.wrappers(staff[0])[0]
>>> abjad.detach(wrapper, wrapper.component)
(Wrapper(context='Staff', indicator=Clef('alto'), tag=Tag()),)
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
c'4
d'4
e'4
f'4
}
>>> abjad.attach(abjad.Clef('tenor'), staff[0])
>>> abjad.show(staff) # doctest: +SKIP
.. docs::
>>> string = abjad.lilypond(staff)
>>> print(string)
\new Staff
{
\clef "tenor"
c'4
d'4
e'4
f'4
}
Returns tuple of zero or more detached items.
"""
assert target is not None
after_grace_container = None
before_grace_container = None
if isinstance(argument, type):
if "AfterGraceContainer" in argument.__name__:
after_grace_container = target._after_grace_container
elif "BeforeGraceContainer" in argument.__name__:
before_grace_container = target._before_grace_container
else:
assert hasattr(target, "_wrappers")
result = []
for wrapper in target._wrappers[:]:
if isinstance(wrapper, argument):
target._wrappers.remove(wrapper)
result.append(wrapper)
elif isinstance(wrapper.indicator, argument):
wrapper._detach()
result.append(wrapper.indicator)
result = tuple(result)
return result
else:
if "AfterGraceContainer" in argument.__class__.__name__:
after_grace_container = target._after_grace_container
elif "BeforeGraceContainer" in argument.__class__.__name__:
before_grace_container = target._before_grace_container
else:
assert hasattr(target, "_wrappers")
result = []
for wrapper in target._wrappers[:]:
if wrapper is argument:
wrapper._detach()
result.append(wrapper)
elif wrapper.indicator == argument:
if by_id is True and id(argument) != id(wrapper.indicator):
pass
else:
wrapper._detach()
result.append(wrapper.indicator)
result = tuple(result)
return result
items = []
if after_grace_container is not None:
items.append(after_grace_container)
if before_grace_container is not None:
items.append(before_grace_container)
if by_id is True:
items = [_ for _ in items if id(_) == id(argument)]
for item in items:
item._detach()
items = tuple(items)
return items
| gpl-3.0 |
ihsanudin/odoo | openerp/report/render/rml2pdf/color.py | 443 | 1720 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from reportlab.lib import colors
import re
# all colors known to reportlab by name, e.g. 'red', 'blue', ...
allcols = colors.getAllNamedColors()
# raw strings: '\(' and '\.' in plain literals are invalid escape sequences
# (DeprecationWarning since Python 3.6); patterns are byte-identical.
regex_t = re.compile(r'\(([0-9\.]*),([0-9\.]*),([0-9\.]*)\)')  # "(r,g,b)" float tuple
regex_h = re.compile(r'#([0-9a-zA-Z][0-9a-zA-Z])([0-9a-zA-Z][0-9a-zA-Z])([0-9a-zA-Z][0-9a-zA-Z])')  # "#RRGGBB"
def get(col_str):
    """Convert a color specification string to a reportlab-usable color.

    Accepted formats, tried in order:
      - a color name known to reportlab (e.g. 'blue'): returns the named
        color object;
      - a '(r,g,b)' tuple of floats: returns a (r, g, b) float tuple;
      - an HTML '#RRGGBB' hex code: returns a (r, g, b) tuple of floats
        scaled to 0..1.
    Falls back to ``colors.red`` when the string cannot be parsed.
    ``None`` is treated as an empty string.
    """
    if col_str is None:
        col_str = ''
    # membership test directly on the dict; no need for .keys() or for a
    # `global` statement since allcols is only read, never rebound
    if col_str in allcols:
        return allcols[col_str]
    res = regex_t.search(col_str, 0)
    if res:
        return float(res.group(1)), float(res.group(2)), float(res.group(3))
    res = regex_h.search(col_str, 0)
    if res:
        # each hex pair converted to a 0..1 float component
        return tuple([float(int(res.group(i), 16)) / 255 for i in range(1, 4)])
    return colors.red
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rven/odoo | addons/mail/tests/common.py | 1 | 43892 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import email
import email.policy
import time
from collections import defaultdict
from contextlib import contextmanager
from functools import partial
from lxml import html
from unittest.mock import patch
from smtplib import SMTPServerDisconnected
from odoo import exceptions
from odoo.addons.base.models.ir_mail_server import IrMailServer, MailDeliveryException
from odoo.addons.bus.models.bus import ImBus, json_dump
from odoo.addons.mail.models.mail_mail import MailMail
from odoo.addons.mail.models.mail_message import Message
from odoo.addons.mail.models.mail_notification import MailNotification
from odoo.tests import common, new_test_user
from odoo.tools import formataddr, pycompat
# helper creating test users with mail side effects disabled by default
# (no chatter log, no auto-subscription, no tracking, no password-reset email)
mail_new_test_user = partial(new_test_user, context={'mail_create_nolog': True, 'mail_create_nosubscribe': True, 'mail_notrack': True, 'no_reset_password': True})
class MockEmail(common.BaseCase):
    """ Tools, helpers and asserts for mailgateway-related tests
    Useful reminders
        Mail state: ('outgoing', 'Outgoing'), ('sent', 'Sent'),
        ('received', 'Received'), ('exception', 'Delivery Failed'),
        ('cancel', 'Cancelled')
    """
    # ------------------------------------------------------------
    # GATEWAY MOCK
    # ------------------------------------------------------------
    @contextmanager
    def mock_mail_gateway(self, mail_unlink_sent=False, sim_error=None):
        """ Mock the outgoing email stack: IrMailServer connect / build_email /
        send_email and MailMail create / unlink are patched so that no real
        email leaves the test while generated records are captured for asserts.

        :param mail_unlink_sent: when False (default) mail.mail unlink is
          neutralized so that sent mails can still be inspected afterwards;
        :param sim_error: optional simulated failure, one of
          'connect_smtp_notfound', 'connect_failure', 'send_assert',
          'send_disconnect', 'send_delivery';
        """
        build_email_origin = IrMailServer.build_email
        mail_create_origin = MailMail.create
        mail_unlink_origin = MailMail.unlink
        self.mail_unlink_sent = mail_unlink_sent
        self._init_mail_mock()
        def _ir_mail_server_connect(model, *args, **kwargs):
            if sim_error and sim_error == 'connect_smtp_notfound':
                raise exceptions.UserError(
                    "Missing SMTP Server\nPlease define at least one SMTP server, or provide the SMTP parameters explicitly.")
            if sim_error and sim_error == 'connect_failure':
                raise Exception("Some exception")
            return None
        def _ir_mail_server_build_email(model, *args, **kwargs):
            # record every built email for later inspection in asserts
            self._mails.append(kwargs)
            self._mails_args.append(args)
            return build_email_origin(model, *args, **kwargs)
        def _ir_mail_server_send_email(model, message, *args, **kwargs):
            if '@' not in message['To']:
                raise AssertionError(model.NO_VALID_RECIPIENT)
            if sim_error and sim_error == 'send_assert':
                raise AssertionError('AssertionError')
            elif sim_error and sim_error == 'send_disconnect':
                raise SMTPServerDisconnected('SMTPServerDisconnected')
            elif sim_error and sim_error == 'send_delivery':
                raise MailDeliveryException('MailDeliveryException')
            return message['Message-Id']
        def _mail_mail_create(model, *args, **kwargs):
            res = mail_create_origin(model, *args, **kwargs)
            self._new_mails += res.sudo()
            return res
        def _mail_mail_unlink(model, *args, **kwargs):
            if self.mail_unlink_sent:
                return mail_unlink_origin(model, *args, **kwargs)
            return True
        with patch.object(IrMailServer, 'connect', autospec=True, wraps=IrMailServer, side_effect=_ir_mail_server_connect) as ir_mail_server_connect_mock, \
                patch.object(IrMailServer, 'build_email', autospec=True, wraps=IrMailServer, side_effect=_ir_mail_server_build_email) as ir_mail_server_build_email_mock, \
                patch.object(IrMailServer, 'send_email', autospec=True, wraps=IrMailServer, side_effect=_ir_mail_server_send_email) as ir_mail_server_send_email_mock, \
                patch.object(MailMail, 'create', autospec=True, wraps=MailMail, side_effect=_mail_mail_create) as _mail_mail_create_mock, \
                patch.object(MailMail, 'unlink', autospec=True, wraps=MailMail, side_effect=_mail_mail_unlink) as mail_mail_unlink_mock:
            yield
    def _init_mail_mock(self):
        # reset captured outgoing emails and generated mail.mail records
        self._mails = []
        self._mails_args = []
        self._new_mails = self.env['mail.mail'].sudo()
    # ------------------------------------------------------------
    # GATEWAY TOOLS
    # ------------------------------------------------------------
    @classmethod
    def _init_mail_gateway(cls):
        """ Give default values for all email aliases and domain used by the
        incoming mail gateway. """
        cls.alias_domain = 'test.com'
        cls.alias_catchall = 'catchall.test'
        cls.alias_bounce = 'bounce.test'
        cls.env['ir.config_parameter'].set_param('mail.bounce.alias', cls.alias_bounce)
        cls.env['ir.config_parameter'].set_param('mail.catchall.domain', cls.alias_domain)
        cls.env['ir.config_parameter'].set_param('mail.catchall.alias', cls.alias_catchall)
        cls.mailer_daemon_email = formataddr(('MAILER-DAEMON', '%s@%s' % (cls.alias_bounce, cls.alias_domain)))
    def format(self, template, to='groups@example.com, other@gmail.com', subject='Frogs',
               email_from='Sylvie Lelitre <test.sylvie.lelitre@agrolait.com>', return_path='', cc='',
               extra='', msg_id='<1198923581.41972151344608186760.JavaMail@agrolait.com>'):
        """ Fill an incoming email template with the given header values. """
        return template.format(
            subject=subject, to=to, cc=cc,
            email_from=email_from, return_path=return_path,
            extra=extra, msg_id=msg_id)
    def format_and_process(self, template, email_from, to, subject='Frogs', cc='',
                           return_path='', extra='', msg_id=False,
                           model=None, target_model='mail.test.gateway', target_field='name'):
        """ Format an incoming email and feed it to the mail gateway, then
        return the record(s) created from it (searched by subject). """
        self.assertFalse(self.env[target_model].search([(target_field, '=', subject)]))
        if not msg_id:
            msg_id = "<%.7f-test@iron.sky>" % (time.time())
        mail = self.format(template, to=to, subject=subject, cc=cc, return_path=return_path, extra=extra, email_from=email_from, msg_id=msg_id)
        self.env['mail.thread'].with_context(mail_channel_noautofollow=True).message_process(model, mail)
        return self.env[target_model].search([(target_field, '=', subject)])
    def gateway_reply_wrecord(self, template, record, use_in_reply_to=True):
        """ Deprecated, remove in 14.4 """
        return self.gateway_mail_reply_wrecord(template, record, use_in_reply_to=use_in_reply_to)
    def gateway_mail_reply_wrecord(self, template, record, use_in_reply_to=True,
                                   target_model=None, target_field=None):
        """ Simulate a reply through the mail gateway. Usage: giving a record,
        find an email sent to him and use its message-ID to simulate a reply.
        Some noise is added in References just to test some robustness. """
        mail_mail = self._find_mail_mail_wrecord(record)
        if use_in_reply_to:
            extra = 'In-Reply-To:\r\n\t%s\n' % mail_mail.message_id
        else:
            disturbing_other_msg_id = '<123456.654321@another.host.com>'
            extra = 'References:\r\n\t%s\n\r%s' % (mail_mail.message_id, disturbing_other_msg_id)
        return self.format_and_process(
            template,
            mail_mail.email_to,
            mail_mail.reply_to,
            subject='Re: %s' % mail_mail.subject,
            extra=extra,
            msg_id='<123456.%s.%d@test.example.com>' % (record._name, record.id),
            target_model=target_model or record._name,
            target_field=target_field or record._rec_name,
        )
    def gateway_mail_reply_wemail(self, template, email_to, use_in_reply_to=True,
                                  target_model=None, target_field=None):
        """ Simulate a reply through the mail gateway. Usage: giving a record,
        find an email sent to him and use its message-ID to simulate a reply.
        Some noise is added in References just to test some robustness. """
        sent_mail = self._find_sent_mail_wemail(email_to)
        if use_in_reply_to:
            extra = 'In-Reply-To:\r\n\t%s\n' % sent_mail['message_id']
        else:
            disturbing_other_msg_id = '<123456.654321@another.host.com>'
            extra = 'References:\r\n\t%s\n\r%s' % (sent_mail['message_id'], disturbing_other_msg_id)
        return self.format_and_process(
            template,
            sent_mail['email_to'],
            sent_mail['reply_to'],
            subject='Re: %s' % sent_mail['subject'],
            extra=extra,
            target_model=target_model,
            target_field=target_field or 'name',
        )
    def from_string(self, text):
        # parse a raw email source string into an email.message object (SMTP policy)
        return email.message_from_string(pycompat.to_text(text), policy=email.policy.SMTP)
    def assertHtmlEqual(self, value, expected, message=None):
        """ Compare two HTML fragments after normalizing both through lxml. """
        tree = html.fragment_fromstring(value, parser=html.HTMLParser(encoding='utf-8'), create_parent='body')
        # mass mailing: add base tag we have to remove
        for base_node in tree.xpath('//base'):
            base_node.getparent().remove(base_node)
        # chatter: read more / read less TODO
        # wrap expected content the same way for a fair comparison
        expected_node = html.fragment_fromstring(expected, create_parent='body')
        if message:
            self.assertEqual(tree, expected_node, message)
        else:
            self.assertEqual(tree, expected_node)
    # ------------------------------------------------------------
    # GATEWAY GETTERS
    # ------------------------------------------------------------
    def _find_sent_mail_wemail(self, email_to):
        """ Find a sent email with a given list of recipients. Email should match
        exactly the recipients.
        :param email_to: a list of emails that will be compared to email_to
          of sent emails (also a list of emails);
        :return email: an email which is a dictionary mapping values given to
          ``build_email``;
        """
        for sent_email in self._mails:
            if set(sent_email['email_to']) == set([email_to]):
                break
        else:
            raise AssertionError('sent mail not found for email_to %s' % (email_to))
        return sent_email
    def _find_mail_mail_wid(self, mail_id):
        """ Find a ``mail.mail`` record based on a given ID (used notably when having
        mail ID in mailing traces).
        :return mail: a ``mail.mail`` record generated during the mock and matching
          given ID;
        """
        for mail in self._new_mails:
            if mail.id == mail_id:
                break
        else:
            raise AssertionError('mail.mail not found for ID %s' % (mail_id))
        return mail
    def _find_mail_mail_wpartners(self, recipients, status, mail_message=None, author=None):
        """ Find a mail.mail record based on various parameters, notably a list
        of recipients (partners).
        :param recipients: a ``res.partner`` recordset. Check all of them are in mail
          recipients to find the right mail.mail record;
        :param status: state of mail.mail. If not void use it to filter mail.mail
          record;
        :param mail_message: optional check/filter on mail_message_id field aka
          a ``mail.message`` record;
        :param author: optional check/filter on author_id field aka a ``res.partner``
          record;
        :return mail: a ``mail.mail`` record generated during the mock and matching
          given parameters and filters;
        """
        for mail in self._new_mails:
            if author is not None and mail.author_id != author:
                continue
            if mail_message is not None and mail.mail_message_id != mail_message:
                continue
            if status and mail.state != status:
                continue
            if all(p in mail.recipient_ids for p in recipients):
                break
        else:
            raise AssertionError('mail.mail not found for message %s / status %s / recipients %s / author %s' % (mail_message, status, recipients.ids, author))
        return mail
    def _find_mail_mail_wemail(self, email_to, status, mail_message=None, author=None):
        """ Find a mail.mail record based on various parameters, notably a list
        of email to (string emails).
        :param email_to: either matching mail.email_to value, either a mail sent
          to a single recipient whose email is email_to;
        :param status: state of mail.mail. If not void use it to filter mail.mail
          record;
        :param mail_message: optional check/filter on mail_message_id field aka
          a ``mail.message`` record;
        :param author: optional check/filter on author_id field aka a ``res.partner``
          record;
        :return mail: a ``mail.mail`` record generated during the mock and matching
          given parameters and filters;
        """
        for mail in self._new_mails:
            if author is not None and mail.author_id != author:
                continue
            if mail_message is not None and mail.mail_message_id != mail_message:
                continue
            if status and mail.state != status:
                continue
            if (mail.email_to == email_to and not mail.recipient_ids) or (not mail.email_to and mail.recipient_ids.email == email_to):
                break
        else:
            raise AssertionError('mail.mail not found for email_to %s / status %s in %s' % (email_to, status, repr([m.email_to for m in self._new_mails])))
        return mail
    def _find_mail_mail_wrecord(self, record):
        """ Find a mail.mail record based on model / res_id of a record.
        :return mail: a ``mail.mail`` record generated during the mock;
        """
        for mail in self._new_mails:
            if mail.model == record._name and mail.res_id == record.id:
                break
        else:
            raise AssertionError('mail.mail not found for record %s in %s' % (record, repr([m.email_to for m in self._new_mails])))
        return mail
    # ------------------------------------------------------------
    # GATEWAY ASSERTS
    # ------------------------------------------------------------
    def assertMailMail(self, recipients, status,
                       check_mail_mail=True, mail_message=None, author=None,
                       content=None, fields_values=None, email_values=None):
        """ Assert mail.mail records are created and maybe sent as emails. Allow
        asserting their content. Records to check are the one generated when
        using mock (mail.mail and outgoing emails). This method takes partners
        as source of record fetch and assert.
        :param recipients: a ``res.partner`` recordset. See ``_find_mail_mail_wpartners``;
        :param status: mail.mail state used to filter mails. If ``sent`` this method
          also check that emails have been sent through gateway;
        :param mail_message: see ``_find_mail_mail_wpartners``;
        :param author: see ``_find_mail_mail_wpartners``;
        :param content: if given, check it is contained within mail html body;
        :param fields_values: if given, should be a dictionary of field names /
          values allowing to check ``mail.mail`` additional values (subject,
          reply_to, ...);
        :param email_values: if given, should be a dictionary of keys / values
          allowing to check sent email additional values (if any).
          See ``assertSentEmail``;
        :param check_mail_mail: deprecated, use ``assertSentEmail`` if False
        """
        found_mail = self._find_mail_mail_wpartners(recipients, status, mail_message=mail_message, author=author)
        self.assertTrue(bool(found_mail))
        if content:
            self.assertIn(content, found_mail.body_html)
        for fname, fvalue in (fields_values or {}).items():
            self.assertEqual(
                found_mail[fname], fvalue,
                'Mail: expected %s for %s, got %s' % (fvalue, fname, found_mail[fname]))
        if status == 'sent':
            for recipient in recipients:
                self.assertSentEmail(email_values['email_from'] if email_values and email_values.get('email_from') else author, [recipient], **(email_values or {}))
    def assertMailMailWEmails(self, emails, status,
                              mail_message=None, author=None,
                              content=None, fields_values=None, email_values=None):
        """ Assert mail.mail records are created and maybe sent as emails. Allow
        asserting their content. Records to check are the one generated when
        using mock (mail.mail and outgoing emails). This method takes emails
        as source of record fetch and assert.
        :param emails: a list of emails. See ``_find_mail_mail_wemail``;
        :param status: mail.mail state used to filter mails. If ``sent`` this method
          also check that emails have been sent through gateway;
        :param mail_message: see ``_find_mail_mail_wemail``;
        :param author: see ``_find_mail_mail_wemail``;;
        :param content: if given, check it is contained within mail html body;
        :param fields_values: if given, should be a dictionary of field names /
          values allowing to check ``mail.mail`` additional values (subject,
          reply_to, ...);
        :param email_values: if given, should be a dictionary of keys / values
          allowing to check sent email additional values (if any).
          See ``assertSentEmail``;
        :param check_mail_mail: deprecated, use ``assertSentEmail`` if False
        """
        for email_to in emails:
            found_mail = self._find_mail_mail_wemail(email_to, status, mail_message=mail_message, author=author)
            if content:
                self.assertIn(content, found_mail.body_html)
            for fname, fvalue in (fields_values or {}).items():
                self.assertEqual(
                    found_mail[fname], fvalue,
                    'Mail: expected %s for %s, got %s' % (fvalue, fname, found_mail[fname]))
        if status == 'sent':
            for email_to in emails:
                self.assertSentEmail(email_values['email_from'] if email_values and email_values.get('email_from') else author, [email_to], **(email_values or {}))
    def assertMailMailWId(self, mail_id, status,
                          content=None, fields_values=None):
        """ Assert mail.mail records are created and maybe sent as emails. Allow
        asserting their content. Records to check are the one generated when
        using mock (mail.mail and outgoing emails). This method takes partners
        as source of record fetch and assert.
        :param mail_id: a ``mail.mail`` DB ID. See ``_find_mail_mail_wid``;
        :param status: mail.mail state to check upon found mail;
        :param content: if given, check it is contained within mail html body;
        :param fields_values: if given, should be a dictionary of field names /
          values allowing to check ``mail.mail`` additional values (subject,
          reply_to, ...);
        """
        found_mail = self._find_mail_mail_wid(mail_id)
        self.assertTrue(bool(found_mail))
        if status:
            self.assertEqual(found_mail.state, status)
        if content:
            self.assertIn(content, found_mail.body_html)
        for fname, fvalue in (fields_values or {}).items():
            self.assertEqual(
                found_mail[fname], fvalue,
                'Mail: expected %s for %s, got %s' % (fvalue, fname, found_mail[fname]))
    def assertNoMail(self, recipients, mail_message=None, author=None):
        """ Check no mail.mail and email was generated during gateway mock. """
        try:
            self._find_mail_mail_wpartners(recipients, False, mail_message=mail_message, author=author)
        except AssertionError:
            pass
        else:
            raise AssertionError('mail.mail exists for message %s / recipients %s but should not exist' % (mail_message, recipients.ids))
        finally:
            self.assertNotSentEmail()
    def assertNotSentEmail(self):
        """ Check no email was generated during gateway mock. """
        self.assertEqual(len(self._mails), 0)
    def assertSentEmail(self, author, recipients, **values):
        """ Tool method to ease the check of send emails.
        :param author: email author, either a string (email), either a partner
          record;
        :param recipients: list of recipients, each being either a string (email),
          either a partner record;
        :param values: dictionary of additional values to check email content;
        """
        base_expected = {}
        for fname in ['reply_to', 'subject', 'attachments', 'body', 'references',
                      'body_content', 'body_alternative_content', 'references_content']:
            if fname in values:
                base_expected[fname] = values[fname]
        expected = dict(base_expected)
        if isinstance(author, self.env['res.partner'].__class__):
            expected['email_from'] = formataddr((author.name, author.email))
        else:
            expected['email_from'] = author
        email_to_list = []
        for email_to in recipients:
            if isinstance(email_to, self.env['res.partner'].__class__):
                email_to_list.append(formataddr((email_to.name, email_to.email)))
            else:
                email_to_list.append(email_to)
        expected['email_to'] = email_to_list
        sent_mail = next(
            (mail for mail in self._mails
             if set(mail['email_to']) == set(expected['email_to']) and mail['email_from'] == expected['email_from']
             ), False)
        debug_info = '-'.join('From: %s-To: %s' % (mail['email_from'], mail['email_to']) for mail in self._mails) if not bool(sent_mail) else ''
        self.assertTrue(bool(sent_mail), 'Expected mail from %s to %s not found in %s' % (expected['email_from'], expected['email_to'], debug_info))
        for val in ['reply_to', 'subject', 'references', 'attachments']:
            if val in expected:
                self.assertEqual(expected[val], sent_mail[val], 'Value for %s: expected %s, received %s' % (val, expected[val], sent_mail[val]))
        if 'attachments_info' in values:
            attachments = sent_mail['attachments']
            for attachment_info in values['attachments_info']:
                attachment = next(attach for attach in attachments if attach[0] == attachment_info['name'])
                if attachment_info.get('raw'):
                    self.assertEqual(attachment[1], attachment_info['raw'])
                if attachment_info.get('type'):
                    self.assertEqual(attachment[2], attachment_info['type'])
            self.assertEqual(len(values['attachments_info']), len(attachments))
        for val in ['body']:
            if val in expected:
                self.assertHtmlEqual(expected[val], sent_mail[val], 'Value for %s: expected %s, received %s' % (val, expected[val], sent_mail[val]))
        # *_content values are checked with assertIn on the matching field
        # (name with the '_content' suffix stripped via val[:-8])
        for val in ['body_content', 'body_alternative', 'references_content']:
            if val in expected:
                self.assertIn(expected[val], sent_mail[val[:-8]], 'Value for %s: %s does not contain %s' % (val, sent_mail[val[:-8]], expected[val]))
class MailCase(MockEmail):
    """ Tools, helpers and asserts for mail-related tests, including mail
    gateway mock and helpers (see ´´MockEmail´´).
    Useful reminders
        Notif type: ('inbox', 'Inbox'), ('email', 'Email')
        Notif status: ('ready', 'Ready to Send'), ('sent', 'Sent'),
        ('bounce', 'Bounced'), ('exception', 'Exception'),
        ('canceled', 'Canceled')
        Notif failure type: ("SMTP", "Connection failed (outgoing mail server problem)"),
        ("RECIPIENT", "Invalid email address"),
        ("BOUNCE", "Email address rejected by destination"),
        ("UNKNOWN", "Unknown error")
    """
    # default creation context disabling mail side effects for test records
    _test_context = {
        'mail_create_nolog': True,
        'mail_create_nosubscribe': True,
        'mail_notrack': True,
        'no_reset_password': True
    }
    @classmethod
    def _reset_mail_context(cls, record):
        # re-enable mail side effects on an existing record(set)
        return record.with_context(
            mail_create_nolog=False,
            mail_create_nosubscribe=False,
            mail_notrack=False
        )
    def flush_tracking(self):
        """ Force the creation of tracking values. """
        self.env['base'].flush()
        self.cr.precommit.run()
    # ------------------------------------------------------------
    # MAIL MOCKS
    # ------------------------------------------------------------
    @contextmanager
    def mock_bus(self):
        """ Mock bus.bus create to capture notifications pushed on the bus. """
        bus_bus_create_origin = ImBus.create
        self._init_mock_bus()
        def _bus_bus_create(model, *args, **kwargs):
            res = bus_bus_create_origin(model, *args, **kwargs)
            self._new_bus_notifs += res.sudo()
            return res
        with patch.object(ImBus, 'create', autospec=True, wraps=ImBus, side_effect=_bus_bus_create) as _bus_bus_create_mock:
            yield
    def _init_mock_bus(self):
        # reset captured bus notifications
        self._new_bus_notifs = self.env['bus.bus'].sudo()
    def _reset_bus(self):
        # remove all existing bus notifications from the database
        self.env['bus.bus'].sudo().search([]).unlink()
    @contextmanager
    def mock_mail_app(self):
        """ Mock mail.message / mail.notification create to capture records
        generated during the test. """
        message_create_origin = Message.create
        notification_create_origin = MailNotification.create
        self._init_mock_mail()
        def _mail_message_create(model, *args, **kwargs):
            res = message_create_origin(model, *args, **kwargs)
            self._new_msgs += res.sudo()
            return res
        def _mail_notification_create(model, *args, **kwargs):
            res = notification_create_origin(model, *args, **kwargs)
            self._new_notifs += res.sudo()
            return res
        with patch.object(Message, 'create', autospec=True, wraps=Message, side_effect=_mail_message_create) as _mail_message_create_mock, \
                patch.object(MailNotification, 'create', autospec=True, wraps=MailNotification, side_effect=_mail_notification_create) as _mail_notification_create_mock:
            yield
    def _init_mock_mail(self):
        # reset captured messages and notifications
        self._new_msgs = self.env['mail.message'].sudo()
        self._new_notifs = self.env['mail.notification'].sudo()
    # ------------------------------------------------------------
    # MAIL TOOLS
    # ------------------------------------------------------------
    @classmethod
    def _add_messages(cls, record, body_content, count=1, author=None, **kwargs):
        """ Helper: add #count messages in record history """
        author = author if author else cls.env.user.partner_id
        if 'email_from' not in kwargs:
            kwargs['email_from'] = author.email_formatted
        subtype_id = kwargs.get('subtype_id', cls.env.ref('mail.mt_comment').id)
        values = {
            'model': record._name,
            'res_id': record.id,
            'author_id': author.id,
            'subtype_id': subtype_id,
        }
        values.update(kwargs)
        # bodies are suffixed with a counter to keep them distinguishable
        create_vals = [dict(
            values, body='%s/%02d' % (body_content, counter))
            for counter in range(count)]
        return cls.env['mail.message'].sudo().create(create_vals)
    @classmethod
    def _create_template(cls, model, template_values=None):
        """ Create a mail.template for the given model, stored on the class
        as ``cls.email_template``. """
        create_values = {
            'name': 'TestTemplate',
            'subject': 'About ${object.name}',
            'body_html': '<p>Hello ${object.name}</p>',
            'model_id': cls.env['ir.model']._get(model).id,
        }
        if template_values:
            create_values.update(template_values)
        cls.email_template = cls.env['mail.template'].create(create_values)
        return cls.email_template
    def _generate_notify_recipients(self, partners):
        """ Tool method to generate recipients data according to structure used
        in notification methods. Purpose is to allow testing of internals of
        some notification methods, notably testing links or group-based notification
        details.
        See notably ``MailThread._notify_compute_recipients()``.
        """
        return [
            {'id': partner.id,
             'active': True,
             'share': partner.partner_share,
             'groups': partner.user_ids.groups_id.ids,
             'notif': partner.user_ids.notification_type or 'email',
             'type': 'user' if partner.user_ids and not partner.partner_share else partner.user_ids and 'portal' or 'customer',
            } for partner in partners
        ]
    # ------------------------------------------------------------
    # MAIL ASSERTS WRAPPERS
    # ------------------------------------------------------------
    @contextmanager
    def assertSinglePostNotifications(self, recipients_info, message_info=None, mail_unlink_sent=False, sim_error=None):
        """ Shortcut to assertMsgNotifications when having a single message to check. """
        r_info = dict(message_info if message_info else {})
        r_info.setdefault('content', '')
        r_info['notif'] = recipients_info
        with self.assertPostNotifications([r_info], mail_unlink_sent=mail_unlink_sent, sim_error=sim_error):
            yield
    @contextmanager
    def assertPostNotifications(self, recipients_info, mail_unlink_sent=False, sim_error=None):
        """ Check content of notifications. """
        try:
            with self.mock_mail_gateway(mail_unlink_sent=mail_unlink_sent, sim_error=sim_error), self.mock_bus(), self.mock_mail_app():
                yield
        finally:
            done_msgs, done_notifs = self.assertMailNotifications(self._new_msgs, recipients_info)
            self.assertEqual(self._new_msgs, done_msgs, 'Mail: invalid message creation (%s) / expected (%s)' % (len(self._new_msgs), len(done_msgs)))
            self.assertEqual(self._new_notifs, done_notifs, 'Mail: invalid notification creation (%s) / expected (%s)' % (len(self._new_notifs), len(done_notifs)))
    @contextmanager
    def assertBus(self, channels, message_items=None):
        """ Check content of bus notifications. """
        try:
            with self.mock_bus():
                yield
        finally:
            found_bus_notifs = self.assertBusNotifications(channels, message_items=message_items)
            self.assertEqual(self._new_bus_notifs, found_bus_notifs)
    @contextmanager
    def assertNoNotifications(self):
        """ Check that no message and no notification is generated inside
        the wrapped block. """
        try:
            with self.mock_mail_gateway(mail_unlink_sent=False, sim_error=None), self.mock_bus(), self.mock_mail_app():
                yield
        finally:
            self.assertFalse(bool(self._new_msgs))
            self.assertFalse(bool(self._new_notifs))
    # ------------------------------------------------------------
    # MAIL MODELS ASSERTS
    # ------------------------------------------------------------
    def assertMailNotifications(self, messages, recipients_info):
        """ Check bus notifications content. Mandatory and basic check is about
        channels being notified. Content check is optional.
        GENERATED INPUT
        :param messages: generated messages to check;
        EXPECTED
        :param recipients_info: list of data dict: [
          {'content': message content,
           'message_type': message_type (default: 'comment'),
           'subtype': xml id of message subtype (default: 'mail.mt_comment'),
           'notif': list of notified recipients: [
             {'partner': res.partner record (may be empty),
              'email': NOT SUPPORTED YET,
              'status': notification_status to check,
              'type': notification_type to check,
              'is_read': is_read to check,
              'check_send': whether outgoing stuff has to be checked;
              'failure_type': optional: one of failure_type key
             }, { ... }]
          }, {...}]
        PARAMETERS
        :param unlink_sent: to know whether to compute
        """
        partners = self.env['res.partner'].sudo().concat(*list(p['partner'] for i in recipients_info for p in i['notif'] if p.get('partner')))
        base_domain = [('res_partner_id', 'in', partners.ids)]
        if messages is not None:
            base_domain += [('mail_message_id', 'in', messages.ids)]
        notifications = self.env['mail.notification'].sudo().search(base_domain)
        done_msgs = self.env['mail.message'].sudo()
        done_notifs = self.env['mail.notification'].sudo()
        for message_info in recipients_info:
            mbody, mtype = message_info.get('content', ''), message_info.get('message_type', 'comment')
            msubtype = self.env.ref(message_info.get('subtype', 'mail.mt_comment'))
            # find message
            if messages:
                message = messages.filtered(lambda message: mbody in message.body and message.message_type == mtype and message.subtype_id == msubtype)
            else:
                message = self.env['mail.message'].sudo().search([('body', 'ilike', mbody), ('message_type', '=', mtype), ('subtype_id', '=', msubtype.id)], limit=1, order='id DESC')
            self.assertTrue(message, 'Mail: not found message (content: %s, message_type: %s, subtype: %s)' % (mbody, mtype, msubtype.name))
            # check notifications and prepare assert data
            email_groups = defaultdict(list)
            mail_groups = {'failure': []}
            for recipient in message_info['notif']:
                partner, ntype, ngroup, nstatus = recipient['partner'], recipient['type'], recipient.get('group'), recipient.get('status', 'sent')
                nis_read, ncheck_send = recipient.get('is_read', False if recipient['type'] == 'inbox' else True), recipient.get('check_send', True)
                if not ngroup:
                    ngroup = 'user'
                    if partner and not partner.user_ids:
                        ngroup = 'customer'
                    elif partner and partner.partner_share:
                        ngroup = 'portal'
                # find notification
                partner_notif = notifications.filtered(
                    lambda n: n.mail_message_id == message and
                    n.res_partner_id == partner and
                    n.notification_type == ntype and
                    n.notification_status == nstatus and
                    n.is_read == nis_read
                )
                self.assertTrue(partner_notif, 'Mail: not found notification for %s (type: %s, state: %s, message: %s)' % (partner, ntype, nstatus, message.id))
                # prepare further asserts
                if ntype == 'email':
                    if nstatus == 'sent':
                        if ncheck_send:
                            email_groups[ngroup].append(partner)
                    elif nstatus == 'exception':
                        mail_groups['failure'].append(partner)
                        if ncheck_send:
                            email_groups[ngroup].append(partner)
                    elif nstatus == 'canceled':
                        pass
                    else:
                        raise NotImplementedError()
                done_notifs |= partner_notif
            done_msgs |= message
            # check bus notifications that should be sent (hint: message author, multiple notifications)
            bus_notifications = message.notification_ids._filtered_for_web_client().filtered(lambda n: n.notification_status == 'exception')
            if bus_notifications:
                self.assertMessageBusNotifications(message)
            # check emails that should be sent (hint: mail.mail per group, email per recipient)
            for recipients in email_groups.values():
                partners = self.env['res.partner'].sudo().concat(*recipients)
                if all(p in mail_groups['failure'] for p in partners):
                    if not self.mail_unlink_sent:
                        self.assertMailMail(partners, 'exception',
                                            author=message.author_id,
                                            mail_message=message)
                    else:
                        for recipient in partners:
                            self.assertSentEmail(message.author_id, [recipient])
                else:
                    if not self.mail_unlink_sent:
                        self.assertMailMail(
                            partners, 'sent',
                            author=message.author_id if message.author_id else message.email_from,
                            mail_message=message,
                            email_values={'body_content': mbody}
                        )
                    else:
                        for recipient in partners:
                            self.assertSentEmail(message.author_id if message.author_id else message.email_from, [recipient],
                                                 email_values={'body_content': mbody})
            if not any(p for recipients in email_groups.values() for p in recipients):
                self.assertNoMail(partners, mail_message=message, author=message.author_id)
        return done_msgs, done_notifs
    def assertMessageBusNotifications(self, message):
        """Asserts that the expected notification updates have been sent on the
        bus for the given message."""
        self.assertBusNotifications(
            [(self.cr.dbname, 'res.partner', message.author_id.id)],
            [{
                'type': 'message_notification_update',
                'elements': message._message_notification_format(),
            }],
            check_unique=False
        )
    def assertBusNotifications(self, channels, message_items=None, check_unique=True):
        """ Check bus notifications content. Mandatory and basic check is about
        channels being notified. Content check is optional.
        EXPECTED
        :param channels: list of expected bus channels, like [
          (self.cr.dbname, 'mail.channel', self.channel_1.id),
          (self.cr.dbname, 'res.partner', self.partner_employee_2.id)
        ]
        :param message_items: if given, list of expected message making a valid
          pair (channel, message) to be found in bus.bus, like [
            {'type': 'message_notification_update',
             'elements': {self.msg.id: {
                'message_id': self.msg.id,
                'message_type': 'sms',
                'notifications': {...},
                ...
             }}
            }, {...}]
        """
        bus_notifs = self.env['bus.bus'].sudo().search([('channel', 'in', [json_dump(channel) for channel in channels])])
        if check_unique:
            self.assertEqual(len(bus_notifs), len(channels))
        self.assertEqual(set(bus_notifs.mapped('channel')), set([json_dump(channel) for channel in channels]))
        notif_messages = [n.message for n in bus_notifs]
        for expected in message_items or []:
            for notification in notif_messages:
                if json_dump(expected) == notification:
                    break
            else:
                raise AssertionError('No notification was found with the expected value.\nExpected:\n%s\nReturned:\n%s' %
                    (json_dump(expected), '\n'.join([n for n in notif_messages])))
        return bus_notifs
    def assertNotified(self, message, recipients_info, is_complete=False):
        """ Lightweight check for notifications (mail.notification).
        :param recipients_info: list notified recipients: [
          {'partner': res.partner record (may be empty),
           'type': notification_type to check,
           'is_read': is_read to check,
          }, {...}]
        """
        notifications = self._new_notifs.filtered(lambda notif: notif in message.notification_ids)
        if is_complete:
            self.assertEqual(len(notifications), len(recipients_info))
        for rinfo in recipients_info:
            recipient_notif = next(
                (notif
                 for notif in notifications
                 if notif.res_partner_id == rinfo['partner']
                 ), False
            )
            self.assertTrue(recipient_notif)
            self.assertEqual(recipient_notif.is_read, rinfo['is_read'])
            self.assertEqual(recipient_notif.notification_type, rinfo['type'])
    def assertTracking(self, message, data):
        """ Check tracking values of a message against a list of
        (field_name, value_type, old_value, new_value) tuples. Supported
        value types are 'char', 'integer' and 'many2one'. """
        tracking_values = message.sudo().tracking_value_ids
        for field_name, value_type, old_value, new_value in data:
            tracking = tracking_values.filtered(lambda track: track.field.name == field_name)
            self.assertEqual(len(tracking), 1)
            if value_type in ('char', 'integer'):
                self.assertEqual(tracking.old_value_char, old_value)
                self.assertEqual(tracking.new_value_char, new_value)
            elif value_type in ('many2one'):
                self.assertEqual(tracking.old_value_integer, old_value and old_value.id or False)
                self.assertEqual(tracking.new_value_integer, new_value and new_value.id or False)
                self.assertEqual(tracking.old_value_char, old_value and old_value.display_name or '')
                self.assertEqual(tracking.new_value_char, new_value and new_value.display_name or '')
            else:
                # unsupported value_type: fail explicitly
                self.assertEqual(1, 0)
class MailCommon(common.SavepointCase, MailCase):
    """ Almost-void class definition setting the savepoint case + mock of mail.
    Used mainly for class inheritance in other applications and test modules. """

    @classmethod
    def setUpClass(cls):
        super(MailCommon, cls).setUpClass()
        # give default values for all email aliases and domain
        cls._init_mail_gateway()
        # ensure admin configuration: notifications in inbox so no outgoing
        # mail is generated for the admin during tests
        cls.user_admin = cls.env.ref('base.user_admin')
        cls.user_admin.write({'notification_type': 'inbox'})
        cls.partner_admin = cls.env.ref('base.partner_admin')
        cls.company_admin = cls.user_admin.company_id
        cls.company_admin.write({'email': 'company@example.com'})

        # test standard employee
        cls.user_employee = mail_new_test_user(
            cls.env, login='employee',
            groups='base.group_user',
            company_id=cls.company_admin.id,
            name='Ernest Employee',
            notification_type='inbox',
            signature='--\nErnest'
        )
        cls.partner_employee = cls.user_employee.partner_id
        # flush pending ORM writes so the partner is visible to direct SQL
        # queries performed later — NOTE(review): confirm this is the intent
        cls.partner_employee.flush()

    @classmethod
    def _create_portal_user(cls):
        # Portal user notified by email, for testing portal-facing flows.
        cls.user_portal = mail_new_test_user(
            cls.env, login='portal_test', groups='base.group_portal', company_id=cls.company_admin.id,
            name='Chell Gladys', notification_type='email')
        cls.partner_portal = cls.user_portal.partner_id
        return cls.user_portal

    @classmethod
    def _activate_multi_company(cls):
        """ Create another company, add it to admin and create an user that
        belongs to that new company. It allows to test flows with users from
        different companies. """
        cls.company_2 = cls.env['res.company'].create({
            'name': 'Company 2',
            'email': 'company_2@test.example.com',
        })
        # (4, id) command: add company_2 to the admin's allowed companies
        cls.user_admin.write({'company_ids': [(4, cls.company_2.id)]})

        cls.user_employee_c2 = mail_new_test_user(
            cls.env, login='employee_c2',
            groups='base.group_user',
            company_id=cls.company_2.id,
            name='Enguerrand Employee C2',
            notification_type='inbox',
            signature='--\nEnguerrand'
        )
        cls.partner_employee_c2 = cls.user_employee_c2.partner_id
| agpl-3.0 |
silentfuzzle/calibre | src/html5lib/filters/lint.py | 979 | 4306 | from __future__ import absolute_import, division, unicode_literals
from gettext import gettext
_ = gettext
from . import _base
from ..constants import cdataElements, rcdataElements, voidElements
from ..constants import spaceCharacters
spaceCharacters = "".join(spaceCharacters)
class LintError(Exception):
    """Raised by the lint filter when the token stream is malformed."""
    pass
class Filter(_base.Filter):
    """Pass-through filter that sanity-checks a token stream.

    Raises LintError on malformed tokens (wrong types, empty names,
    mismatched start/end tags, content in the wrong content model);
    otherwise yields every token unchanged.
    """
    def __iter__(self):
        open_elements = []
        contentModelFlag = "PCDATA"
        for token in _base.Filter.__iter__(self):
            type = token["type"]
            if type in ("StartTag", "EmptyTag"):
                name = token["name"]
                if contentModelFlag != "PCDATA":
                    raise LintError(_("StartTag not in PCDATA content model flag: %(tag)s") % {"tag": name})
                if not isinstance(name, str):
                    raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
                if not name:
                    raise LintError(_("Empty tag name"))
                if type == "StartTag" and name in voidElements:
                    raise LintError(_("Void element reported as StartTag token: %(tag)s") % {"tag": name})
                elif type == "EmptyTag" and name not in voidElements:
                    raise LintError(_("Non-void element reported as EmptyTag token: %(tag)s") % {"tag": token["name"]})
                if type == "StartTag":
                    open_elements.append(name)
                # Use dedicated loop variables: the original code iterated
                # "for name, value in ..." which clobbered the tag name, so
                # the content-model checks below tested the *last attribute
                # name* instead of the tag whenever attributes were present.
                for attr_name, attr_value in token["data"]:
                    if not isinstance(attr_name, str):
                        raise LintError(_("Attribute name is not a string: %(name)r") % {"name": attr_name})
                    if not attr_name:
                        raise LintError(_("Empty attribute name"))
                    if not isinstance(attr_value, str):
                        raise LintError(_("Attribute value is not a string: %(value)r") % {"value": attr_value})
                if name in cdataElements:
                    contentModelFlag = "CDATA"
                elif name in rcdataElements:
                    contentModelFlag = "RCDATA"
                elif name == "plaintext":
                    contentModelFlag = "PLAINTEXT"

            elif type == "EndTag":
                name = token["name"]
                if not isinstance(name, str):
                    raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
                if not name:
                    raise LintError(_("Empty tag name"))
                if name in voidElements:
                    raise LintError(_("Void element reported as EndTag token: %(tag)s") % {"tag": name})
                start_name = open_elements.pop()
                if start_name != name:
                    raise LintError(_("EndTag (%(end)s) does not match StartTag (%(start)s)") % {"end": name, "start": start_name})
                contentModelFlag = "PCDATA"

            elif type == "Comment":
                if contentModelFlag != "PCDATA":
                    raise LintError(_("Comment not in PCDATA content model flag"))

            elif type in ("Characters", "SpaceCharacters"):
                data = token["data"]
                if not isinstance(data, str):
                    # fixed copy-pasted message: this is character data,
                    # not an attribute name
                    raise LintError(_("Token data is not a string: %(data)r") % {"data": data})
                if not data:
                    raise LintError(_("%(type)s token with empty data") % {"type": type})
                if type == "SpaceCharacters":
                    data = data.strip(spaceCharacters)
                    if data:
                        raise LintError(_("Non-space character(s) found in SpaceCharacters token: %(token)r") % {"token": data})

            elif type == "Doctype":
                name = token["name"]
                if contentModelFlag != "PCDATA":
                    raise LintError(_("Doctype not in PCDATA content model flag: %(name)s") % {"name": name})
                if not isinstance(name, str):
                    raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
                # XXX: what to do with token["data"] ?

            elif type in ("ParseError", "SerializeError"):
                pass

            else:
                raise LintError(_("Unknown token type: %(type)s") % {"type": type})

            yield token
| gpl-3.0 |
Big-B702/python-for-android | python-build/python-libs/gdata/src/gdata/alt/appengine.py | 133 | 10734 | #!/usr/bin/python
#
# Copyright (C) 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Provides HTTP functions for gdata.service to use on Google App Engine
AppEngineHttpClient: Provides an HTTP request method which uses App Engine's
urlfetch API. Set the http_client member of a GDataService object to an
instance of an AppEngineHttpClient to allow the gdata library to run on
Google App Engine.
run_on_appengine: Function which will modify an existing GDataService object
to allow it to run on App Engine. It works by creating a new instance of
the AppEngineHttpClient and replacing the GDataService object's
http_client.
"""
__author__ = 'api.jscudder (Jeff Scudder)'
import StringIO
import pickle
import atom.http_interface
import atom.token_store
from google.appengine.api import urlfetch
from google.appengine.ext import db
from google.appengine.api import users
from google.appengine.api import memcache
def run_on_appengine(gdata_service, store_tokens=True,
                     single_user_mode=False):
  """Modifies a GDataService object to allow it to run on App Engine.

  Args:
    gdata_service: An instance of AtomService, GDataService, or any
        of their subclasses which has an http_client member and a
        token_store member.
    store_tokens: Boolean, defaults to True. If True, the gdata_service
        will attempt to add each token to it's token_store when
        SetClientLoginToken or SetAuthSubToken is called. If False
        the tokens will not automatically be added to the
        token_store.
    single_user_mode: Boolean, defaults to False. If True, the current_token
        member of gdata_service will be set when
        SetClientLoginToken or SetAuthTubToken is called. If set
        to True, the current_token is set in the gdata_service
        and anyone who accesses the object will use the same
        token.

        Note: If store_tokens is set to False and
        single_user_mode is set to False, all tokens will be
        ignored, since the library assumes: the tokens should not
        be stored in the datastore and they should not be stored
        in the gdata_service object. This will make it
        impossible to make requests which require authorization.

  Returns:
    The same gdata_service object, mutated in place.
  """
  # Swap the service's networking and token storage for App Engine
  # compatible implementations: urlfetch-backed HTTP and datastore-backed
  # token persistence.
  gdata_service.http_client = AppEngineHttpClient()
  gdata_service.token_store = AppEngineTokenStore()
  gdata_service.auto_store_tokens = store_tokens
  gdata_service.auto_set_current_token = single_user_mode
  return gdata_service
class AppEngineHttpClient(atom.http_interface.GenericHttpClient):
  """HTTP client which issues requests through App Engine's urlfetch API.

  Drop-in replacement for the socket-based atom HTTP client on Google
  App Engine, where direct sockets are unavailable.
  """

  def __init__(self, headers=None):
    # debug is part of the GenericHttpClient interface.
    self.debug = False
    # Default headers sent with every request; per-call headers are merged
    # on top of these in request().
    self.headers = headers or {}

  def request(self, operation, url, data=None, headers=None):
    """Performs an HTTP call to the server, supports GET, POST, PUT, and
    DELETE.

    Usage example, perform and HTTP GET on http://www.google.com/:
      import atom.http
      client = atom.http.HttpClient()
      http_response = client.request('GET', 'http://www.google.com/')

    Args:
      operation: str The HTTP operation to be performed. This is usually one
          of 'GET', 'POST', 'PUT', or 'DELETE'
      data: filestream, list of parts, or other object which can be converted
          to a string. Should be set to None when performing a GET or DELETE.
          If data is a file-like object which can be read, this method will
          read a chunk of 100K bytes at a time and send them.
          If the data is a list of parts to be sent, each part will be
          evaluated and sent.
      url: The full URL to which the request should be sent. Can be a string
          or atom.url.Url.
      headers: dict of strings. HTTP headers which should be sent
          in the request.

    Returns:
      An HttpResponse wrapping the urlfetch result.
    """
    all_headers = self.headers.copy()
    if headers:
      all_headers.update(headers)

    # Construct the full payload.
    # Assume that data is None or a string.
    data_str = data
    if data:
      if isinstance(data, list):
        # If data is a list of different objects, convert them all to strings
        # and join them together.
        converted_parts = [_convert_data_part(x) for x in data]
        data_str = ''.join(converted_parts)
      else:
        data_str = _convert_data_part(data)

    # If the list of headers does not include a Content-Length, attempt to
    # calculate it based on the data object.
    if data and 'Content-Length' not in all_headers:
      all_headers['Content-Length'] = str(len(data_str))

    # Set the content type to the default value if none was set.
    if 'Content-Type' not in all_headers:
      all_headers['Content-Type'] = 'application/atom+xml'

    # Lookup the urlfetch operation which corresponds to the desired HTTP verb.
    if operation == 'GET':
      method = urlfetch.GET
    elif operation == 'POST':
      method = urlfetch.POST
    elif operation == 'PUT':
      method = urlfetch.PUT
    elif operation == 'DELETE':
      method = urlfetch.DELETE
    else:
      # NOTE(review): unsupported verbs fall through with method=None and are
      # passed to urlfetch as-is — confirm the resulting urlfetch default is
      # the intended behaviour rather than raising an error here.
      method = None
    # follow_redirects=False: redirects are surfaced to the caller so the
    # gdata layer can handle them (e.g. auth redirects).
    return HttpResponse(urlfetch.Fetch(url=str(url), payload=data_str,
        method=method, headers=all_headers, follow_redirects=False))
def _convert_data_part(data):
if not data or isinstance(data, str):
return data
elif hasattr(data, 'read'):
# data is a file like object, so read it completely.
return data.read()
# The data object was not a file.
# Try to convert to a string and send the data.
return str(data)
class HttpResponse(object):
  """Translates a urlfetch response to look like an httplib response.

  Used to allow the response from HttpRequest to be usable by gdata.service
  methods.  (Fixes docstring typos 'resoinse'/'hhtplib' from the original.)
  """

  def __init__(self, urlfetch_response):
    # Wrap the payload in a file-like object so callers can stream it.
    self.body = StringIO.StringIO(urlfetch_response.content)
    self.headers = urlfetch_response.headers
    self.status = urlfetch_response.status_code
    # urlfetch provides no reason phrase.
    self.reason = ''

  def read(self, length=None):
    """Read up to length bytes of the body; the whole body if length is falsy."""
    if not length:
      return self.body.read()
    else:
      return self.body.read(length)

  def getheader(self, name):
    """Return the named header, falling back to a lowercased lookup.

    Unlike httplib's getheader, raises KeyError when the header is missing
    under both spellings (behaviour preserved from the original).
    """
    # dict.has_key() is deprecated (and removed in Python 3); use 'in'.
    if name not in self.headers:
      return self.headers[name.lower()]
    return self.headers[name]
class TokenCollection(db.Model):
  """Datastore Model which associates auth tokens with the current user."""
  # The App Engine user who owns these tokens.
  user = db.UserProperty()
  # Pickled dict mapping scope URL strings to token objects.
  pickled_tokens = db.BlobProperty()
class AppEngineTokenStore(atom.token_store.TokenStore):
  """Stores the user's auth tokens in the App Engine datastore.

  Tokens are only written to the datastore if a user is signed in (if
  users.get_current_user() returns a user object).
  """
  def __init__(self):
    # Explicit user override; when None, the helper functions fall back to
    # users.get_current_user().
    self.user = None

  def add_token(self, token):
    """Associates the token with the current user and stores it.

    If there is no current user, the token will not be stored.

    Returns:
      False if the token was not stored.
    """
    tokens = load_auth_tokens(self.user)
    if not hasattr(token, 'scopes') or not token.scopes:
      return False
    # One dictionary entry per scope, all pointing at the same token.
    for scope in token.scopes:
      tokens[str(scope)] = token
    key = save_auth_tokens(tokens, self.user)
    if key:
      return True
    return False

  def find_token(self, url):
    """Searches the current user's collection of token for a token which can
    be used for a request to the url.

    Returns:
      The stored token which belongs to the current user and is valid for the
      desired URL. If there is no current user, or there is no valid user
      token in the datastore, a atom.http_interface.GenericToken is returned.
    """
    if url is None:
      return None
    if isinstance(url, (str, unicode)):
      url = atom.url.parse_url(url)
    tokens = load_auth_tokens(self.user)
    # NOTE(review): keys in 'tokens' are str(scope) strings while 'url' is
    # an atom.url.Url here; the direct lookup relies on Url comparing equal
    # to its string form — confirm against atom.url.
    if url in tokens:
      token = tokens[url]
      if token.valid_for_scope(url):
        return token
      else:
        # The stored token no longer covers its own scope: drop it.
        del tokens[url]
        save_auth_tokens(tokens, self.user)
    # Fall back to any token whose scope covers the requested URL.
    for scope, token in tokens.iteritems():
      if token.valid_for_scope(url):
        return token
    return atom.http_interface.GenericToken()

  def remove_token(self, token):
    """Removes the token from the current user's collection in the datastore.

    Returns:
      False if the token was not removed, this could be because the token was
      not in the datastore, or because there is no current user.
    """
    token_found = False
    scopes_to_delete = []
    tokens = load_auth_tokens(self.user)
    # Collect first, then delete: the dict must not change size while
    # iterating over it.
    for scope, stored_token in tokens.iteritems():
      if stored_token == token:
        scopes_to_delete.append(scope)
        token_found = True
    for scope in scopes_to_delete:
      del tokens[scope]
    if token_found:
      save_auth_tokens(tokens, self.user)
    return token_found

  def remove_all_tokens(self):
    """Removes all of the current user's tokens from the datastore."""
    save_auth_tokens({}, self.user)
def save_auth_tokens(token_dict, user):
  """Persist the token dictionary for the given (or current) user.

  If there is no current user, nothing is written and None is returned.

  Returns:
    The key of the datastore entity containing the user's tokens, or None if
    there was no current user.
  """
  if user is None:
    user = users.get_current_user()
  if user is None:
    return None
  pickled = pickle.dumps(token_dict)
  user_tokens = TokenCollection.all().filter('user =', user).get()
  if user_tokens:
    # Update the existing entity in place.
    user_tokens.pickled_tokens = pickled
  else:
    # First save for this user: create a fresh entity.
    user_tokens = TokenCollection(user=user, pickled_tokens=pickled)
  return user_tokens.put()
def load_auth_tokens(user):
  """Read the stored token dictionary for the given (or current) user.

  Returns an empty dictionary when nobody is signed in or when the user
  has no stored tokens.
  """
  current = user if user is not None else users.get_current_user()
  if current is None:
    return {}
  entry = TokenCollection.all().filter('user =', current).get()
  if not entry:
    return {}
  return pickle.loads(entry.pickled_tokens)
| apache-2.0 |
gholms/euca2ools | euca2ools/commands/ec2/terminateinstances.py | 6 | 1993 | # Copyright 2009-2014 Eucalyptus Systems, Inc.
#
# Redistribution and use of this software in source and binary forms,
# with or without modification, are permitted provided that the following
# conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from euca2ools.commands.ec2 import EC2Request
from requestbuilder import Arg
class TerminateInstances(EC2Request):
DESCRIPTION = 'Terminate one or more instances'
ARGS = [Arg('InstanceId', metavar='INSTANCE', nargs='+',
help='ID(s) of the instance(s) to terminate')]
LIST_TAGS = ['instancesSet']
def print_result(self, result):
for instance in result.get('instancesSet', []):
print self.tabify(('INSTANCE', instance.get('instanceId'),
instance.get('previousState', {}).get('name'),
instance.get('currentState', {}).get('name')))
| bsd-2-clause |
Teagan42/home-assistant | homeassistant/components/mobile_app/config_flow.py | 2 | 1877 | """Config flow for Mobile App."""
import uuid
from homeassistant import config_entries
from homeassistant.components import person
from homeassistant.helpers import entity_registry
from .const import ATTR_APP_ID, ATTR_DEVICE_ID, ATTR_DEVICE_NAME, CONF_USER_ID, DOMAIN
@config_entries.HANDLERS.register(DOMAIN)
class MobileAppFlowHandler(config_entries.ConfigFlow):
    """Handle a Mobile App config flow."""

    VERSION = 1
    CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_PUSH

    async def async_step_user(self, user_input=None):
        """Handle a flow initialized by the user.

        Manual setup is not supported: the flow always aborts and points
        the user at the companion apps instead of showing a form.
        """
        placeholders = {
            "apps_url": "https://www.home-assistant.io/components/mobile_app/#apps"
        }

        return self.async_abort(
            reason="install_app", description_placeholders=placeholders
        )

    async def async_step_registration(self, user_input=None):
        """Handle a flow initialized during registration."""
        if ATTR_DEVICE_ID in user_input:
            # Unique ID is combi of app + device ID.
            await self.async_set_unique_id(
                f"{user_input[ATTR_APP_ID]}-{user_input[ATTR_DEVICE_ID]}"
            )
        else:
            # App did not send a device ID: generate a random one so the
            # entry still has a stable identifier.
            user_input[ATTR_DEVICE_ID] = str(uuid.uuid4()).replace("-", "")

        # Register device tracker entity and add to person registering app
        ent_reg = await entity_registry.async_get_registry(self.hass)
        devt_entry = ent_reg.async_get_or_create(
            "device_tracker",
            DOMAIN,
            user_input[ATTR_DEVICE_ID],
            suggested_object_id=user_input[ATTR_DEVICE_NAME],
        )
        await person.async_add_user_device_tracker(
            self.hass, user_input[CONF_USER_ID], devt_entry.entity_id
        )

        return self.async_create_entry(
            title=user_input[ATTR_DEVICE_NAME], data=user_input
        )
| apache-2.0 |
rahul003/mxnet | python/mxnet/rnn/io.py | 8 | 7829 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# coding: utf-8
# pylint: disable=too-many-arguments, too-many-locals
"""Definition of various recurrent neural network cells."""
from __future__ import print_function
import bisect
import random
import numpy as np
from ..io import DataIter, DataBatch, DataDesc
from .. import ndarray
def encode_sentences(sentences, vocab=None, invalid_label=-1, invalid_key='\n',
                     start_label=0, unknown_token=None):
    """Encode sentences and (optionally) build a mapping
    from string tokens to integer indices. Unknown keys
    will be added to vocabulary.

    Parameters
    ----------
    sentences : list of list of str
        A list of sentences to encode. Each sentence
        should be a list of string tokens.
    vocab : None or dict of str -> int
        Optional input Vocabulary
    invalid_label : int, default -1
        Index for invalid token, like <end-of-sentence>
    invalid_key : str, default '\\n'
        Key for invalid token. Use '\\n' for end
        of sentence by default.
    start_label : int
        lowest index.
    unknown_token: str
        Symbol to represent unknown token.
        If not specified, unknown token will be skipped.

    Returns
    -------
    result : list of list of int
        encoded sentences
    vocab : dict of str -> int
        result vocabulary
    """
    idx = start_label
    if vocab is None:
        vocab = {invalid_key: invalid_label}
        new_vocab = True
    else:
        new_vocab = False
    res = []
    for sent in sentences:
        coded = []
        for word in sent:
            if word not in vocab:
                assert (new_vocab or unknown_token), "Unknown token %s"%word
                if unknown_token:
                    word = unknown_token
                # Allocate an index only if the (possibly substituted) token
                # is still missing. Previously every unseen word re-assigned
                # vocab[unknown_token] to a fresh index, remapping sentences
                # that were already encoded with the old index.
                if word not in vocab:
                    if idx == invalid_label:
                        # never hand out the reserved invalid index
                        idx += 1
                    vocab[word] = idx
                    idx += 1
            coded.append(vocab[word])
        res.append(coded)

    return res, vocab
class BucketSentenceIter(DataIter):
    """Simple bucketing iterator for language model.
    The label at each sequence step is the following token
    in the sequence.

    Parameters
    ----------
    sentences : list of list of int
        Encoded sentences.
    batch_size : int
        Batch size of the data.
    invalid_label : int, optional
        Key for invalid label, e.g. <end-of-sentence>. The default is -1.
    dtype : str, optional
        Data type of the encoding. The default data type is 'float32'.
    buckets : list of int, optional
        Size of the data buckets. Automatically generated if None.
    data_name : str, optional
        Name of the data. The default name is 'data'.
    label_name : str, optional
        Name of the label. The default name is 'softmax_label'.
    layout : str, optional
        Format of data and label. 'NT' means (batch_size, length)
        and 'TN' means (length, batch_size).
    """
    def __init__(self, sentences, batch_size, buckets=None, invalid_label=-1,
                 data_name='data', label_name='softmax_label', dtype='float32',
                 layout='NT'):
        super(BucketSentenceIter, self).__init__()
        # Auto-generate buckets: one per sentence length occurring at least
        # batch_size times, so every bucket can fill a full batch.
        if not buckets:
            buckets = [i for i, j in enumerate(np.bincount([len(s) for s in sentences]))
                       if j >= batch_size]
        buckets.sort()

        ndiscard = 0
        self.data = [[] for _ in buckets]
        valid_buckets = {}
        for item in range(len(buckets)):
            valid_buckets[item] = 0
        for i, sent in enumerate(sentences):
            # Smallest bucket whose length can hold this sentence.
            buck = bisect.bisect_left(buckets, len(sent))
            valid_buckets[buck] = 1
            if buck == len(buckets):
                # Longer than the largest bucket: discard the sentence.
                ndiscard += 1
                continue
            # Right-pad the sentence to the bucket length with invalid_label.
            buff = np.full((buckets[buck],), invalid_label, dtype=dtype)
            buff[:len(sent)] = sent
            self.data[buck].append(buff)

        # Drop buckets that received no sentences at all.
        buckets = [j for i, j in enumerate(buckets) if valid_buckets[i] == 1]
        self.data = [np.asarray(i, dtype=dtype) for i in self.data if i]

        print("WARNING: discarded %d sentences longer than the largest bucket."%ndiscard)

        self.batch_size = batch_size
        self.buckets = buckets
        self.data_name = data_name
        self.label_name = label_name
        self.dtype = dtype
        self.invalid_label = invalid_label
        self.nddata = []
        self.ndlabel = []
        # Position of the batch axis in the layout string ('N').
        self.major_axis = layout.find('N')
        self.layout = layout
        self.default_bucket_key = max(buckets)

        # Shape depends on whether the layout is batch-major (NT) or
        # time-major (TN).
        if self.major_axis == 0:
            self.provide_data = [DataDesc(
                name=self.data_name, shape=(batch_size, self.default_bucket_key),
                layout=self.layout)]
            self.provide_label = [DataDesc(
                name=self.label_name, shape=(batch_size, self.default_bucket_key),
                layout=self.layout)]
        elif self.major_axis == 1:
            self.provide_data = [DataDesc(
                name=self.data_name, shape=(self.default_bucket_key, batch_size),
                layout=self.layout)]
            self.provide_label = [DataDesc(
                name=self.label_name, shape=(self.default_bucket_key, batch_size),
                layout=self.layout)]
        else:
            raise ValueError("Invalid layout %s: Must by NT (batch major) or TN (time major)")

        # (bucket index, batch start offset) pairs; trailing partial batches
        # within a bucket are dropped.
        self.idx = []
        for i, buck in enumerate(self.data):
            self.idx.extend([(i, j) for j in range(0, len(buck) - batch_size + 1, batch_size)])
        self.curr_idx = 0

        self.reset()

    def reset(self):
        """Resets the iterator to the beginning of the data."""
        self.curr_idx = 0
        # Shuffle both the batch order and the sentences inside each bucket.
        random.shuffle(self.idx)
        for buck in self.data:
            np.random.shuffle(buck)

        self.nddata = []
        self.ndlabel = []
        for buck in self.data:
            # Labels are the data shifted left by one step; the final step
            # has no following token and gets invalid_label.
            label = np.empty_like(buck)
            label[:, :-1] = buck[:, 1:]
            label[:, -1] = self.invalid_label
            self.nddata.append(ndarray.array(buck, dtype=self.dtype))
            self.ndlabel.append(ndarray.array(label, dtype=self.dtype))

    def next(self):
        """Returns the next batch of data."""
        if self.curr_idx == len(self.idx):
            raise StopIteration
        i, j = self.idx[self.curr_idx]
        self.curr_idx += 1

        # Transpose for time-major (TN) layout.
        if self.major_axis == 1:
            data = self.nddata[i][j:j+self.batch_size].T
            label = self.ndlabel[i][j:j+self.batch_size].T
        else:
            data = self.nddata[i][j:j+self.batch_size]
            label = self.ndlabel[i][j:j+self.batch_size]

        return DataBatch([data], [label], pad=0,
                         bucket_key=self.buckets[i],
                         provide_data=[DataDesc(
                             name=self.data_name, shape=data.shape,
                             layout=self.layout)],
                         provide_label=[DataDesc(
                             name=self.label_name, shape=label.shape,
                             layout=self.layout)])
| apache-2.0 |
tmetsch/graph_stitcher | tests/stitcher_evolutionary_test.py | 1 | 16745 | """
Unittest for the evolutionary module.
"""
import itertools
import json
import logging
import unittest
import networkx as nx
from networkx.readwrite import json_graph
from stitcher import evolutionary
FORMAT = "%(asctime)s - %(filename)s - %(lineno)s - " \
"%(levelname)s - %(message)s"
logging.basicConfig(format=FORMAT, level=logging.DEBUG)
class TestCandidate(unittest.TestCase):
    """
    Tests for the abstract Candidate base class.
    """

    def setUp(self):
        self.cut = evolutionary.Candidate('a')

    def test_eq_for_success(self):
        """
        Candidates with identical genes compare equal.
        """
        first = evolutionary.Candidate({'a': '1', 'b': '2'})
        second = evolutionary.Candidate({'a': '1', 'b': '2'})
        self.assertTrue(first == second)

    def test_fitness_for_failure(self):
        """
        fitness() is abstract and must raise.
        """
        self.assertRaises(NotImplementedError, self.cut.fitness)

    def test_mutate_for_failure(self):
        """
        mutate() is abstract and must raise.
        """
        self.assertRaises(NotImplementedError, self.cut.mutate)

    def test_crossover_for_failure(self):
        """
        crossover() is abstract and must raise.
        """
        self.assertRaises(NotImplementedError, self.cut.crossover, None)

    def test_eq_for_sanity(self):
        """
        Objects are the same when there genomics are identical.
        """
        first = evolutionary.Candidate({'a': '1', 'b': '2'})
        second = evolutionary.Candidate({'a': '1', 'b': '2'})
        different = evolutionary.Candidate({'a': '1', 'b': '3'})
        self.assertTrue(first == second)
        self.assertTrue(first != different)
        self.assertIn(second, [first])
        self.assertNotIn(different, [first, second])
class TestGraphCandidate(unittest.TestCase):
"""
Tests the graph candidate.
"""
    def setUp(self):
        # Container graph: six possible stitch targets with type, group and
        # custom attributes for the condition-based fitness tests.
        self.container = nx.DiGraph()
        self.container.add_node('1', **{'type': 'a', 'group': 'foo', 'foo': 3,
                                        'retest': 'aaa'})
        self.container.add_node('2', **{'type': 'a', 'group': 'foo'})
        self.container.add_node('3', **{'type': 'a', 'foo': 5,
                                        'retest': 'bbb'})
        self.container.add_node('4', **{'type': 'b', 'group': 'foo'})
        self.container.add_node('5', **{'type': 'b', 'group': 'bar'})
        self.container.add_node('6', **{'type': 'b', 'group': 'bar'})

        # Request graph: four nodes to be stitched into the container.
        self.request = nx.DiGraph()
        self.request.add_node('a', **{'type': 'x'})
        self.request.add_node('b', **{'type': 'x'})
        self.request.add_node('c', **{'type': 'y'})
        self.request.add_node('d', **{'type': 'y'})

        self.stitch = {'x': 'a', 'y': 'b'}  # stitch x -> a and y -> b
    def test_fitness_for_success(self):
        """
        Test fitness function for success.
        """
        cut = evolutionary.GraphCandidate({'a': '1', 'c': '4'}, self.stitch,
                                          {}, [], self.request, self.container)
        cut.fitness()
        # also exercise __repr__ while a candidate is at hand
        repr(cut)
    def test_mutate_for_success(self):
        """
        Test mutate function for success.
        """
        # a non-empty mutation pool (['2']) is required for mutate()
        cut = evolutionary.GraphCandidate({'a': '1', 'c': '4'}, self.stitch,
                                          {}, ['2'], self.request,
                                          self.container)
        cut.mutate()
    def test_crossover_for_success(self):
        """
        Test crossover function for success.
        """
        # two candidates differing only in the target of 'a'
        cut = evolutionary.GraphCandidate({'a': '1', 'c': '4'}, self.stitch,
                                          {}, [], self.request, self.container)
        partner = evolutionary.GraphCandidate({'a': '2', 'c': '4'},
                                              self.stitch, {}, [],
                                              self.request, self.container)
        cut.crossover(partner)
# Test for failures - should not happen.
def test_fitness_for_sanity(self):
"""
Test fitness function for sanity.
"""
# a should not be stitched to 3!
cut = evolutionary.GraphCandidate({'a': '4'}, self.stitch, {}, [],
self.request, self.container)
self.assertEqual(cut.fitness(), 100.0)
# a needs to be stitched to target node with attr foo = 3
condy = {'attributes': [('eq', ('a', ('foo', 3)))]}
cut = evolutionary.GraphCandidate({'a': '1'}, self.stitch, condy, [],
self.request, self.container)
self.assertEqual(cut.fitness(), 0.0)
cut = evolutionary.GraphCandidate({'a': '2'}, self.stitch, condy, [],
self.request, self.container)
self.assertEqual(cut.fitness(), 10.1)
condy = {'attributes': [('eq', ('a', ('foo', 9)))]}
cut = evolutionary.GraphCandidate({'a': '1'}, self.stitch, condy, [],
self.request, self.container)
self.assertEqual(cut.fitness(), 10.2)
# a needs to be stitched to target node with attr foo != 3
condy = {'attributes': [('neq', ('a', ('foo', 3)))]}
cut = evolutionary.GraphCandidate({'a': '3'}, self.stitch, condy, [],
self.request, self.container)
self.assertEqual(cut.fitness(), 0.0)
cut = evolutionary.GraphCandidate({'a': '2'}, self.stitch, condy, [],
self.request, self.container)
self.assertEqual(cut.fitness(), 0.0)
condy = {'attributes': [('neq', ('a', ('foo', 3)))]}
cut = evolutionary.GraphCandidate({'a': '1'}, self.stitch, condy, [],
self.request, self.container)
self.assertEqual(cut.fitness(), 10.1)
# a needs to be stitched to target node with attr foo > 4
condy = {'attributes': [('lg', ('a', ('foo', 4)))]}
cut = evolutionary.GraphCandidate({'a': '3'}, self.stitch, condy, [],
self.request, self.container)
self.assertEqual(cut.fitness(), 0.0)
cut = evolutionary.GraphCandidate({'a': '2'}, self.stitch, condy, [],
self.request, self.container)
self.assertEqual(cut.fitness(), 10.1)
condy = {'attributes': [('lg', ('a', ('foo', 4)))]}
cut = evolutionary.GraphCandidate({'a': '1'}, self.stitch, condy, [],
self.request, self.container)
self.assertEqual(cut.fitness(), 10.2)
# a needs to be stitched to target node with attr foo < 4
condy = {'attributes': [('lt', ('a', ('foo', 4)))]}
cut = evolutionary.GraphCandidate({'a': '1'}, self.stitch, condy, [],
self.request, self.container)
self.assertEqual(cut.fitness(), 0.0)
cut = evolutionary.GraphCandidate({'a': '2'}, self.stitch, condy, [],
self.request, self.container)
self.assertEqual(cut.fitness(), 10.1)
condy = {'attributes': [('lt', ('a', ('foo', 4)))]}
cut = evolutionary.GraphCandidate({'a': '3'}, self.stitch, condy, [],
self.request, self.container)
self.assertEqual(cut.fitness(), 10.2)
# node a requires target node to have an attribute retest which starts
# with an 'c'
condy = {'attributes': [('regex', ('a', ('retest', '^b')))]}
cut = evolutionary.GraphCandidate({'a': '2'}, self.stitch, condy, [],
self.request, self.container)
self.assertEqual(cut.fitness(), 10.1)
cut = evolutionary.GraphCandidate({'a': '1'}, self.stitch, condy, [],
self.request, self.container)
self.assertEqual(cut.fitness(), 10.2)
cut = evolutionary.GraphCandidate({'a': '3'}, self.stitch, condy, [],
self.request, self.container)
self.assertEqual(cut.fitness(), 0.0)
# a and b are stitched to 1
condy = {'compositions': [('share', ('group', ['a', 'b']))]}
cut = evolutionary.GraphCandidate({'a': '1', 'b': '1'}, self.stitch,
condy, [], self.request,
self.container)
self.assertEqual(cut.fitness(), 0.0)
# node c has no group attr.
cut = evolutionary.GraphCandidate({'a': '1', 'b': '3'}, self.stitch,
condy, [], self.request,
self.container)
self.assertEqual(cut.fitness(), 10.1)
# c and d stitched to nodes with different group value.
condy = {'compositions': [('share', ('group', ['c', 'd']))]}
cut = evolutionary.GraphCandidate({'c': '4', 'd': '5'}, self.stitch,
condy, [], self.request,
self.container)
self.assertEqual(cut.fitness(), 10.2)
# a and b are stitched to 1
condy = {'compositions': [('nshare', ('group', ['a', 'b']))]}
cut = evolutionary.GraphCandidate({'a': '1', 'b': '1'}, self.stitch,
condy, [], self.request,
self.container)
self.assertEqual(cut.fitness(), 10.2)
# node c has no group attr.
cut = evolutionary.GraphCandidate({'a': '1', 'b': '3'}, self.stitch,
condy, [], self.request,
self.container)
self.assertEqual(cut.fitness(), 10.1)
# c and d stitched to nodes with different group value.
condy = {'compositions': [('nshare', ('group', ['c', 'd']))]}
cut = evolutionary.GraphCandidate({'c': '4', 'd': '5'}, self.stitch,
condy, [], self.request,
self.container)
self.assertEqual(cut.fitness(), 0.0)
# a and b stitched to same target.
condy = {'compositions': [('same', ['a', 'b'])]}
cut = evolutionary.GraphCandidate({'a': '1', 'b': '1'}, self.stitch,
condy, [], self.request,
self.container)
self.assertEqual(cut.fitness(), 0.0)
# a and b not stitched to same target.
condy = {'compositions': [('same', ['b', 'a'])]}
cut = evolutionary.GraphCandidate({'a': '1', 'b': '2'}, self.stitch,
condy, [], self.request,
self.container)
self.assertEqual(cut.fitness(), 10.0)
# a and b not stitched to same target.
condy = {'compositions': [('diff', ['a', 'b'])]}
cut = evolutionary.GraphCandidate({'a': '1', 'b': '2'}, self.stitch,
condy, [], self.request,
self.container)
self.assertEqual(cut.fitness(), 0.0)
# a and n stitched to same target.
condy = {'compositions': [('diff', ['b', 'a'])]}
cut = evolutionary.GraphCandidate({'a': '1', 'b': '1'}, self.stitch,
condy, [], self.request,
self.container)
self.assertEqual(cut.fitness(), 10.0)
def test_special(self):
"""
Test share condition.
"""
condy = {'compositions': [('share', ('group', ['a', 'b'])),
('share', ('group', ['c', 'd']))]}
cut = evolutionary.GraphCandidate({'a': '1', 'b': '1',
'c': '5', 'd': '6'},
self.stitch, condy, [], self.request,
self.container)
self.assertEqual(cut.fitness(), 0.0)
def test_mutate_for_sanity(self):
"""
Test mutate function for sanity.
"""
cut = evolutionary.GraphCandidate({'a': '1'}, self.stitch,
{}, ['2'], self.request,
self.container)
cut.mutate()
# gens should have flipped - hard to test otherwise as random is
# involved.
self.assertDictEqual({'a': '2'}, cut.gen)
def test_crossover_for_sanity(self):
"""
Test crossover function for sanity.
"""
cut = evolutionary.GraphCandidate({'a': '1'}, self.stitch,
{}, [], self.request, self.container)
partner = evolutionary.GraphCandidate({'a': '2'},
self.stitch, {}, [],
self.request, self.container)
child = cut.crossover(partner)
# child's gens should have been taken from the partner. - hard to test
# otherwise as random is involved.
self.assertDictEqual(child.gen, partner.gen)
# partner has a only non valid mappings
self.container.add_node('y', **{'type': 'boo'})
partner = evolutionary.GraphCandidate({'a': 'y'},
self.stitch, {}, [],
self.request, self.container)
child = cut.crossover(partner)
self.assertTrue(child == cut)
class TestBasicEvolution(unittest.TestCase):
    """
    Tests the filter functions and validates that the right candidates are
    eliminated.
    """

    def setUp(self):
        self.cut = evolutionary.BasicEvolution()

    def test_run_for_success(self):
        """
        Test basic evolutionary algorithm usage for success.
        """
        population = _get_population('b')
        self.cut.run(population, 1)

    def test_run_for_failure(self):
        """
        Test basic evolutionary algorithm usage for failure.
        """
        population = _get_population('x')
        iterations, _ = self.cut.run(population, 100, stabilizer=True)
        self.assertEqual(iterations, 1)  # should die in second run
        iterations, _ = self.cut.run(population, 0)
        self.assertEqual(iterations, 1)

    def test_run_for_sanity(self):
        """
        Test basic evolutionary algorithm usage for sanity.
        """
        population = _get_population('b')
        iteration, _ = self.cut.run(population, 1)
        self.assertEqual(iteration, 0)  # done as we flip to b immediately.
class EvolutionaryStitcherTest(unittest.TestCase):
    """
    Testcase for the evolutionary algorithm based stitcher.
    """

    def setUp(self):
        # Load the graph fixtures.  Use context managers so the fixture
        # files are closed promptly instead of leaking open file handles
        # (the original `json.load(open(...))` never closed them).
        with open('data/container.json') as container_file:
            container_tmp = json.load(container_file)
        self.container = json_graph.node_link_graph(container_tmp,
                                                    directed=True)
        with open('data/request.json') as request_file:
            request_tmp = json.load(request_file)
        self.request = json_graph.node_link_graph(request_tmp,
                                                  directed=True)
        with open('data/stitch.json') as stitch_file:
            rels = json.load(stitch_file)
        self.cut = evolutionary.EvolutionarySticher(rels)

    def test_stitch_for_success(self):
        """
        Test stitch for success.
        """
        self.cut.stitch(self.container, self.request)

    def test_stitch_for_sanity(self):
        """
        Test stitch for sanity.
        """
        for _ in range(0, 25):
            # chances are high that within one run the algo finds no solution.
            self.cut.stitch(self.container, self.request)
def _get_population(value):
    """Build a population with one ExampleCandidate per permutation of
    the string 'abcde', all sharing the same test value."""
    return [ExampleCandidate(''.join(perm), value)
            for perm in itertools.permutations('abcde')]
class ExampleCandidate(evolutionary.Candidate):
    """
    Simple candidate used as a deterministic fixture for the tests.
    """

    def __init__(self, gen, test_value):
        super(ExampleCandidate, self).__init__(gen)
        self.test_value = test_value

    def fitness(self):
        """
        Simple but stupid fitness function: the number of gen entries
        that differ from the test value.
        """
        return float(sum(1 for item in self.gen if item != self.test_value))

    def mutate(self):
        """
        Not mutating for stable env.
        """

    def crossover(self, partner):
        """
        To test (get stable environment) let's return a 'bbbbb'
        """
        return self.__class__('bbbbb', self.test_value)

    def __repr__(self):
        return '{0}:{1}'.format(self.gen, self.fitness())
| mit |
nanophotonics/nplab | nplab/analysis/background_removal/Moving_Gradient_BG_Removal.py | 1 | 4342 | from builtins import range
import numpy as np
"""
Author: jpg66
Module for removing SERS background in cases where Adaptive Polynomial produces too many artifacts. This may include cases with negative curvature and a low SNR.
This should be used in cases where the peaks are sparse.
Run using Run(Signal,Window=50,Maximum_Iterations=10,Peak_Tolerance=0.5). This will return the background-subtracted signal.
Signal is a 1D array containing the signal to be background removed. All pairs of points Window apart in the array are considered, and the gradient of the straight
line between them calculated. This window must be an integer >=2, and if not will be increased to two. This window should be slightly larger than the peaks in the signal.
Each line gradient will be assigned to every point bounded by the line. The gradient at each point is taken as the median of all the gradients assigned to it. The resulting
smooth background is reconstructed from these gradients. The background-subtracted signal is shifted to have a median of 0.
This background will be overestimated slightly at peak positions. To account for this, peak positions are estimated. A noise threshold is estimated as the median of the
absolute background-subtracted signal. Any runs of points over this threshold that are longer than a set length are registered as possible peak positions. This set length is
given by 100.*((1./6)**Set Length) = Peak_Tolerance. The background signal gradients are then recalculated, ignoring any contributions from lines including points registered
as possible peak positions.
These iterations are stopped when they reach Maximum_Iterations or when the list of possible peak positions converges.
"""
def Construct_Background(Gradient, Not_Allowed, Window, Signal_Length):
    """Reconstruct the smooth background signal (plus a constant offset).

    Gradient is the list of straight-line gradients between all point pairs
    Window apart; Not_Allowed flags signal points that are suspected peak
    positions; Signal_Length is the number of points in the signal.

    Each allowed gradient is assigned to every point its line spans; the
    gradient at a point is the median of its assigned gradients, and the
    background is the cumulative sum of those medians.
    """
    # One list of candidate gradients per signal point.
    per_point = [[] for _ in range(Signal_Length)]
    for i, grad in enumerate(Gradient):
        # Ignore lines that start or end on a suspected peak position.
        if not Not_Allowed[i] and not Not_Allowed[i + Window]:
            # The line from point i to point i+Window bounds Window+1 points.
            for n in range(Window + 1):
                per_point[i + n].append(grad)
    # Ensure every point has at least one gradient: seed the first point
    # with zero if empty, then let empty points inherit their predecessor.
    if not per_point[0]:
        per_point[0] = [0]
    for i in range(len(per_point)):
        if not per_point[i]:
            per_point[i] = per_point[i - 1]
    # Integrate the per-point median gradients to rebuild the background.
    output = [0.]
    for grads in per_point:
        output.append(output[-1] + np.median(grads))
    return np.array(output[:-1])


def Run(Signal, Window=50, Maximum_Iterations=10, Peak_Tolerance=0.5):
    """Remove a smooth, moving-gradient background from a 1D signal.

    Iteratively estimates the background via Construct_Background, flags
    runs of points above the noise threshold as possible peaks, and
    re-estimates the background excluding them.  Stops after
    Maximum_Iterations or when the set of flagged points converges.

    Returns the background-subtracted signal, shifted to median 0.
    """
    # --- Ensure Window fits constraints (integer, at least 2) ---
    Window = int(Window)
    if Window < 2:
        Window = 2
    # --- Gradients of the lines between all point pairs Window apart ---
    Gradient = [float(Signal[n] - Signal[n - Window]) / Window
                for n in range(Window, len(Signal))]
    Not_Allowed = [False] * len(Signal)
    # --- Initial background estimate ---
    Background = Construct_Background(Gradient, Not_Allowed, Window,
                                      len(Signal))
    Clean = np.array(Signal) - Background
    Clean = Clean - np.median(Clean)
    # --- Minimum run length over threshold that counts as a peak:
    # smallest k with 100*(1/6)**k <= Peak_Tolerance ---
    Point_Run = 0
    while 100. * ((1. / 6) ** Point_Run) > Peak_Tolerance:
        Point_Run += 1
    # --- Iterate background estimation, ignoring possible peak positions ---
    Iterate = True
    Iterations = 0
    while Iterate and Iterations < Maximum_Iterations:
        Iterations += 1
        # Set (not list) so the membership test below is O(1) per point.
        Possible_Peak_Regions = set()
        Current_Run = []
        Threshold = np.median(np.abs(Clean))
        for i in range(len(Signal)):
            if Clean[i] >= Threshold:
                Current_Run.append(i)
            else:
                if len(Current_Run) >= Point_Run:
                    Possible_Peak_Regions.update(Current_Run)
                Current_Run = []
        # Flush a run that extends to the end of the signal.
        if len(Current_Run) >= Point_Run:
            Possible_Peak_Regions.update(Current_Run)
        New_Not_Allowed = [i in Possible_Peak_Regions
                           for i in range(len(Signal))]
        if not np.array_equal(Not_Allowed, New_Not_Allowed):
            Not_Allowed = New_Not_Allowed
            Background = Construct_Background(Gradient, Not_Allowed, Window,
                                              len(Signal))
            Clean = np.array(Signal) - Background
            Clean = Clean - np.median(Clean)
        else:
            # Flagged peak positions converged - stop iterating.
            Iterate = False
    return Clean
Ditmar/plugin.video.pelisalacarta | core/pyload_client.py | 21 | 2037 | # -*- coding: utf-8 -*-
import urllib
import scrapertools
import config
import logger
import urlparse
def login(username,password):
    """POST credentials to the pyLoad /api/login endpoint.

    Returns the raw response body from the server.
    """
    logger.info("pyload_client.login")
    #url = config.get_setting("pyload")+"/api/login"
    api_url = urlparse.urljoin(config.get_setting("pyload"),"/api/login")
    logger.info("pyload_client.login api_url="+api_url)
    # NOTE(review): session persistence is presumably handled inside
    # scrapertools' HTTP/cookie layer - verify against that module.
    data = scrapertools.cache_page( api_url , post=urllib.urlencode( {"username":username,"password":password} ) )
    logger.info("pyload_client.login data="+data)
    return data
def download(url,package_name):
    """Queue a download URL in pyLoad under the given package name.

    Logs in first, then either creates a new package containing the link
    (/api/addPackage) or appends the link to an existing package with the
    same name (/api/addFiles).
    """
    logger.info("pyload_client.download url="+url+", package_name="+package_name)
    # Called for its login side effect; the response body is not used here.
    session = login(config.get_setting("pyload_user"),config.get_setting("pyload_password"))
    package_id = find_package_id(package_name)
    if package_id is None:
        # No queued package with this name yet: create one with the link.
        api_url = urlparse.urljoin(config.get_setting("pyload"),"/api/addPackage")
        logger.info("pyload_client.download api_url="+api_url)
        data = scrapertools.cache_page( api_url , post=urllib.urlencode( {"name":"'"+package_name+"'","links":str([url])} ) )
        logger.info("pyload_client.download data="+data)
    else:
        # Package already queued: append the link to it.
        api_url = urlparse.urljoin(config.get_setting("pyload"),"/api/addFiles")
        logger.info("pyload_client.download api_url="+api_url)
        data = scrapertools.cache_page( api_url , post=urllib.urlencode( {"pid":str(package_id),"links":str([url])} ) )
        logger.info("pyload_client.download data="+data)
    return
def find_package_id(package_name):
    """Return the id of the queued pyLoad package named ``package_name``.

    Fetches /api/getQueue and scrapes the response with a regex.  Returns
    the package id as a string, or None when no package of that name is
    queued (or the response cannot be parsed).
    """
    logger.info("pyload_client.find_package_id package_name="+package_name)
    api_url = urlparse.urljoin(config.get_setting("pyload"),"/api/getQueue")
    logger.info("pyload_client.find_package_id api_url="+api_url)
    data = scrapertools.cache_page( api_url )
    logger.info("pyload_client.find_package_id data="+data)
    try:
        package_id = scrapertools.get_match(data,'"name"\s*:\s*"'+package_name+'".*?"pid"\s*\:\s*(\d+)')
    except Exception:
        # Was a bare "except:", which also swallowed SystemExit and
        # KeyboardInterrupt; only genuine failures should mean "not found".
        package_id = None
    return package_id
| gpl-3.0 |
gugahoi/maraschino | lib/sqlalchemy/dialects/postgresql/base.py | 14 | 61721 | # postgresql/base.py
# Copyright (C) 2005-2011 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""Support for the PostgreSQL database.
For information on connecting using specific drivers, see the documentation
section regarding that driver.
Sequences/SERIAL
----------------
PostgreSQL supports sequences, and SQLAlchemy uses these as the default means
of creating new primary key values for integer-based primary key columns. When
creating tables, SQLAlchemy will issue the ``SERIAL`` datatype for
integer-based primary key columns, which generates a sequence and server side
default corresponding to the column.
To specify a specific named sequence to be used for primary key generation,
use the :func:`~sqlalchemy.schema.Sequence` construct::
Table('sometable', metadata,
Column('id', Integer, Sequence('some_id_seq'), primary_key=True)
)
When SQLAlchemy issues a single INSERT statement, to fulfill the contract of
having the "last insert identifier" available, a RETURNING clause is added to
the INSERT statement which specifies the primary key columns should be
returned after the statement completes. The RETURNING functionality only takes
place if Postgresql 8.2 or later is in use. As a fallback approach, the
sequence, whether specified explicitly or implicitly via ``SERIAL``, is
executed independently beforehand, the returned value to be used in the
subsequent insert. Note that when an
:func:`~sqlalchemy.sql.expression.insert()` construct is executed using
"executemany" semantics, the "last inserted identifier" functionality does not
apply; no RETURNING clause is emitted nor is the sequence pre-executed in this
case.
To disable the implicit usage of RETURNING described above, pass the flag
``implicit_returning=False`` to :func:`.create_engine`.
Transaction Isolation Level
---------------------------
:func:`.create_engine` accepts an ``isolation_level`` parameter which results
in the command ``SET SESSION CHARACTERISTICS AS TRANSACTION ISOLATION LEVEL
<level>`` being invoked for every new connection. Valid values for this
parameter are ``READ_COMMITTED``, ``READ_UNCOMMITTED``, ``REPEATABLE_READ``,
and ``SERIALIZABLE``. Note that the psycopg2 dialect does *not* use this
technique and uses psycopg2-specific APIs (see that dialect for details).
Remote / Cross-Schema Table Introspection
-----------------------------------------
Tables can be introspected from any accessible schema, including
inter-schema foreign key relationships. However, care must be taken
when specifying the "schema" argument for a given :class:`.Table`, when
the given schema is also present in PostgreSQL's ``search_path`` variable
for the current connection.
If a FOREIGN KEY constraint reports that the remote table's schema is within
the current ``search_path``, the "schema" attribute of the resulting
:class:`.Table` will be set to ``None``, unless the actual schema of the
remote table matches that of the referencing table, and the "schema" argument
was explicitly stated on the referencing table.
The best practice here is to not use the ``schema`` argument
on :class:`.Table` for any schemas that are present in ``search_path``.
``search_path`` defaults to "public", but care should be taken
to inspect the actual value using::
SHOW search_path;
Prior to version 0.7.3, cross-schema foreign keys when the schemas
were also in the ``search_path`` could make an incorrect assumption
if the schemas were explicitly stated on each :class:`.Table`.
Background on PG's ``search_path`` is at:
http://www.postgresql.org/docs/9.0/static/ddl-schemas.html#DDL-SCHEMAS-PATH
INSERT/UPDATE...RETURNING
-------------------------
The dialect supports PG 8.2's ``INSERT..RETURNING``, ``UPDATE..RETURNING`` and
``DELETE..RETURNING`` syntaxes. ``INSERT..RETURNING`` is used by default
for single-row INSERT statements in order to fetch newly generated
primary key identifiers. To specify an explicit ``RETURNING`` clause,
use the :meth:`._UpdateBase.returning` method on a per-statement basis::
# INSERT..RETURNING
result = table.insert().returning(table.c.col1, table.c.col2).\\
values(name='foo')
print result.fetchall()
# UPDATE..RETURNING
result = table.update().returning(table.c.col1, table.c.col2).\\
where(table.c.name=='foo').values(name='bar')
print result.fetchall()
# DELETE..RETURNING
result = table.delete().returning(table.c.col1, table.c.col2).\\
where(table.c.name=='foo')
print result.fetchall()
.. _postgresql_indexes:
Postgresql-Specific Index Options
---------------------------------
Several extensions to the :class:`.Index` construct are available, specific
to the PostgreSQL dialect.
Partial Indexes
^^^^^^^^^^^^^^^^
Partial indexes add criterion to the index definition so that the index is
applied to a subset of rows. These can be specified on :class:`.Index`
using the ``postgresql_where`` keyword argument::
Index('my_index', my_table.c.id, postgresql_where=tbl.c.value > 10)
Operator Classes
^^^^^^^^^^^^^^^^^
PostgreSQL allows the specification of an *operator class* for each column of
an index (see http://www.postgresql.org/docs/8.3/interactive/indexes-opclass.html).
The :class:`.Index` construct allows these to be specified via the ``postgresql_ops``
keyword argument (new as of SQLAlchemy 0.7.2)::
Index('my_index', my_table.c.id, my_table.c.data,
postgresql_ops={
'data': 'text_pattern_ops',
'id': 'int4_ops'
})
Note that the keys in the ``postgresql_ops`` dictionary are the "key" name of
the :class:`.Column`, i.e. the name used to access it from the ``.c`` collection
of :class:`.Table`, which can be configured to be different than the actual
name of the column as expressed in the database.
Index Types
^^^^^^^^^^^^
PostgreSQL provides several index types: B-Tree, Hash, GiST, and GIN, as well as
the ability for users to create their own (see
http://www.postgresql.org/docs/8.3/static/indexes-types.html). These can be
specified on :class:`.Index` using the ``postgresql_using`` keyword argument::
Index('my_index', my_table.c.data, postgresql_using='gin')
The value passed to the keyword argument will be simply passed through to the
underlying CREATE INDEX command, so it *must* be a valid index type for your
version of PostgreSQL.
"""
import re
from sqlalchemy import sql, schema, exc, util
from sqlalchemy.engine import default, reflection
from sqlalchemy.sql import compiler, expression, util as sql_util
from sqlalchemy import types as sqltypes
try:
from uuid import UUID as _python_UUID
except ImportError:
_python_UUID = None
from sqlalchemy.types import INTEGER, BIGINT, SMALLINT, VARCHAR, \
CHAR, TEXT, FLOAT, NUMERIC, \
DATE, BOOLEAN, REAL
# Identifiers that collide with these words must be quoted when rendered
# in Postgresql SQL.
RESERVED_WORDS = set(
    ["all", "analyse", "analyze", "and", "any", "array", "as", "asc",
    "asymmetric", "both", "case", "cast", "check", "collate", "column",
    "constraint", "create", "current_catalog", "current_date",
    "current_role", "current_time", "current_timestamp", "current_user",
    "default", "deferrable", "desc", "distinct", "do", "else", "end",
    "except", "false", "fetch", "for", "foreign", "from", "grant", "group",
    "having", "in", "initially", "intersect", "into", "leading", "limit",
    "localtime", "localtimestamp", "new", "not", "null", "off", "offset",
    "old", "on", "only", "or", "order", "placing", "primary", "references",
    "returning", "select", "session_user", "some", "symmetric", "table",
    "then", "to", "trailing", "true", "union", "unique", "user", "using",
    "variadic", "when", "where", "window", "with", "authorization",
    "between", "binary", "cross", "current_schema", "freeze", "full",
    "ilike", "inner", "is", "isnull", "join", "left", "like", "natural",
    "notnull", "outer", "over", "overlaps", "right", "similar", "verbose"
    ])
# DBAPI column type codes (Postgresql type OIDs) grouped by numeric
# category; presumably consumed by result-processing code elsewhere in
# the dialect - verify against the dbapi-specific modules.
_DECIMAL_TYPES = (1231, 1700)
_FLOAT_TYPES = (700, 701, 1021, 1022)
_INT_TYPES = (20, 21, 23, 26, 1005, 1007, 1016)
class BYTEA(sqltypes.LargeBinary):
    """Postgresql BYTEA type."""

    __visit_name__ = 'BYTEA'
class DOUBLE_PRECISION(sqltypes.Float):
    """Postgresql DOUBLE PRECISION type."""

    __visit_name__ = 'DOUBLE_PRECISION'
class INET(sqltypes.TypeEngine):
    """Postgresql INET type."""

    __visit_name__ = "INET"

# Alias under the legacy "PG"-prefixed naming.
PGInet = INET
class CIDR(sqltypes.TypeEngine):
    """Postgresql CIDR type."""

    __visit_name__ = "CIDR"

# Alias under the legacy "PG"-prefixed naming.
PGCidr = CIDR
class MACADDR(sqltypes.TypeEngine):
    """Postgresql MACADDR type."""

    __visit_name__ = "MACADDR"

# Alias under the legacy "PG"-prefixed naming.
PGMacAddr = MACADDR
class TIMESTAMP(sqltypes.TIMESTAMP):
    """Postgresql TIMESTAMP type, extended with a ``precision`` argument."""

    def __init__(self, timezone=False, precision=None):
        super(TIMESTAMP, self).__init__(timezone=timezone)
        # Stored for use elsewhere in the dialect (e.g. DDL rendering);
        # not referenced directly in this class.
        self.precision = precision
class TIME(sqltypes.TIME):
    """Postgresql TIME type, extended with a ``precision`` argument."""

    def __init__(self, timezone=False, precision=None):
        super(TIME, self).__init__(timezone=timezone)
        # Stored for use elsewhere in the dialect (e.g. DDL rendering);
        # not referenced directly in this class.
        self.precision = precision
class INTERVAL(sqltypes.TypeEngine):
    """Postgresql INTERVAL type.

    The INTERVAL type may not be supported on all DBAPIs.
    It is known to work on psycopg2 and not pg8000 or zxjdbc.

    """
    __visit_name__ = 'INTERVAL'

    def __init__(self, precision=None):
        # Optional precision; stored for use elsewhere in the dialect.
        self.precision = precision

    @classmethod
    def _adapt_from_generic_interval(cls, interval):
        # Build a PG-specific INTERVAL from a generic sqltypes.Interval,
        # carrying over its second_precision.
        return INTERVAL(precision=interval.second_precision)

    @property
    def _type_affinity(self):
        # This type behaves as the generic Interval type.
        return sqltypes.Interval

# Alias under the legacy "PG"-prefixed naming.
PGInterval = INTERVAL
class BIT(sqltypes.TypeEngine):
    """Postgresql BIT / BIT VARYING type."""

    __visit_name__ = 'BIT'

    def __init__(self, length=None, varying=False):
        if not varying:
            # BIT without VARYING defaults to length 1
            self.length = length or 1
        else:
            # but BIT VARYING can be unlimited-length, so no default
            self.length = length
        self.varying = varying

# Alias under the legacy "PG"-prefixed naming.
PGBit = BIT
class UUID(sqltypes.TypeEngine):
    """Postgresql UUID type.

    Represents the UUID column type, interpreting
    data either as natively returned by the DBAPI
    or as Python uuid objects.

    The UUID type may not be supported on all DBAPIs.
    It is known to work on psycopg2 and not pg8000.

    """
    __visit_name__ = 'UUID'

    def __init__(self, as_uuid=False):
        """Construct a UUID type.


        :param as_uuid=False: if True, values will be interpreted
         as Python uuid objects, converting to/from string via the
         DBAPI.

        """
        if as_uuid and _python_UUID is None:
            # uuid module could not be imported at module load time.
            raise NotImplementedError(
                "This version of Python does not support the native UUID type."
            )
        self.as_uuid = as_uuid

    def bind_processor(self, dialect):
        # Outgoing values: stringify uuid objects for the DBAPI when
        # as_uuid mode is enabled; otherwise pass values through untouched.
        if self.as_uuid:
            def process(value):
                if value is not None:
                    value = str(value)
                return value
            return process
        else:
            return None

    def result_processor(self, dialect, coltype):
        # Incoming values: wrap DBAPI strings in uuid objects when
        # as_uuid mode is enabled; otherwise pass values through untouched.
        if self.as_uuid:
            def process(value):
                if value is not None:
                    value = _python_UUID(value)
                return value
            return process
        else:
            return None

# Alias under the legacy "PG"-prefixed naming.
PGUuid = UUID
class ARRAY(sqltypes.MutableType, sqltypes.Concatenable, sqltypes.TypeEngine):
    """Postgresql ARRAY type.

    Represents values as Python lists.

    The ARRAY type may not be supported on all DBAPIs.
    It is known to work on psycopg2 and not pg8000.

    """
    __visit_name__ = 'ARRAY'

    def __init__(self, item_type, mutable=False, as_tuple=False):
        """Construct an ARRAY.

        E.g.::

          Column('myarray', ARRAY(Integer))

        Arguments are:

        :param item_type: The data type of items of this array. Note that
          dimensionality is irrelevant here, so multi-dimensional arrays like
          ``INTEGER[][]``, are constructed as ``ARRAY(Integer)``, not as
          ``ARRAY(ARRAY(Integer))`` or such. The type mapping figures out on
          the fly

        :param mutable=False: Specify whether lists passed to this
          class should be considered mutable - this enables
          "mutable types" mode in the ORM.  Be sure to read the
          notes for :class:`.MutableType` regarding ORM
          performance implications (default changed from ``True`` in
          0.7.0).

          .. note:: This functionality is now superseded by the
             ``sqlalchemy.ext.mutable`` extension described in
             :ref:`mutable_toplevel`.

        :param as_tuple=False: Specify whether return results
          should be converted to tuples from lists. DBAPIs such
          as psycopg2 return lists by default. When tuples are
          returned, the results are hashable. This flag can only
          be set to ``True`` when ``mutable`` is set to
          ``False``. (new in 0.6.5)

        """
        if isinstance(item_type, ARRAY):
            raise ValueError("Do not nest ARRAY types; ARRAY(basetype) "
                            "handles multi-dimensional arrays of basetype")
        # Accept a type class as a convenience; instantiate it.
        if isinstance(item_type, type):
            item_type = item_type()
        self.item_type = item_type
        self.mutable = mutable
        if mutable and as_tuple:
            raise exc.ArgumentError(
                "mutable must be set to False if as_tuple is True."
            )
        self.as_tuple = as_tuple

    def copy_value(self, value):
        # Mutable mode hands out a fresh list so that in-place changes can
        # be detected against the copy.
        if value is None:
            return None
        elif self.mutable:
            return list(value)
        else:
            return value

    def compare_values(self, x, y):
        return x == y

    def is_mutable(self):
        return self.mutable

    def bind_processor(self, dialect):
        # Apply the item type's bind processor to every element,
        # recursing into nested lists/tuples (multi-dimensional arrays).
        item_proc = self.item_type.dialect_impl(dialect).bind_processor(dialect)
        if item_proc:
            def convert_item(item):
                if isinstance(item, (list, tuple)):
                    return [convert_item(child) for child in item]
                else:
                    return item_proc(item)
        else:
            # No item-level processing needed; still normalize nesting.
            def convert_item(item):
                if isinstance(item, (list, tuple)):
                    return [convert_item(child) for child in item]
                else:
                    return item
        def process(value):
            if value is None:
                return value
            return [convert_item(item) for item in value]
        return process

    def result_processor(self, dialect, coltype):
        # Apply the item type's result processor to every element,
        # recursing into nested lists; optionally convert lists to tuples.
        item_proc = self.item_type.dialect_impl(dialect).result_processor(dialect, coltype)
        if item_proc:
            def convert_item(item):
                if isinstance(item, list):
                    r = [convert_item(child) for child in item]
                    if self.as_tuple:
                        r = tuple(r)
                    return r
                else:
                    return item_proc(item)
        else:
            def convert_item(item):
                if isinstance(item, list):
                    r = [convert_item(child) for child in item]
                    if self.as_tuple:
                        r = tuple(r)
                    return r
                else:
                    return item
        def process(value):
            if value is None:
                return value
            r = [convert_item(item) for item in value]
            if self.as_tuple:
                r = tuple(r)
            return r
        return process

# Alias under the legacy "PG"-prefixed naming.
PGArray = ARRAY
class ENUM(sqltypes.Enum):
"""Postgresql ENUM type.
This is a subclass of :class:`.types.Enum` which includes
support for PG's ``CREATE TYPE``.
:class:`~.postgresql.ENUM` is used automatically when
using the :class:`.types.Enum` type on PG assuming
the ``native_enum`` is left as ``True``. However, the
:class:`~.postgresql.ENUM` class can also be instantiated
directly in order to access some additional Postgresql-specific
options, namely finer control over whether or not
``CREATE TYPE`` should be emitted.
Note that both :class:`.types.Enum` as well as
:class:`~.postgresql.ENUM` feature create/drop
methods; the base :class:`.types.Enum` type ultimately
delegates to the :meth:`~.postgresql.ENUM.create` and
:meth:`~.postgresql.ENUM.drop` methods present here.
"""
def __init__(self, *enums, **kw):
"""Construct an :class:`~.postgresql.ENUM`.
Arguments are the same as that of
:class:`.types.Enum`, but also including
the following parameters.
:param create_type: Defaults to True.
Indicates that ``CREATE TYPE`` should be
emitted, after optionally checking for the
presence of the type, when the parent
table is being created; and additionally
that ``DROP TYPE`` is called when the table
is dropped. When ``False``, no check
will be performed and no ``CREATE TYPE``
or ``DROP TYPE`` is emitted, unless
:meth:`~.postgresql.ENUM.create`
or :meth:`~.postgresql.ENUM.drop`
are called directly.
Setting to ``False`` is helpful
when invoking a creation scheme to a SQL file
without access to the actual database -
the :meth:`~.postgresql.ENUM.create` and
:meth:`~.postgresql.ENUM.drop` methods can
be used to emit SQL to a target bind.
(new in 0.7.4)
"""
self.create_type = kw.pop("create_type", True)
super(ENUM, self).__init__(*enums, **kw)
def create(self, bind=None, checkfirst=True):
"""Emit ``CREATE TYPE`` for this
:class:`~.postgresql.ENUM`.
If the underlying dialect does not support
Postgresql CREATE TYPE, no action is taken.
:param bind: a connectable :class:`.Engine`,
:class:`.Connection`, or similar object to emit
SQL.
:param checkfirst: if ``True``, a query against
the PG catalog will be first performed to see
if the type does not exist already before
creating.
"""
if not bind.dialect.supports_native_enum:
return
if not checkfirst or \
not bind.dialect.has_type(bind, self.name, schema=self.schema):
bind.execute(CreateEnumType(self))
def drop(self, bind=None, checkfirst=True):
"""Emit ``DROP TYPE`` for this
:class:`~.postgresql.ENUM`.
If the underlying dialect does not support
Postgresql DROP TYPE, no action is taken.
:param bind: a connectable :class:`.Engine`,
:class:`.Connection`, or similar object to emit
SQL.
:param checkfirst: if ``True``, a query against
the PG catalog will be first performed to see
if the type actually exists before dropping.
"""
if not bind.dialect.supports_native_enum:
return
if not checkfirst or \
bind.dialect.has_type(bind, self.name, schema=self.schema):
bind.execute(DropEnumType(self))
def _check_for_name_in_memos(self, checkfirst, kw):
"""Look in the 'ddl runner' for 'memos', then
note our name in that collection.
This to ensure a particular named enum is operated
upon only once within any kind of create/drop
sequence without relying upon "checkfirst".
"""
if not self.create_type:
return True
if '_ddl_runner' in kw:
ddl_runner = kw['_ddl_runner']
if '_pg_enums' in ddl_runner.memo:
pg_enums = ddl_runner.memo['_pg_enums']
else:
pg_enums = ddl_runner.memo['_pg_enums'] = set()
present = self.name in pg_enums
pg_enums.add(self.name)
return present
else:
return False
def _on_table_create(self, target, bind, checkfirst, **kw):
    # Hook fired when the parent table is created: emit CREATE TYPE once
    # per DDL sequence (the memo check suppresses duplicates).
    if not self._check_for_name_in_memos(checkfirst, kw):
        self.create(bind=bind, checkfirst=checkfirst)
def _on_metadata_create(self, target, bind, checkfirst, **kw):
    # Hook fired on MetaData.create_all(); only acts when this type is
    # bound to a MetaData, and only once per DDL sequence.
    if self.metadata is not None and \
        not self._check_for_name_in_memos(checkfirst, kw):
        self.create(bind=bind, checkfirst=checkfirst)
def _on_metadata_drop(self, target, bind, checkfirst, **kw):
    # Hook fired on MetaData.drop_all(): emit DROP TYPE once per sequence.
    if not self._check_for_name_in_memos(checkfirst, kw):
        self.drop(bind=bind, checkfirst=checkfirst)
# Map generic SQLAlchemy types to the PG-specific implementations above;
# consulted when compiling types for this dialect.
colspecs = {
    sqltypes.Interval: INTERVAL,
    sqltypes.Enum: ENUM,
}

# Reverse map used by reflection: type names as reported by the PG
# catalog (pg_catalog.format_type output, with modifiers stripped) to
# SQLAlchemy type classes.  Several catalog spellings collapse onto the
# same class (e.g. all timestamp/time variants).
ischema_names = {
    'integer': INTEGER,
    'bigint': BIGINT,
    'smallint': SMALLINT,
    'character varying': VARCHAR,
    'character': CHAR,
    '"char"': sqltypes.String,
    'name': sqltypes.String,
    'text': TEXT,
    'numeric': NUMERIC,
    'float': FLOAT,
    'real': REAL,
    'inet': INET,
    'cidr': CIDR,
    'uuid': UUID,
    'bit': BIT,
    'bit varying': BIT,
    'macaddr': MACADDR,
    'double precision': DOUBLE_PRECISION,
    'timestamp': TIMESTAMP,
    'timestamp with time zone': TIMESTAMP,
    'timestamp without time zone': TIMESTAMP,
    'time with time zone': TIME,
    'time without time zone': TIME,
    'date': DATE,
    'time': TIME,
    'bytea': BYTEA,
    'boolean': BOOLEAN,
    'interval': INTERVAL,
    'interval year to month': INTERVAL,
    'interval day to second': INTERVAL,
}
class PGCompiler(compiler.SQLCompiler):
    """Statement compiler emitting PostgreSQL-flavored SQL: ILIKE, the
    @@ full-text match operator, DISTINCT ON, FOR UPDATE NOWAIT,
    RETURNING and EXTRACT with operand casts."""

    def visit_match_op(self, binary, **kw):
        # MATCH renders as the full-text-search @@ to_tsquery() operator.
        return "%s @@ to_tsquery(%s)" % (
            self.process(binary.left),
            self.process(binary.right))

    def visit_ilike_op(self, binary, **kw):
        # Case-insensitive LIKE; appends ESCAPE when an escape char is set.
        escape = binary.modifiers.get("escape", None)
        return '%s ILIKE %s' % \
            (self.process(binary.left), self.process(binary.right)) \
            + (escape and
               (' ESCAPE ' + self.render_literal_value(escape, None))
               or '')

    def visit_notilike_op(self, binary, **kw):
        escape = binary.modifiers.get("escape", None)
        return '%s NOT ILIKE %s' % \
            (self.process(binary.left), self.process(binary.right)) \
            + (escape and
               (' ESCAPE ' + self.render_literal_value(escape, None))
               or '')

    def render_literal_value(self, value, type_):
        value = super(PGCompiler, self).render_literal_value(value, type_)
        # TODO: need to inspect "standard_conforming_strings"
        if self.dialect._backslash_escapes:
            # Double backslashes for servers that interpret backslash escapes.
            value = value.replace('\\', '\\\\')
        return value

    def visit_sequence(self, seq):
        # Inline nextval() call for an explicit Sequence.
        return "nextval('%s')" % self.preparer.format_sequence(seq)

    def limit_clause(self, select):
        text = ""
        if select._limit is not None:
            text += " \n LIMIT " + self.process(sql.literal(select._limit))
        if select._offset is not None:
            if select._limit is None:
                # OFFSET alone still renders a LIMIT ALL clause.
                text += " \n LIMIT ALL"
            text += " OFFSET " + self.process(sql.literal(select._offset))
        return text

    def get_select_precolumns(self, select):
        # Supports plain DISTINCT, plus PG's DISTINCT ON (expr, ...) when
        # _distinct is a column expression or a sequence of them.
        if select._distinct is not False:
            if select._distinct is True:
                return "DISTINCT "
            elif isinstance(select._distinct, (list, tuple)):
                return "DISTINCT ON (" + ', '.join(
                    [self.process(col) for col in select._distinct]
                ) + ") "
            else:
                return "DISTINCT ON (" + self.process(select._distinct) + ") "
        else:
            return ""

    def for_update_clause(self, select):
        if select.for_update == 'nowait':
            return " FOR UPDATE NOWAIT"
        else:
            return super(PGCompiler, self).for_update_clause(select)

    def returning_clause(self, stmt, returning_cols):
        # Render RETURNING <cols>, registering each column in result_map
        # so the returned row can be fetched like a SELECT result.
        columns = [
            self.process(
                self.label_select_column(None, c, asfrom=False),
                within_columns_clause=True,
                result_map=self.result_map)
            for c in expression._select_iterables(returning_cols)
        ]
        return 'RETURNING ' + ', '.join(columns)

    def visit_extract(self, extract, **kwargs):
        field = self.extract_map.get(extract.field, extract.field)
        if extract.expr.type:
            affinity = extract.expr.type._type_affinity
        else:
            affinity = None
        casts = {
            sqltypes.Date: 'date',
            sqltypes.DateTime: 'timestamp',
            sqltypes.Interval: 'interval', sqltypes.Time: 'time'
        }
        cast = casts.get(affinity, None)
        if isinstance(extract.expr, sql.ColumnElement) and cast is not None:
            # Cast the operand (expr::type) so EXTRACT receives a temporal
            # value of the expected kind.
            expr = extract.expr.op('::')(sql.literal_column(cast))
        else:
            expr = extract.expr
        return "EXTRACT(%s FROM %s)" % (
            field, self.process(expr))
class PGDDLCompiler(compiler.DDLCompiler):
    """DDL compiler handling SERIAL primary keys, native ENUM types and
    PG-specific CREATE INDEX options (USING, operator classes, WHERE)."""

    def get_column_specification(self, column, **kwargs):
        colspec = self.preparer.format_column(column)
        impl_type = column.type.dialect_impl(self.dialect)
        # Integer (non-SMALLINT) autoincrement primary keys with no real
        # default render as SERIAL/BIGSERIAL; an *optional* Sequence
        # default also qualifies.
        if column.primary_key and \
            column is column.table._autoincrement_column and \
            not isinstance(impl_type, sqltypes.SmallInteger) and \
            (
                column.default is None or
                (
                    isinstance(column.default, schema.Sequence) and
                    column.default.optional
                )
            ):
            if isinstance(impl_type, sqltypes.BigInteger):
                colspec += " BIGSERIAL"
            else:
                colspec += " SERIAL"
        else:
            colspec += " " + self.dialect.type_compiler.process(column.type)
            default = self.get_column_default_string(column)
            if default is not None:
                colspec += " DEFAULT " + default
        if not column.nullable:
            colspec += " NOT NULL"
        return colspec

    def visit_create_enum_type(self, create):
        # CREATE TYPE <name> AS ENUM ('label', ...)
        type_ = create.element
        return "CREATE TYPE %s AS ENUM (%s)" % (
            self.preparer.format_type(type_),
            ",".join("'%s'" % e for e in type_.enums)
        )

    def visit_drop_enum_type(self, drop):
        type_ = drop.element
        return "DROP TYPE %s" % (
            self.preparer.format_type(type_)
        )

    def visit_create_index(self, create):
        preparer = self.preparer
        index = create.element
        text = "CREATE "
        if index.unique:
            text += "UNIQUE "
        # Optional per-column operator classes, e.g. varchar_pattern_ops.
        ops = index.kwargs.get('postgresql_ops', {})
        text += "INDEX %s ON %s " % (
            preparer.quote(
                self._index_identifier(index.name), index.quote),
            preparer.format_table(index.table)
        )
        if 'postgresql_using' in index.kwargs:
            # Index access method, e.g. USING gin.
            using = index.kwargs['postgresql_using']
            text += "USING %s " % preparer.quote(using, index.quote)
        text += "(%s)" \
            % (
                ', '.join([
                    preparer.format_column(c) +
                    (c.key in ops and (' ' + ops[c.key]) or '')
                    for c in index.columns])
            )
        # Partial index support; "postgres_where" is the deprecated spelling.
        if "postgres_where" in index.kwargs:
            whereclause = index.kwargs['postgres_where']
            util.warn_deprecated(
                "The 'postgres_where' argument has been renamed "
                "to 'postgresql_where'.")
        elif 'postgresql_where' in index.kwargs:
            whereclause = index.kwargs['postgresql_where']
        else:
            whereclause = None
        if whereclause is not None:
            whereclause = sql_util.expression_as_ddl(whereclause)
            where_compiled = self.sql_compiler.process(whereclause)
            text += " WHERE " + where_compiled
        return text
class PGTypeCompiler(compiler.GenericTypeCompiler):
    """Renders PostgreSQL type names, including PG-only types (INET,
    CIDR, MACADDR, UUID, BYTEA, BIT [VARYING], arrays, native ENUM)."""

    def visit_INET(self, type_):
        return "INET"

    def visit_CIDR(self, type_):
        return "CIDR"

    def visit_MACADDR(self, type_):
        return "MACADDR"

    def visit_FLOAT(self, type_):
        precision = type_.precision
        if precision:
            return "FLOAT(%(precision)s)" % {'precision': precision}
        return "FLOAT"

    def visit_DOUBLE_PRECISION(self, type_):
        return "DOUBLE PRECISION"

    def visit_BIGINT(self, type_):
        return "BIGINT"

    def visit_datetime(self, type_):
        # Generic DateTime renders as a PG TIMESTAMP.
        return self.visit_TIMESTAMP(type_)

    def visit_enum(self, type_):
        # Use the named native ENUM type when both the type and the
        # server support it; otherwise fall back to the generic
        # VARCHAR + constraint rendering.
        if type_.native_enum and self.dialect.supports_native_enum:
            return self.visit_ENUM(type_)
        return super(PGTypeCompiler, self).visit_enum(type_)

    def visit_ENUM(self, type_):
        return self.dialect.identifier_preparer.format_type(type_)

    def visit_TIMESTAMP(self, type_):
        spec = "(%d)" % type_.precision \
            if getattr(type_, 'precision', None) else ""
        zone = "WITH" if type_.timezone else "WITHOUT"
        return "TIMESTAMP%s %s TIME ZONE" % (spec, zone)

    def visit_TIME(self, type_):
        spec = "(%d)" % type_.precision \
            if getattr(type_, 'precision', None) else ""
        zone = "WITH" if type_.timezone else "WITHOUT"
        return "TIME%s %s TIME ZONE" % (spec, zone)

    def visit_INTERVAL(self, type_):
        if type_.precision is None:
            return "INTERVAL"
        return "INTERVAL(%d)" % type_.precision

    def visit_BIT(self, type_):
        if not type_.varying:
            return "BIT(%d)" % type_.length
        if type_.length is not None:
            return "BIT VARYING(%d)" % type_.length
        return "BIT VARYING"

    def visit_UUID(self, type_):
        return "UUID"

    def visit_large_binary(self, type_):
        return self.visit_BYTEA(type_)

    def visit_BYTEA(self, type_):
        return "BYTEA"

    def visit_ARRAY(self, type_):
        return self.process(type_.item_type) + '[]'
class PGIdentifierPreparer(compiler.IdentifierPreparer):
    """Identifier quoting for PostgreSQL, plus ENUM type-name formatting."""

    reserved_words = RESERVED_WORDS

    def _unquote_identifier(self, value):
        # Strip surrounding quotes and collapse escaped quote characters,
        # but only when the identifier is actually quoted.
        if value[0] != self.initial_quote:
            return value
        inner = value[1:-1]
        return inner.replace(self.escape_to_quote, self.escape_quote)

    def format_type(self, type_, use_schema=True):
        """Return the quoted, optionally schema-qualified name of an
        ENUM type; raises ArgumentError for anonymous types."""
        if not type_.name:
            raise exc.ArgumentError("Postgresql ENUM type requires a name.")
        name = self.quote(type_.name, type_.quote)
        if use_schema and type_.schema is not None and not self.omit_schema:
            qualified = self.quote_schema(type_.schema, type_.quote)
            name = qualified + "." + name
        return name
class PGInspector(reflection.Inspector):
    """Inspector subclass exposing PG-specific reflection helpers."""

    def __init__(self, conn):
        reflection.Inspector.__init__(self, conn)

    def get_table_oid(self, table_name, schema=None):
        """Return the oid from `table_name` and `schema`."""
        # Delegates to the dialect, sharing this inspector's info cache.
        return self.dialect.get_table_oid(self.bind, table_name, schema,
                                          info_cache=self.info_cache)
class CreateEnumType(schema._CreateDropBase):
    """DDL element rendered by PGDDLCompiler.visit_create_enum_type()."""
    __visit_name__ = "create_enum_type"
class DropEnumType(schema._CreateDropBase):
    """DDL element rendered by PGDDLCompiler.visit_drop_enum_type()."""
    __visit_name__ = "drop_enum_type"
class PGExecutionContext(default.DefaultExecutionContext):
    """Execution context implementing sequence / SERIAL primary key
    value generation via pre-executed nextval() calls."""

    def fire_sequence(self, seq, type_):
        # Explicit Sequence default: fetch nextval() ahead of the INSERT.
        return self._execute_scalar(("select nextval('%s')" % \
                self.dialect.identifier_preparer.format_sequence(seq)), type_)

    def get_insert_default(self, column):
        if column.primary_key and column is column.table._autoincrement_column:
            if column.server_default and column.server_default.has_argument:
                # pre-execute passive defaults on primary key columns
                return self._execute_scalar("select %s" %
                                            column.server_default.arg,
                                            column.type)
            elif (column.default is None or
                  (column.default.is_sequence and
                   column.default.optional)):
                # execute the sequence associated with a SERIAL primary
                # key column. for non-primary-key SERIAL, the ID just
                # generates server side.
                try:
                    seq_name = column._postgresql_seq_name
                except AttributeError:
                    tab = column.table.name
                    col = column.name
                    # NOTE(review): the truncation below appears to mirror
                    # the server's naming of implicit SERIAL sequences under
                    # the 63-char identifier limit ("<tab>_<col>_seq") --
                    # confirm against NAMEDATALEN behavior.
                    tab = tab[0:29 + max(0, (29 - len(col)))]
                    col = col[0:29 + max(0, (29 - len(tab)))]
                    # Cache the computed name on the column for reuse.
                    column._postgresql_seq_name = seq_name = \
                        "%s_%s_seq" % (tab, col)
                sch = column.table.schema
                if sch is not None:
                    exc = "select nextval('\"%s\".\"%s\"')" % \
                            (sch, seq_name)
                else:
                    exc = "select nextval('\"%s\"')" % \
                            (seq_name, )
                return self._execute_scalar(exc, column.type)
        return super(PGExecutionContext, self).get_insert_default(column)
class PGDialect(default.DefaultDialect):
    """PostgreSQL dialect: wires together the PG-specific compilers,
    identifier preparer, execution context and reflection code."""

    name = 'postgresql'
    supports_alter = True
    # PG truncates identifiers at 63 characters (NAMEDATALEN - 1).
    max_identifier_length = 63
    supports_sane_rowcount = True
    # Re-evaluated against the server version in initialize().
    supports_native_enum = True
    supports_native_boolean = True
    supports_sequences = True
    sequences_optional = True
    # SERIAL primary keys: nextval() is pre-executed, not post-fetched.
    preexecute_autoincrement_sequences = True
    postfetch_lastrowid = False
    supports_default_values = True
    supports_empty_insert = False
    default_paramstyle = 'pyformat'
    ischema_names = ischema_names
    colspecs = colspecs
    statement_compiler = PGCompiler
    ddl_compiler = PGDDLCompiler
    type_compiler = PGTypeCompiler
    preparer = PGIdentifierPreparer
    execution_ctx_cls = PGExecutionContext
    inspector = PGInspector
    isolation_level = None
    # TODO: need to inspect "standard_conforming_strings"
    _backslash_escapes = True
    def __init__(self, isolation_level=None, **kwargs):
        """:param isolation_level: if set, applied to every new
        connection via the on_connect() hook."""
        default.DefaultDialect.__init__(self, **kwargs)
        self.isolation_level = isolation_level
    def initialize(self, connection):
        # Tune feature flags once the server version is known.
        super(PGDialect, self).initialize(connection)
        # RETURNING requires PG > 8.2; honor an explicit user setting.
        self.implicit_returning = self.server_version_info > (8, 2) and \
            self.__dict__.get('implicit_returning', True)
        # Native ENUM types arrived in PG 8.3.
        self.supports_native_enum = self.server_version_info >= (8, 3)
        if not self.supports_native_enum:
            self.colspecs = self.colspecs.copy()
            # pop base Enum type
            self.colspecs.pop(sqltypes.Enum, None)
            # psycopg2, others may have placed ENUM here as well
            self.colspecs.pop(ENUM, None)
def on_connect(self):
if self.isolation_level is not None:
def connect(conn):
self.set_isolation_level(conn, self.isolation_level)
return connect
else:
return None
    # Levels accepted by set_isolation_level(), space-separated form.
    _isolation_lookup = set(['SERIALIZABLE',
        'READ UNCOMMITTED', 'READ COMMITTED', 'REPEATABLE READ'])

    def set_isolation_level(self, connection, level):
        """Apply *level* as the session default isolation level.

        Accepts underscore-separated spellings (e.g. "READ_COMMITTED");
        raises ArgumentError for unknown levels.
        """
        level = level.replace('_', ' ')
        if level not in self._isolation_lookup:
            raise exc.ArgumentError(
                "Invalid value '%s' for isolation_level. "
                "Valid isolation levels for %s are %s" %
                (level, self.name, ", ".join(self._isolation_lookup))
                )
        cursor = connection.cursor()
        cursor.execute(
            "SET SESSION CHARACTERISTICS AS TRANSACTION "
            "ISOLATION LEVEL %s" % level)
        # COMMIT so the SET takes effect outside any implicit transaction.
        cursor.execute("COMMIT")
        cursor.close()
def get_isolation_level(self, connection):
cursor = connection.cursor()
cursor.execute('show transaction isolation level')
val = cursor.fetchone()[0]
cursor.close()
return val.upper()
    def do_begin_twophase(self, connection, xid):
        # A two-phase transaction begins as an ordinary transaction;
        # the xid only matters at PREPARE time.
        self.do_begin(connection.connection)
    def do_prepare_twophase(self, connection, xid):
        # NOTE(review): xid is %-interpolated into the statement; assumed
        # to be a trusted, framework-generated identifier -- confirm
        # upstream validation before exposing to user-supplied ids.
        connection.execute("PREPARE TRANSACTION '%s'" % xid)
    def do_rollback_twophase(self, connection, xid,
                             is_prepared=True, recover=False):
        """Roll back a two-phase transaction; prepared + recover uses
        ROLLBACK PREPARED on the named xid."""
        if is_prepared:
            if recover:
                #FIXME: ugly hack to get out of transaction
                # context when commiting recoverable transactions
                # Must find out a way how to make the dbapi not
                # open a transaction.
                connection.execute("ROLLBACK")
                connection.execute("ROLLBACK PREPARED '%s'" % xid)
                connection.execute("BEGIN")
            self.do_rollback(connection.connection)
        else:
            self.do_rollback(connection.connection)
    def do_commit_twophase(self, connection, xid,
                           is_prepared=True, recover=False):
        """Commit a two-phase transaction; prepared + recover uses
        COMMIT PREPARED on the named xid (same transaction-escape hack
        as do_rollback_twophase)."""
        if is_prepared:
            if recover:
                connection.execute("ROLLBACK")
                connection.execute("COMMIT PREPARED '%s'" % xid)
                connection.execute("BEGIN")
            self.do_rollback(connection.connection)
        else:
            self.do_commit(connection.connection)
def do_recover_twophase(self, connection):
resultset = connection.execute(
sql.text("SELECT gid FROM pg_prepared_xacts"))
return [row[0] for row in resultset]
    def _get_default_schema_name(self, connection):
        # First entry of the session's search_path.
        return connection.scalar("select current_schema()")
    def has_schema(self, connection, schema):
        """Return True if the named schema exists (case-insensitive)."""
        cursor = connection.execute(
            sql.text(
                "select nspname from pg_namespace "
                "where lower(nspname)=:schema",
                bindparams=[
                    sql.bindparam(
                        'schema', unicode(schema.lower()),
                        type_=sqltypes.Unicode)]
            )
        )
        return bool(cursor.first())
    def has_table(self, connection, table_name, schema=None):
        """Return True if the table exists, either in *schema* or on the
        current search_path when no schema is given."""
        # seems like case gets folded in pg_class...
        if schema is None:
            cursor = connection.execute(
                sql.text(
                    "select relname from pg_class c join pg_namespace n on "
                    "n.oid=c.relnamespace where n.nspname=current_schema() and "
                    "relname=:name",
                    bindparams=[
                        sql.bindparam('name', unicode(table_name),
                                      type_=sqltypes.Unicode)]
                )
            )
        else:
            cursor = connection.execute(
                sql.text(
                    "select relname from pg_class c join pg_namespace n on "
                    "n.oid=c.relnamespace where n.nspname=:schema and "
                    "relname=:name",
                    bindparams=[
                        sql.bindparam('name',
                                      unicode(table_name),
                                      type_=sqltypes.Unicode),
                        sql.bindparam('schema',
                                      unicode(schema),
                                      type_=sqltypes.Unicode)]
                )
            )
        return bool(cursor.first())
    def has_sequence(self, connection, sequence_name, schema=None):
        """Return True if the sequence exists (relkind='S'), either in
        *schema* or in the current schema when none is given."""
        if schema is None:
            cursor = connection.execute(
                sql.text(
                    "SELECT relname FROM pg_class c join pg_namespace n on "
                    "n.oid=c.relnamespace where relkind='S' and "
                    "n.nspname=current_schema() "
                    "and relname=:name",
                    bindparams=[
                        sql.bindparam('name', unicode(sequence_name),
                                      type_=sqltypes.Unicode)
                    ]
                )
            )
        else:
            cursor = connection.execute(
                sql.text(
                    "SELECT relname FROM pg_class c join pg_namespace n on "
                    "n.oid=c.relnamespace where relkind='S' and "
                    "n.nspname=:schema and relname=:name",
                    bindparams=[
                        sql.bindparam('name', unicode(sequence_name),
                                      type_=sqltypes.Unicode),
                        sql.bindparam('schema',
                                      unicode(schema),
                                      type_=sqltypes.Unicode)
                    ]
                )
            )
        return bool(cursor.first())
def has_type(self, connection, type_name, schema=None):
bindparams = [
sql.bindparam('typname',
unicode(type_name), type_=sqltypes.Unicode),
sql.bindparam('nspname',
unicode(schema), type_=sqltypes.Unicode),
]
if schema is not None:
query = """
SELECT EXISTS (
SELECT * FROM pg_catalog.pg_type t, pg_catalog.pg_namespace n
WHERE t.typnamespace = n.oid
AND t.typname = :typname
AND n.nspname = :nspname
)
"""
else:
query = """
SELECT EXISTS (
SELECT * FROM pg_catalog.pg_type t
WHERE t.typname = :typname
AND pg_type_is_visible(t.oid)
)
"""
cursor = connection.execute(sql.text(query, bindparams=bindparams))
return bool(cursor.scalar())
def _get_server_version_info(self, connection):
v = connection.execute("select version()").scalar()
m = re.match('PostgreSQL (\d+)\.(\d+)(?:\.(\d+))?(?:devel)?', v)
if not m:
raise AssertionError(
"Could not determine version from string '%s'" % v)
return tuple([int(x) for x in m.group(1, 2, 3) if x is not None])
    @reflection.cache
    def get_table_oid(self, connection, table_name, schema=None, **kw):
        """Fetch the oid for schema.table_name.

        Several reflection methods require the table oid. The idea for using
        this method is that it can be fetched one time and cached for
        subsequent calls.

        Raises NoSuchTableError when no table or view ('r'/'v') matches.
        """
        table_oid = None
        if schema is not None:
            schema_where_clause = "n.nspname = :schema"
        else:
            # Unqualified name: restrict to objects visible on search_path.
            schema_where_clause = "pg_catalog.pg_table_is_visible(c.oid)"
        query = """
            SELECT c.oid
            FROM pg_catalog.pg_class c
            LEFT JOIN pg_catalog.pg_namespace n ON n.oid = c.relnamespace
            WHERE (%s)
            AND c.relname = :table_name AND c.relkind in ('r','v')
        """ % schema_where_clause
        # Since we're binding to unicode, table_name and schema_name must be
        # unicode.
        table_name = unicode(table_name)
        if schema is not None:
            schema = unicode(schema)
        s = sql.text(query, bindparams=[
            sql.bindparam('table_name', type_=sqltypes.Unicode),
            sql.bindparam('schema', type_=sqltypes.Unicode)
            ],
            typemap={'oid': sqltypes.Integer}
        )
        c = connection.execute(s, table_name=table_name, schema=schema)
        table_oid = c.scalar()
        if table_oid is None:
            raise exc.NoSuchTableError(table_name)
        return table_oid
    @reflection.cache
    def get_schema_names(self, connection, **kw):
        """Return all schema names, excluding PG-internal 'pg_*' schemas."""
        s = """
        SELECT nspname
        FROM pg_namespace
        ORDER BY nspname
        """
        rp = connection.execute(s)
        # what about system tables?
        # Py3K
        #schema_names = [row[0] for row in rp \
        #    if not row[0].startswith('pg_')]
        # Py2K
        schema_names = [row[0].decode(self.encoding) for row in rp \
                        if not row[0].startswith('pg_')]
        # end Py2K
        return schema_names
@reflection.cache
def get_table_names(self, connection, schema=None, **kw):
if schema is not None:
current_schema = schema
else:
current_schema = self.default_schema_name
result = connection.execute(
sql.text(u"SELECT relname FROM pg_class c "
"WHERE relkind = 'r' "
"AND '%s' = (select nspname from pg_namespace n "
"where n.oid = c.relnamespace) " %
current_schema,
typemap = {'relname':sqltypes.Unicode}
)
)
return [row[0] for row in result]
@reflection.cache
def get_view_names(self, connection, schema=None, **kw):
if schema is not None:
current_schema = schema
else:
current_schema = self.default_schema_name
s = """
SELECT relname
FROM pg_class c
WHERE relkind = 'v'
AND '%(schema)s' = (select nspname from pg_namespace n
where n.oid = c.relnamespace)
""" % dict(schema=current_schema)
# Py3K
#view_names = [row[0] for row in connection.execute(s)]
# Py2K
view_names = [row[0].decode(self.encoding)
for row in connection.execute(s)]
# end Py2K
return view_names
    @reflection.cache
    def get_view_definition(self, connection, view_name, schema=None, **kw):
        """Return the source text of the named view from pg_views."""
        if schema is not None:
            current_schema = schema
        else:
            current_schema = self.default_schema_name
        s = """
        SELECT definition FROM pg_views
        WHERE schemaname = :schema
        AND viewname = :view_name
        """
        rp = connection.execute(sql.text(s),
                                view_name=view_name, schema=current_schema)
        # NOTE(review): a result proxy is presumably always truthy, so this
        # guard looks vestigial -- confirm before relying on it.
        if rp:
            # Py3K
            #view_def = rp.scalar()
            # Py2K
            view_def = rp.scalar().decode(self.encoding)
            # end Py2K
            return view_def
    @reflection.cache
    def get_columns(self, connection, table_name, schema=None, **kw):
        """Reflect the table's columns: query pg_attribute, then resolve
        each catalog type string against ischema_names, reflected native
        ENUMs and DOMAINs, returning a list of column-info dicts with
        keys name/type/nullable/default/autoincrement."""
        table_oid = self.get_table_oid(connection, table_name, schema,
                                       info_cache=kw.get('info_cache'))
        SQL_COLS = """
            SELECT a.attname,
              pg_catalog.format_type(a.atttypid, a.atttypmod),
              (SELECT substring(pg_catalog.pg_get_expr(d.adbin, d.adrelid)
                for 128)
                FROM pg_catalog.pg_attrdef d
               WHERE d.adrelid = a.attrelid AND d.adnum = a.attnum
               AND a.atthasdef)
              AS DEFAULT,
              a.attnotnull, a.attnum, a.attrelid as table_oid
            FROM pg_catalog.pg_attribute a
            WHERE a.attrelid = :table_oid
            AND a.attnum > 0 AND NOT a.attisdropped
            ORDER BY a.attnum
        """
        s = sql.text(SQL_COLS,
            bindparams=[sql.bindparam('table_oid', type_=sqltypes.Integer)],
            typemap={'attname': sqltypes.Unicode, 'default': sqltypes.Unicode}
        )
        c = connection.execute(s, table_oid=table_oid)
        rows = c.fetchall()
        domains = self._load_domains(connection)
        enums = self._load_enums(connection)
        # format columns
        columns = []
        for name, format_type, default, notnull, attnum, table_oid in rows:
            ## strip (5) from character varying(5), timestamp(5)
            # with time zone, etc
            attype = re.sub(r'\([\d,]+\)', '', format_type)
            # strip '[]' from integer[], etc.
            attype = re.sub(r'\[\]', '', attype)
            nullable = not notnull
            is_array = format_type.endswith('[]')
            # Length/precision modifier, e.g. "(30)" or "(10,2)".
            charlen = re.search('\(([\d,]+)\)', format_type)
            if charlen:
                charlen = charlen.group(1)
            # args/kwargs become the constructor arguments of the
            # resolved SQLAlchemy type class.
            kwargs = {}
            args = None
            if attype == 'numeric':
                if charlen:
                    prec, scale = charlen.split(',')
                    args = (int(prec), int(scale))
                else:
                    args = ()
            elif attype == 'double precision':
                args = (53, )
            elif attype == 'integer':
                args = ()
            elif attype in ('timestamp with time zone',
                            'time with time zone'):
                kwargs['timezone'] = True
                if charlen:
                    kwargs['precision'] = int(charlen)
                args = ()
            elif attype in ('timestamp without time zone',
                            'time without time zone', 'time'):
                kwargs['timezone'] = False
                if charlen:
                    kwargs['precision'] = int(charlen)
                args = ()
            elif attype == 'bit varying':
                kwargs['varying'] = True
                if charlen:
                    args = (int(charlen),)
                else:
                    args = ()
            elif attype in ('interval', 'interval year to month',
                            'interval day to second'):
                if charlen:
                    kwargs['precision'] = int(charlen)
                args = ()
            elif charlen:
                args = (int(charlen),)
            else:
                args = ()
            # Resolve the type name; domains may alias another type, so
            # loop until a base type (or nothing) is found.
            while True:
                if attype in self.ischema_names:
                    coltype = self.ischema_names[attype]
                    break
                elif attype in enums:
                    enum = enums[attype]
                    coltype = ENUM
                    if "." in attype:
                        kwargs['schema'], kwargs['name'] = attype.split('.')
                    else:
                        kwargs['name'] = attype
                    args = tuple(enum['labels'])
                    break
                elif attype in domains:
                    domain = domains[attype]
                    attype = domain['attype']
                    # A table can't override whether the domain is nullable.
                    nullable = domain['nullable']
                    if domain['default'] and not default:
                        # It can, however, override the default
                        # value, but can't set it to null.
                        default = domain['default']
                    continue
                else:
                    coltype = None
                    break
            if coltype:
                coltype = coltype(*args, **kwargs)
                if is_array:
                    coltype = ARRAY(coltype)
            else:
                util.warn("Did not recognize type '%s' of column '%s'" %
                          (attype, name))
                coltype = sqltypes.NULLTYPE
            # adjust the default value
            autoincrement = False
            if default is not None:
                match = re.search(r"""(nextval\(')([^']+)('.*$)""", default)
                if match is not None:
                    autoincrement = True
                    # the default is related to a Sequence
                    sch = schema
                    if '.' not in match.group(2) and sch is not None:
                        # unconditionally quote the schema name. this could
                        # later be enhanced to obey quoting rules /
                        # "quote schema"
                        default = match.group(1) + \
                            ('"%s"' % sch) + '.' + \
                            match.group(2) + match.group(3)
            column_info = dict(name=name, type=coltype, nullable=nullable,
                               default=default, autoincrement=autoincrement)
            columns.append(column_info)
        return columns
    @reflection.cache
    def get_primary_keys(self, connection, table_name, schema=None, **kw):
        """Return the table's primary key column names, in attnum order."""
        table_oid = self.get_table_oid(connection, table_name, schema,
                                       info_cache=kw.get('info_cache'))
        PK_SQL = """
            SELECT a.attname
            FROM
                pg_class t
                join pg_index ix on t.oid = ix.indrelid
                join pg_attribute a
                    on t.oid=a.attrelid and a.attnum=ANY(ix.indkey)
            WHERE
              t.oid = :table_oid and
              ix.indisprimary = 't'
            ORDER BY
              a.attnum
        """
        t = sql.text(PK_SQL, typemap={'attname': sqltypes.Unicode})
        c = connection.execute(t, table_oid=table_oid)
        primary_keys = [r[0] for r in c.fetchall()]
        return primary_keys
    @reflection.cache
    def get_pk_constraint(self, connection, table_name, schema=None, **kw):
        """Return {'constrained_columns': [...], 'name': pk_constraint_name}."""
        cols = self.get_primary_keys(connection, table_name,
                                     schema=schema, **kw)
        table_oid = self.get_table_oid(connection, table_name, schema,
                                       info_cache=kw.get('info_cache'))
        # contype 'p' selects the primary key constraint row.
        PK_CONS_SQL = """
        SELECT conname
           FROM  pg_catalog.pg_constraint r
           WHERE r.conrelid = :table_oid AND r.contype = 'p'
           ORDER BY 1
        """
        t = sql.text(PK_CONS_SQL, typemap={'conname': sqltypes.Unicode})
        c = connection.execute(t, table_oid=table_oid)
        name = c.scalar()
        return {
            'constrained_columns': cols,
            'name': name
        }
@reflection.cache
def get_foreign_keys(self, connection, table_name, schema=None, **kw):
preparer = self.identifier_preparer
table_oid = self.get_table_oid(connection, table_name, schema,
info_cache=kw.get('info_cache'))
FK_SQL = """
SELECT r.conname,
pg_catalog.pg_get_constraintdef(r.oid, true) as condef,
n.nspname as conschema
FROM pg_catalog.pg_constraint r,
pg_namespace n,
pg_class c
WHERE r.conrelid = :table AND
r.contype = 'f' AND
c.oid = confrelid AND
n.oid = c.relnamespace
ORDER BY 1
"""
t = sql.text(FK_SQL, typemap={
'conname':sqltypes.Unicode,
'condef':sqltypes.Unicode})
c = connection.execute(t, table=table_oid)
fkeys = []
for conname, condef, conschema in c.fetchall():
m = re.search('FOREIGN KEY \((.*?)\) REFERENCES '
'(?:(.*?)\.)?(.*?)\((.*?)\)', condef).groups()
constrained_columns, referred_schema, \
referred_table, referred_columns = m
constrained_columns = [preparer._unquote_identifier(x)
for x in re.split(r'\s*,\s*', constrained_columns)]
if referred_schema:
referred_schema =\
preparer._unquote_identifier(referred_schema)
elif schema is not None and schema == conschema:
# no schema was returned by pg_get_constraintdef(). This
# means the schema is in the search path. We will leave
# it as None, unless the actual schema, which we pull out
# from pg_namespace even though pg_get_constraintdef() doesn't
# want to give it to us, matches that of the referencing table,
# and an explicit schema was given for the referencing table.
referred_schema = schema
referred_table = preparer._unquote_identifier(referred_table)
referred_columns = [preparer._unquote_identifier(x)
for x in re.split(r'\s*,\s', referred_columns)]
fkey_d = {
'name' : conname,
'constrained_columns' : constrained_columns,
'referred_schema' : referred_schema,
'referred_table' : referred_table,
'referred_columns' : referred_columns
}
fkeys.append(fkey_d)
return fkeys
    @reflection.cache
    def get_indexes(self, connection, table_name, schema, **kw):
        """Reflect the table's non-primary-key indexes.

        Expression-based indexes are skipped with a warning; predicates
        of partial indexes are ignored (also with a warning).  Note
        *schema* is positional here, unlike the other reflection methods.
        """
        table_oid = self.get_table_oid(connection, table_name, schema,
                                       info_cache=kw.get('info_cache'))
        IDX_SQL = """
          SELECT
              i.relname as relname,
              ix.indisunique, ix.indexprs, ix.indpred,
              a.attname
          FROM
              pg_class t
                    join pg_index ix on t.oid = ix.indrelid
                    join pg_class i on i.oid=ix.indexrelid
                    left outer join
                        pg_attribute a
                        on t.oid=a.attrelid and a.attnum=ANY(ix.indkey)
          WHERE
              t.relkind = 'r'
              and t.oid = :table_oid
              and ix.indisprimary = 'f'
          ORDER BY
              t.relname,
              i.relname
        """
        t = sql.text(IDX_SQL, typemap={'attname': sqltypes.Unicode})
        c = connection.execute(t, table_oid=table_oid)
        index_names = {}
        indexes = []
        # Tracks the last warned-about index so multi-column rows of the
        # same index produce a single warning.
        sv_idx_name = None
        for row in c.fetchall():
            idx_name, unique, expr, prd, col = row
            if expr:
                if idx_name != sv_idx_name:
                    util.warn(
                        "Skipped unsupported reflection of "
                        "expression-based index %s"
                        % idx_name)
                sv_idx_name = idx_name
                continue
            if prd and not idx_name == sv_idx_name:
                util.warn(
                    "Predicate of partial index %s ignored during reflection"
                    % idx_name)
                sv_idx_name = idx_name
            # One dict per index; rows arrive one per column.
            if idx_name in index_names:
                index_d = index_names[idx_name]
            else:
                index_d = {'column_names': []}
                indexes.append(index_d)
                index_names[idx_name] = index_d
            index_d['name'] = idx_name
            if col is not None:
                index_d['column_names'].append(col)
            index_d['unique'] = unique
        return indexes
    def _load_enums(self, connection):
        """Return {name (or 'schema.name'): {'labels': [...]}} for every
        native ENUM type; empty dict when the server lacks ENUM support."""
        if not self.supports_native_enum:
            return {}

        ## Load data types for enums:
        SQL_ENUMS = """
            SELECT t.typname as "name",
               -- no enum defaults in 8.4 at least
               -- t.typdefault as "default",
               pg_catalog.pg_type_is_visible(t.oid) as "visible",
               n.nspname as "schema",
               e.enumlabel as "label"
            FROM pg_catalog.pg_type t
                 LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
                 LEFT JOIN pg_catalog.pg_constraint r ON t.oid = r.contypid
                 LEFT JOIN pg_catalog.pg_enum e ON t.oid = e.enumtypid
            WHERE t.typtype = 'e'
            ORDER BY "name", e.oid -- e.oid gives us label order
        """
        s = sql.text(SQL_ENUMS, typemap={
            'attname': sqltypes.Unicode,
            'label': sqltypes.Unicode})
        c = connection.execute(s)
        enums = {}
        # One row per (enum, label); accumulate labels per enum name.
        for enum in c.fetchall():
            if enum['visible']:
                # 'visible' just means whether or not the enum is in a
                # schema that's on the search path -- or not overriden by
                # a schema with higher presedence. If it's not visible,
                # it will be prefixed with the schema-name when it's used.
                name = enum['name']
            else:
                name = "%s.%s" % (enum['schema'], enum['name'])
            if name in enums:
                enums[name]['labels'].append(enum['label'])
            else:
                enums[name] = {
                    'labels': [enum['label']],
                }
        return enums
    def _load_domains(self, connection):
        """Return {name (or 'schema.name'): {'attype', 'nullable',
        'default'}} for every DOMAIN type, for use by get_columns()."""
        ## Load data types for domains:
        SQL_DOMAINS = """
            SELECT t.typname as "name",
               pg_catalog.format_type(t.typbasetype, t.typtypmod) as "attype",
               not t.typnotnull as "nullable",
               t.typdefault as "default",
               pg_catalog.pg_type_is_visible(t.oid) as "visible",
               n.nspname as "schema"
            FROM pg_catalog.pg_type t
               LEFT JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace
               LEFT JOIN pg_catalog.pg_constraint r ON t.oid = r.contypid
            WHERE t.typtype = 'd'
        """
        s = sql.text(SQL_DOMAINS, typemap={'attname': sqltypes.Unicode})
        c = connection.execute(s)
        domains = {}
        for domain in c.fetchall():
            ## strip (30) from character varying(30)
            attype = re.search('([^\(]+)', domain['attype']).group(1)
            if domain['visible']:
                # 'visible' just means whether or not the domain is in a
                # schema that's on the search path -- or not overriden by
                # a schema with higher presedence. If it's not visible,
                # it will be prefixed with the schema-name when it's used.
                name = domain['name']
            else:
                name = "%s.%s" % (domain['schema'], domain['name'])
            domains[name] = {
                'attype': attype,
                'nullable': domain['nullable'],
                'default': domain['default']
            }
        return domains
| mit |
kaday/rose | lib/python/rose/config_editor/upgrade_controller.py | 1 | 13138 | # -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# (C) British Crown Copyright 2012-6 Met Office.
#
# This file is part of Rose, a framework for meteorological suites.
#
# Rose is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Rose is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Rose. If not, see <http://www.gnu.org/licenses/>.
# -----------------------------------------------------------------------------
import copy
import os
import pygtk
pygtk.require('2.0')
import gtk
import gobject
import rose.gtk.util
import rose.macro
import rose.upgrade
class UpgradeController(object):
    """Configure the upgrade of configurations.

    Modal dialog listing each app configuration that has versioned
    metadata, letting the user pick a target version per app, and, on
    Apply, running the upgrade macros and handing each transformed
    config back to the caller via handle_transform_func.
    """

    def __init__(self, app_config_dict, handle_transform_func,
                 parent_window=None, upgrade_inspector=None):
        """Build the dialog, run it modally, and perform requested upgrades.

        app_config_dict: maps config name -> {"config": ..., "directory": ...}.
        handle_transform_func: callback applying a transformed config; called
            as (config_name, macro_id, new_config, change_list,
            triggers_ok=True) and expected to return True when the change
            is accepted.
        parent_window: optional parent gtk.Window for the dialog.
        upgrade_inspector: optional custom inspector forwarded to the
            upgrade macros.
        """
        buttons = (gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT,
                   gtk.STOCK_APPLY, gtk.RESPONSE_ACCEPT)
        self.window = gtk.Dialog(buttons=buttons)
        self.window.set_transient_for(parent_window)
        self.window.set_title(rose.config_editor.DIALOG_TITLE_UPGRADE)
        self.config_dict = {}
        self.config_directory_dict = {}
        self.config_manager_dict = {}
        config_names = sorted(app_config_dict.keys())
        self._config_version_model_dict = {}
        self.use_all_versions = False
        # Model columns: config name, current version, next version,
        # upgrade on/off flag.
        self.treemodel = gtk.TreeStore(str, str, str, bool)
        self.treeview = rose.gtk.util.TooltipTreeView(
            get_tooltip_func=self._get_tooltip)
        self.treeview.show()
        old_pwd = os.getcwd()
        for config_name in config_names:
            app_config = app_config_dict[config_name]["config"]
            app_directory = app_config_dict[config_name]["directory"]
            meta_value = app_config.get_value([rose.CONFIG_SECT_TOP,
                                               rose.CONFIG_OPT_META_TYPE], "")
            # Skip configs whose meta flag has no version component
            # (expected form is "category/version").
            if len(meta_value.split("/")) < 2:
                continue
            try:
                # Upgrade macros may resolve paths relative to the app
                # directory, so move there before loading them.
                os.chdir(app_directory)
                manager = rose.upgrade.MacroUpgradeManager(app_config)
            except OSError:
                # This can occur when access is not allowed to metadata files.
                continue
            self.config_dict[config_name] = app_config
            self.config_directory_dict[config_name] = app_directory
            self.config_manager_dict[config_name] = manager
            self._update_treemodel_data(config_name)
        os.chdir(old_pwd)
        self.treeview.set_model(self.treemodel)
        self.treeview.set_rules_hint(True)
        self.treewindow = gtk.ScrolledWindow()
        self.treewindow.show()
        self.treewindow.set_policy(gtk.POLICY_NEVER,
                                   gtk.POLICY_NEVER)
        columns = rose.config_editor.DIALOG_COLUMNS_UPGRADE
        for i, title in enumerate(columns):
            column = gtk.TreeViewColumn()
            column.set_title(title)
            if self.treemodel.get_column_type(i) == gobject.TYPE_BOOLEAN:
                # Upgrade on/off toggle column.
                cell = gtk.CellRendererToggle()
                cell.connect("toggled", self._handle_toggle_upgrade, i)
                cell.set_property("activatable", True)
            elif i == 2:
                # "Next version" column: editable drop-down of known tags.
                self._combo_cell = gtk.CellRendererCombo()
                self._combo_cell.set_property("has-entry", False)
                self._combo_cell.set_property("editable", True)
                try:
                    self._combo_cell.connect("changed",
                                             self._handle_change_version, 2)
                except TypeError:
                    # PyGTK 2.14 - changed signal.
                    self._combo_cell.connect("edited",
                                             self._handle_change_version, 2)
                cell = self._combo_cell
            else:
                cell = gtk.CellRendererText()
            if i == len(columns) - 1:
                # Let the last column absorb any spare width.
                column.pack_start(cell, expand=True)
            else:
                column.pack_start(cell, expand=False)
            column.set_cell_data_func(cell, self._set_cell_data, i)
            self.treeview.append_column(column)
        self.treeview.connect("cursor-changed", self._handle_change_cursor)
        self.treewindow.add(self.treeview)
        self.window.vbox.pack_start(
            self.treewindow, expand=True, fill=True,
            padding=rose.config_editor.SPACING_PAGE)
        button_hbox = gtk.HBox()
        button_hbox.show()
        all_versions_toggle_button = gtk.CheckButton(
            label=rose.config_editor.DIALOG_LABEL_UPGRADE_ALL,
            use_underline=False)
        all_versions_toggle_button.set_active(self.use_all_versions)
        all_versions_toggle_button.connect("toggled",
                                           self._handle_toggle_all_versions)
        all_versions_toggle_button.show()
        button_hbox.pack_start(all_versions_toggle_button, expand=False,
                               fill=False,
                               padding=rose.config_editor.SPACING_SUB_PAGE)
        self.window.vbox.pack_end(button_hbox, expand=False, fill=False)
        self.ok_button = self.window.action_area.get_children()[0]
        # NOTE(review): the first set_focus call is immediately overridden;
        # initial focus ends up on the OK button.
        self.window.set_focus(all_versions_toggle_button)
        self.window.set_focus(self.ok_button)
        self._set_ok_to_upgrade()
        # Size the dialog to its natural request plus padding, capped at the
        # configured maximum, then allow scrolling within that size.
        max_size = rose.config_editor.SIZE_MACRO_DIALOG_MAX
        my_size = self.window.size_request()
        new_size = [-1, -1]
        extra = 2 * rose.config_editor.SPACING_PAGE
        for i in [0, 1]:
            new_size[i] = min([my_size[i] + extra, max_size[i]])
        self.treewindow.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        self.window.set_default_size(*new_size)
        response = self.window.run()
        old_pwd = os.getcwd()
        if response == gtk.RESPONSE_ACCEPT:
            # Walk every row and upgrade each ticked config in turn.
            iter_ = self.treemodel.get_iter_first()
            while iter_ is not None:
                config_name = self.treemodel.get_value(iter_, 0)
                curr_version = self.treemodel.get_value(iter_, 1)
                next_version = self.treemodel.get_value(iter_, 2)
                ok_to_upgrade = self.treemodel.get_value(iter_, 3)
                config = self.config_dict[config_name]
                manager = self.config_manager_dict[config_name]
                directory = self.config_directory_dict[config_name]
                if not ok_to_upgrade or next_version == curr_version:
                    iter_ = self.treemodel.iter_next(iter_)
                    continue
                os.chdir(directory)
                manager.set_new_tag(next_version)
                # Transform a copy so a failed upgrade leaves the original
                # config untouched.
                macro_config = copy.deepcopy(config)
                try:
                    new_config, change_list = manager.transform(
                        macro_config, custom_inspector=upgrade_inspector)
                except Exception as e:
                    # NOTE(review): only rose.gtk.util is imported at module
                    # level; rose.gtk.dialog is presumably made available via
                    # that import - confirm.
                    rose.gtk.dialog.run_dialog(
                        rose.gtk.dialog.DIALOG_TYPE_ERROR,
                        type(e).__name__ + ": " + str(e),
                        rose.config_editor.ERROR_UPGRADE.format(
                            config_name.lstrip("/"))
                    )
                    iter_ = self.treemodel.iter_next(iter_)
                    continue
                macro_id = (type(manager).__name__ + "." +
                            rose.macro.TRANSFORM_METHOD)
                if handle_transform_func(config_name, macro_id,
                                         new_config, change_list,
                                         triggers_ok=True):
                    # Upgrade accepted: re-run the trigger macro so ignored
                    # states are consistent with the new metadata.
                    meta_config = rose.macro.load_meta_config(
                        new_config, config_type=rose.SUB_CONFIG_NAME,
                        ignore_meta_error=True
                    )
                    trig_macro = rose.macros.trigger.TriggerMacro()
                    macro_config = copy.deepcopy(new_config)
                    macro_id = (
                        rose.upgrade.MACRO_UPGRADE_TRIGGER_NAME + "." +
                        rose.macro.TRANSFORM_METHOD
                    )
                    if not trig_macro.validate_dependencies(macro_config,
                                                            meta_config):
                        new_trig_config, trig_change_list = (
                            rose.macros.trigger.TriggerMacro().transform(
                                macro_config, meta_config)
                        )
                        handle_transform_func(config_name, macro_id,
                                              new_trig_config,
                                              trig_change_list,
                                              triggers_ok=True)
                iter_ = self.treemodel.iter_next(iter_)
        os.chdir(old_pwd)
        self.window.destroy()

    def _get_tooltip(self, view, row_iter, col_index, tip):
        """Set a '<column title>: <value>' tooltip for the hovered cell."""
        name = self.treeview.get_column(col_index).get_title()
        value = str(self.treemodel.get_value(row_iter, col_index))
        tip.set_text(name + ": " + value)
        return True

    def _handle_change_cursor(self, view):
        """Point the version combo at the tag list for the selected row."""
        path, column = self.treeview.get_cursor()
        iter_ = self.treemodel.get_iter(path)
        config_name = self.treemodel.get_value(iter_, 0)
        listmodel = self._config_version_model_dict[config_name]
        self._combo_cell.set_property("model", listmodel)
        self._combo_cell.set_property("text-column", 0)

    def _handle_change_version(self, cell, path, new, col_index):
        """Store a newly chosen version tag in the tree model.

        'new' is a string when delivered by the PyGTK 2.14 "edited" signal,
        or a tree iterator for the newer "changed" signal.
        """
        iter_ = self.treemodel.get_iter(path)
        if isinstance(new, basestring):
            new_value = new
        else:
            new_value = cell.get_property("model").get_value(new, 0)
        self.treemodel.set_value(iter_, col_index, new_value)

    def _handle_toggle_all_versions(self, button):
        """Switch between named-only and all available upgrade versions."""
        self.use_all_versions = button.get_active()
        # Rebuild the model from scratch so the version choices refresh.
        self.treemodel = gtk.TreeStore(str, str, str, bool)
        self._config_version_model_dict.clear()
        for config_name in sorted(self.config_dict.keys()):
            self._update_treemodel_data(config_name)
        self.treeview.set_model(self.treemodel)

    def _handle_toggle_upgrade(self, cell, path, col_index):
        """Flip a row's upgrade flag; keep it off when no change is possible."""
        iter_ = self.treemodel.get_iter(path)
        value = self.treemodel.get_value(iter_, col_index)
        if (self.treemodel.get_value(iter_, 1) ==
                self.treemodel.get_value(iter_, 2)):
            # Current and next versions are identical: nothing to upgrade.
            self.treemodel.set_value(iter_, col_index, False)
        else:
            self.treemodel.set_value(iter_, col_index, not value)
        self._set_ok_to_upgrade()

    def _set_ok_to_upgrade(self, *args):
        """Enable the Apply button only when at least one row is ticked."""
        any_upgrades_toggled = False
        iter_ = self.treemodel.get_iter_first()
        while iter_ is not None:
            if self.treemodel.get_value(iter_, 3):
                any_upgrades_toggled = True
                break
            iter_ = self.treemodel.iter_next(iter_)
        self.ok_button.set_sensitive(any_upgrades_toggled)

    def _set_cell_data(self, column, cell, model, r_iter, col_index):
        """Render a cell: toggle flag, version combo text, or plain text."""
        if model.get_column_type(col_index) == gobject.TYPE_BOOLEAN:
            cell.set_property("active", model.get_value(r_iter, col_index))
            if model.get_value(r_iter, 1) == model.get_value(r_iter, 2):
                # No upgrade available: force the flag off and grey it out.
                model.set_value(r_iter, col_index, False)
                cell.set_property("inconsistent", True)
                cell.set_property("sensitive", False)
            else:
                cell.set_property("inconsistent", False)
                cell.set_property("sensitive", True)
        elif col_index == 2:
            # NOTE(review): config_name is fetched but never used here.
            config_name = model.get_value(r_iter, 0)
            cell.set_property("text", model.get_value(r_iter, 2))
        else:
            text = model.get_value(r_iter, col_index)
            if col_index == 0:
                # Strip the leading "/" from config names for display.
                text = text.lstrip("/")
            cell.set_property("text", text)

    def _update_treemodel_data(self, config_name):
        """Append a row for config_name plus its drop-down list of versions."""
        manager = self.config_manager_dict[config_name]
        current_tag = manager.tag
        next_tag = manager.get_new_tag(only_named=not self.use_all_versions)
        if next_tag is None:
            # Already at the newest version: show it unticked.
            self.treemodel.append(
                None, [config_name, current_tag, current_tag, False])
        else:
            self.treemodel.append(
                None, [config_name, current_tag, next_tag, True])
        listmodel = gtk.ListStore(str)
        tags = manager.get_tags(only_named=not self.use_all_versions)
        if not tags:
            tags = [manager.tag]
        for tag in tags:
            listmodel.append([tag])
        self._config_version_model_dict[config_name] = listmodel
| gpl-3.0 |
kamyu104/django | tests/transactions/tests.py | 239 | 19163 | from __future__ import unicode_literals
import sys
import threading
import time
from unittest import skipIf, skipUnless
from django.db import (
DatabaseError, Error, IntegrityError, OperationalError, connection,
transaction,
)
from django.test import (
TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature,
)
from django.utils import six
from .models import Reporter
@skipUnless(connection.features.uses_savepoints,
            "'atomic' requires transactions and savepoints.")
class AtomicTests(TransactionTestCase):
    """
    Tests for the atomic decorator and context manager.

    The tests make assertions on internal attributes because there isn't a
    robust way to ask the database for its current transaction state.

    Since the decorator syntax is converted into a context manager (see the
    implementation), there are only a few basic tests with the decorator
    syntax and the bulk of the tests use the context manager syntax.
    """

    available_apps = ['transactions']

    def test_decorator_syntax_commit(self):
        # A decorated function that returns normally commits its changes.
        @transaction.atomic
        def make_reporter():
            Reporter.objects.create(first_name="Tintin")
        make_reporter()
        self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])

    def test_decorator_syntax_rollback(self):
        # A decorated function that raises rolls its changes back.
        @transaction.atomic
        def make_reporter():
            Reporter.objects.create(first_name="Haddock")
            raise Exception("Oops, that's his last name")
        with six.assertRaisesRegex(self, Exception, "Oops"):
            make_reporter()
        self.assertQuerysetEqual(Reporter.objects.all(), [])

    def test_alternate_decorator_syntax_commit(self):
        # Same as above but using the @transaction.atomic() call form.
        @transaction.atomic()
        def make_reporter():
            Reporter.objects.create(first_name="Tintin")
        make_reporter()
        self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])

    def test_alternate_decorator_syntax_rollback(self):
        @transaction.atomic()
        def make_reporter():
            Reporter.objects.create(first_name="Haddock")
            raise Exception("Oops, that's his last name")
        with six.assertRaisesRegex(self, Exception, "Oops"):
            make_reporter()
        self.assertQuerysetEqual(Reporter.objects.all(), [])

    def test_commit(self):
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
        self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])

    def test_rollback(self):
        with six.assertRaisesRegex(self, Exception, "Oops"):
            with transaction.atomic():
                Reporter.objects.create(first_name="Haddock")
                raise Exception("Oops, that's his last name")
        self.assertQuerysetEqual(Reporter.objects.all(), [])

    def test_nested_commit_commit(self):
        # A nested atomic block gets its own savepoint; both commit here.
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
            with transaction.atomic():
                Reporter.objects.create(first_name="Archibald", last_name="Haddock")
        self.assertQuerysetEqual(Reporter.objects.all(),
                                 ['<Reporter: Archibald Haddock>', '<Reporter: Tintin>'])

    def test_nested_commit_rollback(self):
        # A failing inner block rolls back to its savepoint only.
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
            with six.assertRaisesRegex(self, Exception, "Oops"):
                with transaction.atomic():
                    Reporter.objects.create(first_name="Haddock")
                    raise Exception("Oops, that's his last name")
        self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])

    def test_nested_rollback_commit(self):
        # A failing outer block discards the inner block's writes too.
        with six.assertRaisesRegex(self, Exception, "Oops"):
            with transaction.atomic():
                Reporter.objects.create(last_name="Tintin")
                with transaction.atomic():
                    Reporter.objects.create(last_name="Haddock")
                raise Exception("Oops, that's his first name")
        self.assertQuerysetEqual(Reporter.objects.all(), [])

    def test_nested_rollback_rollback(self):
        with six.assertRaisesRegex(self, Exception, "Oops"):
            with transaction.atomic():
                Reporter.objects.create(last_name="Tintin")
                with six.assertRaisesRegex(self, Exception, "Oops"):
                    with transaction.atomic():
                        Reporter.objects.create(first_name="Haddock")
                        raise Exception("Oops, that's his last name")
                raise Exception("Oops, that's his first name")
        self.assertQuerysetEqual(Reporter.objects.all(), [])

    def test_merged_commit_commit(self):
        # savepoint=False merges the inner block into the outer transaction.
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
            with transaction.atomic(savepoint=False):
                Reporter.objects.create(first_name="Archibald", last_name="Haddock")
        self.assertQuerysetEqual(Reporter.objects.all(),
                                 ['<Reporter: Archibald Haddock>', '<Reporter: Tintin>'])

    def test_merged_commit_rollback(self):
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
            with six.assertRaisesRegex(self, Exception, "Oops"):
                with transaction.atomic(savepoint=False):
                    Reporter.objects.create(first_name="Haddock")
                    raise Exception("Oops, that's his last name")
        # Writes in the outer block are rolled back too.
        self.assertQuerysetEqual(Reporter.objects.all(), [])

    def test_merged_rollback_commit(self):
        with six.assertRaisesRegex(self, Exception, "Oops"):
            with transaction.atomic():
                Reporter.objects.create(last_name="Tintin")
                with transaction.atomic(savepoint=False):
                    Reporter.objects.create(last_name="Haddock")
                raise Exception("Oops, that's his first name")
        self.assertQuerysetEqual(Reporter.objects.all(), [])

    def test_merged_rollback_rollback(self):
        with six.assertRaisesRegex(self, Exception, "Oops"):
            with transaction.atomic():
                Reporter.objects.create(last_name="Tintin")
                with six.assertRaisesRegex(self, Exception, "Oops"):
                    with transaction.atomic(savepoint=False):
                        Reporter.objects.create(first_name="Haddock")
                        raise Exception("Oops, that's his last name")
                raise Exception("Oops, that's his first name")
        self.assertQuerysetEqual(Reporter.objects.all(), [])

    def test_reuse_commit_commit(self):
        # A single atomic() instance may be reused as a context manager.
        atomic = transaction.atomic()
        with atomic:
            Reporter.objects.create(first_name="Tintin")
            with atomic:
                Reporter.objects.create(first_name="Archibald", last_name="Haddock")
        self.assertQuerysetEqual(Reporter.objects.all(),
                                 ['<Reporter: Archibald Haddock>', '<Reporter: Tintin>'])

    def test_reuse_commit_rollback(self):
        atomic = transaction.atomic()
        with atomic:
            Reporter.objects.create(first_name="Tintin")
            with six.assertRaisesRegex(self, Exception, "Oops"):
                with atomic:
                    Reporter.objects.create(first_name="Haddock")
                    raise Exception("Oops, that's his last name")
        self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])

    def test_reuse_rollback_commit(self):
        atomic = transaction.atomic()
        with six.assertRaisesRegex(self, Exception, "Oops"):
            with atomic:
                Reporter.objects.create(last_name="Tintin")
                with atomic:
                    Reporter.objects.create(last_name="Haddock")
                raise Exception("Oops, that's his first name")
        self.assertQuerysetEqual(Reporter.objects.all(), [])

    def test_reuse_rollback_rollback(self):
        atomic = transaction.atomic()
        with six.assertRaisesRegex(self, Exception, "Oops"):
            with atomic:
                Reporter.objects.create(last_name="Tintin")
                with six.assertRaisesRegex(self, Exception, "Oops"):
                    with atomic:
                        Reporter.objects.create(first_name="Haddock")
                        raise Exception("Oops, that's his last name")
                raise Exception("Oops, that's his first name")
        self.assertQuerysetEqual(Reporter.objects.all(), [])

    def test_force_rollback(self):
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
            # atomic block shouldn't rollback, but force it.
            self.assertFalse(transaction.get_rollback())
            transaction.set_rollback(True)
        self.assertQuerysetEqual(Reporter.objects.all(), [])

    def test_prevent_rollback(self):
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
            sid = transaction.savepoint()
            # trigger a database error inside an inner atomic without savepoint
            with self.assertRaises(DatabaseError):
                with transaction.atomic(savepoint=False):
                    with connection.cursor() as cursor:
                        cursor.execute(
                            "SELECT no_such_col FROM transactions_reporter")
            # prevent atomic from rolling back since we're recovering manually
            self.assertTrue(transaction.get_rollback())
            transaction.set_rollback(False)
            transaction.savepoint_rollback(sid)
        self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
class AtomicInsideTransactionTests(AtomicTests):
    """Rerun all the basic atomic tests inside an already-open transaction."""

    def setUp(self):
        # Open an enclosing atomic block by driving the context-manager
        # protocol by hand; tearDown() closes it again.
        self.atomic = transaction.atomic()
        self.atomic.__enter__()

    def tearDown(self):
        # Exit with the current exception state so that a failed test
        # triggers a rollback of the enclosing block.
        exc_type, exc_value, traceback = sys.exc_info()
        self.atomic.__exit__(exc_type, exc_value, traceback)
@skipIf(connection.features.autocommits_when_autocommit_is_off,
        "This test requires a non-autocommit mode that doesn't autocommit.")
class AtomicWithoutAutocommitTests(AtomicTests):
    """All basic tests for atomic should also pass when autocommit is turned off."""

    def setUp(self):
        transaction.set_autocommit(False)

    def tearDown(self):
        # The tests access the database after exercising 'atomic', initiating
        # a transaction; a rollback is required before restoring autocommit.
        transaction.rollback()
        transaction.set_autocommit(True)
@skipUnless(connection.features.uses_savepoints,
            "'atomic' requires transactions and savepoints.")
class AtomicMergeTests(TransactionTestCase):
    """Test merging transactions with savepoint=False."""

    available_apps = ['transactions']

    def test_merged_outer_rollback(self):
        # Both inner blocks are merged into the outer transaction, so the
        # failure of the innermost one poisons everything up to the first
        # real savepoint - here, the outermost block.
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
            with transaction.atomic(savepoint=False):
                Reporter.objects.create(first_name="Archibald", last_name="Haddock")
                with six.assertRaisesRegex(self, Exception, "Oops"):
                    with transaction.atomic(savepoint=False):
                        Reporter.objects.create(first_name="Calculus")
                        raise Exception("Oops, that's his last name")
                # The third insert couldn't be rolled back. Temporarily mark
                # the connection as not needing rollback to check it.
                self.assertTrue(transaction.get_rollback())
                transaction.set_rollback(False)
                self.assertEqual(Reporter.objects.count(), 3)
                transaction.set_rollback(True)
            # The second insert couldn't be rolled back. Temporarily mark the
            # connection as not needing rollback to check it.
            self.assertTrue(transaction.get_rollback())
            transaction.set_rollback(False)
            self.assertEqual(Reporter.objects.count(), 3)
            transaction.set_rollback(True)
        # The first block has a savepoint and must roll back.
        self.assertQuerysetEqual(Reporter.objects.all(), [])

    def test_merged_inner_savepoint_rollback(self):
        # Here the middle block has its own savepoint, so the rollback stops
        # there instead of propagating to the outermost block.
        with transaction.atomic():
            Reporter.objects.create(first_name="Tintin")
            with transaction.atomic():
                Reporter.objects.create(first_name="Archibald", last_name="Haddock")
                with six.assertRaisesRegex(self, Exception, "Oops"):
                    with transaction.atomic(savepoint=False):
                        Reporter.objects.create(first_name="Calculus")
                        raise Exception("Oops, that's his last name")
                # The third insert couldn't be rolled back. Temporarily mark
                # the connection as not needing rollback to check it.
                self.assertTrue(transaction.get_rollback())
                transaction.set_rollback(False)
                self.assertEqual(Reporter.objects.count(), 3)
                transaction.set_rollback(True)
            # The second block has a savepoint and must roll back.
            self.assertEqual(Reporter.objects.count(), 1)
        self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
@skipUnless(connection.features.uses_savepoints,
            "'atomic' requires transactions and savepoints.")
class AtomicErrorsTests(TransactionTestCase):
    """Operations that are forbidden or special-cased inside atomic blocks."""

    available_apps = ['transactions']

    def test_atomic_prevents_setting_autocommit(self):
        autocommit = transaction.get_autocommit()
        with transaction.atomic():
            with self.assertRaises(transaction.TransactionManagementError):
                transaction.set_autocommit(not autocommit)
        # Make sure autocommit wasn't changed.
        self.assertEqual(connection.autocommit, autocommit)

    def test_atomic_prevents_calling_transaction_methods(self):
        # Manual commit/rollback are forbidden while inside atomic.
        with transaction.atomic():
            with self.assertRaises(transaction.TransactionManagementError):
                transaction.commit()
            with self.assertRaises(transaction.TransactionManagementError):
                transaction.rollback()

    def test_atomic_prevents_queries_in_broken_transaction(self):
        r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
        with transaction.atomic():
            r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
            with self.assertRaises(IntegrityError):
                r2.save(force_insert=True)
            # The transaction is marked as needing rollback.
            with self.assertRaises(transaction.TransactionManagementError):
                r2.save(force_update=True)
        self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Haddock")

    @skipIfDBFeature('atomic_transactions')
    def test_atomic_allows_queries_after_fixing_transaction(self):
        r1 = Reporter.objects.create(first_name="Archibald", last_name="Haddock")
        with transaction.atomic():
            r2 = Reporter(first_name="Cuthbert", last_name="Calculus", id=r1.id)
            with self.assertRaises(IntegrityError):
                r2.save(force_insert=True)
            # Mark the transaction as no longer needing rollback.
            transaction.set_rollback(False)
            r2.save(force_update=True)
        self.assertEqual(Reporter.objects.get(pk=r1.pk).last_name, "Calculus")

    @skipUnlessDBFeature('test_db_allows_multiple_connections')
    def test_atomic_prevents_queries_in_broken_transaction_after_client_close(self):
        with transaction.atomic():
            Reporter.objects.create(first_name="Archibald", last_name="Haddock")
            connection.close()
            # The connection is closed and the transaction is marked as
            # needing rollback. This will raise an InterfaceError on databases
            # that refuse to create cursors on closed connections (PostgreSQL)
            # and a TransactionManagementError on other databases.
            with self.assertRaises(Error):
                Reporter.objects.create(first_name="Cuthbert", last_name="Calculus")
        # The connection is usable again.
        self.assertEqual(Reporter.objects.count(), 0)
@skipUnless(connection.vendor == 'mysql', "MySQL-specific behaviors")
class AtomicMySQLTests(TransactionTestCase):
    """Behaviors specific to MySQL's handling of savepoints."""

    available_apps = ['transactions']

    @skipIf(threading is None, "Test requires threading")
    def test_implicit_savepoint_rollback(self):
        """MySQL implicitly rolls back savepoints when it deadlocks (#22291)."""
        other_thread_ready = threading.Event()

        def other_thread():
            try:
                with transaction.atomic():
                    Reporter.objects.create(id=1, first_name="Tintin")
                    other_thread_ready.set()
                    # We cannot synchronize the two threads with an event here
                    # because the main thread locks. Sleep for a little while.
                    time.sleep(1)
                    # 2) ... and this line deadlocks. (see below for 1)
                    Reporter.objects.exclude(id=1).update(id=2)
            finally:
                # This is the thread-local connection, not the main connection.
                connection.close()

        # NOTE: 'other_thread' is deliberately rebound from the function
        # above to the Thread object that runs it.
        other_thread = threading.Thread(target=other_thread)
        other_thread.start()
        other_thread_ready.wait()
        with six.assertRaisesRegex(self, OperationalError, 'Deadlock found'):
            # Double atomic to enter a transaction and create a savepoint.
            with transaction.atomic():
                with transaction.atomic():
                    # 1) This line locks... (see above for 2)
                    Reporter.objects.create(id=1, first_name="Tintin")
        other_thread.join()
class AtomicMiscTests(TransactionTestCase):
    """Miscellaneous regression tests for transaction.atomic."""

    available_apps = []

    def test_wrap_callable_instance(self):
        # Regression test for #20028: atomic() must accept callable
        # instances, not just plain functions.
        class Callable(object):
            def __call__(self):
                pass
        # Must not raise an exception
        transaction.atomic(Callable())

    @skipUnlessDBFeature('can_release_savepoints')
    def test_atomic_does_not_leak_savepoints_on_failure(self):
        # Regression test for #23074
        # Expect an error when rolling back a savepoint that doesn't exist.
        # Done outside of the transaction block to ensure proper recovery.
        with self.assertRaises(Error):
            # Start a plain transaction.
            with transaction.atomic():
                # Swallow the intentional error raised in the sub-transaction.
                with six.assertRaisesRegex(self, Exception, "Oops"):
                    # Start a sub-transaction with a savepoint.
                    with transaction.atomic():
                        sid = connection.savepoint_ids[-1]
                        raise Exception("Oops")
                # This is expected to fail because the savepoint no longer exists.
                connection.savepoint_rollback(sid)
| bsd-3-clause |
valkjsaaa/sl4a | python/src/Lib/test/test_collections.py | 48 | 18805 | import unittest, doctest
from test import test_support
from collections import namedtuple
import pickle, cPickle, copy
from collections import Hashable, Iterable, Iterator
from collections import Sized, Container, Callable
from collections import Set, MutableSet
from collections import Mapping, MutableMapping
from collections import Sequence, MutableSequence
TestNT = namedtuple('TestNT', 'x y z') # type used for pickle tests
class TestNamedTuple(unittest.TestCase):
    """Tests for collections.namedtuple: the factory function, instance
    behavior, tuple protocol compliance, pickling and copying."""

    def test_factory(self):
        """The factory validates names and sets up the class attributes."""
        Point = namedtuple('Point', 'x y')
        self.assertEqual(Point.__name__, 'Point')
        self.assertEqual(Point.__doc__, 'Point(x, y)')
        self.assertEqual(Point.__slots__, ())
        self.assertEqual(Point.__module__, __name__)
        self.assertEqual(Point.__getitem__, tuple.__getitem__)
        self.assertEqual(Point._fields, ('x', 'y'))

        self.assertRaises(ValueError, namedtuple, 'abc%', 'efg ghi')       # type has non-alpha char
        self.assertRaises(ValueError, namedtuple, 'class', 'efg ghi')      # type has keyword
        self.assertRaises(ValueError, namedtuple, '9abc', 'efg ghi')       # type starts with digit

        self.assertRaises(ValueError, namedtuple, 'abc', 'efg g%hi')       # field with non-alpha char
        self.assertRaises(ValueError, namedtuple, 'abc', 'abc class')      # field has keyword
        self.assertRaises(ValueError, namedtuple, 'abc', '8efg 9ghi')      # field starts with digit
        self.assertRaises(ValueError, namedtuple, 'abc', '_efg ghi')       # field with leading underscore
        self.assertRaises(ValueError, namedtuple, 'abc', 'efg efg ghi')    # duplicate field

        namedtuple('Point0', 'x1 y2')   # Verify that numbers are allowed in names
        namedtuple('_', 'a b c')        # Test leading underscores in a typename

        nt = namedtuple('nt', u'the quick brown fox')   # check unicode input
        self.assert_("u'" not in repr(nt._fields))
        nt = namedtuple('nt', (u'the', u'quick'))       # check unicode input
        self.assert_("u'" not in repr(nt._fields))

        self.assertRaises(TypeError, Point._make, [11])             # catch too few args
        self.assertRaises(TypeError, Point._make, [11, 22, 33])     # catch too many args

    def test_instance(self):
        """Instances support positional/keyword creation and the _* API."""
        Point = namedtuple('Point', 'x y')
        p = Point(11, 22)
        self.assertEqual(p, Point(x=11, y=22))
        self.assertEqual(p, Point(11, y=22))
        self.assertEqual(p, Point(y=22, x=11))
        self.assertEqual(p, Point(*(11, 22)))
        self.assertEqual(p, Point(**dict(x=11, y=22)))
        self.assertRaises(TypeError, Point, 1)          # too few args
        self.assertRaises(TypeError, Point, 1, 2, 3)    # too many args
        self.assertRaises(TypeError, eval, 'Point(XXX=1, y=2)', locals())   # wrong keyword argument
        self.assertRaises(TypeError, eval, 'Point(x=1)', locals())          # missing keyword argument
        self.assertEqual(repr(p), 'Point(x=11, y=22)')
        self.assert_('__dict__' not in dir(p))          # verify instance has no dict
        self.assert_('__weakref__' not in dir(p))
        self.assertEqual(p, Point._make([11, 22]))      # test _make classmethod
        self.assertEqual(p._fields, ('x', 'y'))         # test _fields attribute
        self.assertEqual(p._replace(x=1), (1, 22))      # test _replace method
        self.assertEqual(p._asdict(), dict(x=11, y=22))  # test _asdict method

        try:
            p._replace(x=1, error=2)
        except ValueError:
            pass
        else:
            # Bug fix: this was 'self._fail(...)', which is not a TestCase
            # method and would raise AttributeError instead of reporting a
            # proper test failure if _replace ever stopped validating names.
            self.fail('Did not detect an incorrect fieldname')

        # verify that field string can have commas
        Point = namedtuple('Point', 'x, y')
        p = Point(x=11, y=22)
        self.assertEqual(repr(p), 'Point(x=11, y=22)')

        # verify that fieldspec can be a non-string sequence
        Point = namedtuple('Point', ('x', 'y'))
        p = Point(x=11, y=22)
        self.assertEqual(repr(p), 'Point(x=11, y=22)')

    def test_tupleness(self):
        """Named tuples behave like (and are) real tuples."""
        Point = namedtuple('Point', 'x y')
        p = Point(11, 22)

        self.assert_(isinstance(p, tuple))
        self.assertEqual(p, (11, 22))                   # matches a real tuple
        self.assertEqual(tuple(p), (11, 22))            # coercable to a real tuple
        self.assertEqual(list(p), [11, 22])             # coercable to a list
        self.assertEqual(max(p), 22)                    # iterable
        self.assertEqual(max(*p), 22)                   # star-able
        x, y = p
        self.assertEqual(p, (x, y))                     # unpacks like a tuple
        self.assertEqual((p[0], p[1]), (11, 22))        # indexable like a tuple
        self.assertRaises(IndexError, p.__getitem__, 3)

        self.assertEqual(p.x, x)
        self.assertEqual(p.y, y)
        self.assertRaises(AttributeError, eval, 'p.z', locals())

    def test_odd_sizes(self):
        """Zero-field, one-field and very wide named tuples all work."""
        Zero = namedtuple('Zero', '')
        self.assertEqual(Zero(), ())
        self.assertEqual(Zero._make([]), ())
        self.assertEqual(repr(Zero()), 'Zero()')
        self.assertEqual(Zero()._asdict(), {})
        self.assertEqual(Zero()._fields, ())

        Dot = namedtuple('Dot', 'd')
        self.assertEqual(Dot(1), (1,))
        self.assertEqual(Dot._make([1]), (1,))
        self.assertEqual(Dot(1).d, 1)
        self.assertEqual(repr(Dot(1)), 'Dot(d=1)')
        self.assertEqual(Dot(1)._asdict(), {'d':1})
        self.assertEqual(Dot(1)._replace(d=999), (999,))
        self.assertEqual(Dot(1)._fields, ('d',))

        # Stress test with thousands of randomly named fields.
        n = 5000
        import string, random
        names = list(set(''.join([random.choice(string.ascii_letters)
                                  for j in range(10)]) for i in range(n)))
        n = len(names)
        Big = namedtuple('Big', names)
        b = Big(*range(n))
        self.assertEqual(b, tuple(range(n)))
        self.assertEqual(Big._make(range(n)), tuple(range(n)))
        for pos, name in enumerate(names):
            self.assertEqual(getattr(b, name), pos)
        repr(b)                                 # make sure repr() doesn't blow-up
        d = b._asdict()
        d_expected = dict(zip(names, range(n)))
        self.assertEqual(d, d_expected)
        b2 = b._replace(**dict([(names[1], 999),(names[-5], 42)]))
        b2_expected = range(n)
        b2_expected[1] = 999
        b2_expected[-5] = 42
        self.assertEqual(b2, tuple(b2_expected))
        self.assertEqual(b._fields, tuple(names))

    def test_pickle(self):
        """Named tuples round-trip through pickle and cPickle."""
        p = TestNT(x=10, y=20, z=30)
        for module in pickle, cPickle:
            loads = getattr(module, 'loads')
            dumps = getattr(module, 'dumps')
            for protocol in -1, 0, 1, 2:
                q = loads(dumps(p, protocol))
                self.assertEqual(p, q)
                self.assertEqual(p._fields, q._fields)

    def test_copy(self):
        """Named tuples survive copy.copy and copy.deepcopy."""
        p = TestNT(x=10, y=20, z=30)
        for copier in copy.copy, copy.deepcopy:
            q = copier(p)
            self.assertEqual(p, q)
            self.assertEqual(p._fields, q._fields)
class ABCTestCase(unittest.TestCase):
    """Mixin providing a helper to check abstract-method enforcement."""

    def validate_abstract_methods(self, abc, *names):
        """Verify that *abc* can be instantiated only when every method in
        *names* is overridden."""
        # A do-nothing implementation used to satisfy each abstract method.
        def stub(self, *args):
            return 0
        full_stubs = dict((name, stub) for name in names)

        # With all required methods present, instantiation must succeed.
        complete = type('C', (abc,), full_stubs)
        complete()

        # Dropping any single required method must make instantiation fail.
        for missing in names:
            partial_stubs = full_stubs.copy()
            del partial_stubs[missing]
            incomplete = type('C', (abc,), partial_stubs)
            self.assertRaises(TypeError, incomplete, missing)
class TestOneTrickPonyABCs(ABCTestCase):
    """Tests for the single-method ABCs exported by the collections module:
    Hashable, Iterable, Iterator, Sized, Container and Callable.

    Each test checks isinstance/issubclass behaviour for known positive and
    negative samples, direct subclassing, and (last test) virtual-subclass
    registration.  NOTE(review): this is Python 2 era code -- it relies on
    the legacy failIf/failUnless aliases, old-style ``__metaclass__`` and
    the Python 2 iterator method name ``next``.
    """

    def test_Hashable(self):
        # Check some non-hashables
        non_samples = [list(), set(), dict()]
        for x in non_samples:
            self.failIf(isinstance(x, Hashable), repr(x))
            self.failIf(issubclass(type(x), Hashable), repr(type(x)))
        # Check some hashables
        samples = [None,
                   int(), float(), complex(),
                   str(),
                   tuple(), frozenset(),
                   int, list, object, type,
                   ]
        for x in samples:
            self.failUnless(isinstance(x, Hashable), repr(x))
            self.failUnless(issubclass(type(x), Hashable), repr(type(x)))
        # The ABC itself is abstract and must not be instantiable.
        self.assertRaises(TypeError, Hashable)
        # Check direct subclassing
        class H(Hashable):
            def __hash__(self):
                return super(H, self).__hash__()
            __eq__ = Hashable.__eq__  # Silence Py3k warning
        self.assertEqual(hash(H()), 0)
        self.failIf(issubclass(int, H))
        self.validate_abstract_methods(Hashable, '__hash__')

    def test_Iterable(self):
        # Check some non-iterables
        non_samples = [None, 42, 3.14, 1j]
        for x in non_samples:
            self.failIf(isinstance(x, Iterable), repr(x))
            self.failIf(issubclass(type(x), Iterable), repr(type(x)))
        # Check some iterables
        samples = [str(),
                   tuple(), list(), set(), frozenset(), dict(),
                   dict().keys(), dict().items(), dict().values(),
                   (lambda: (yield))(),
                   (x for x in []),
                   ]
        for x in samples:
            self.failUnless(isinstance(x, Iterable), repr(x))
            self.failUnless(issubclass(type(x), Iterable), repr(type(x)))
        # Check direct subclassing
        class I(Iterable):
            def __iter__(self):
                return super(I, self).__iter__()
        self.assertEqual(list(I()), [])
        self.failIf(issubclass(str, I))
        self.validate_abstract_methods(Iterable, '__iter__')

    def test_Iterator(self):
        # Containers are iterable but are not themselves iterators.
        non_samples = [None, 42, 3.14, 1j, "".encode('ascii'), "", (), [],
                       {}, set()]
        for x in non_samples:
            self.failIf(isinstance(x, Iterator), repr(x))
            self.failIf(issubclass(type(x), Iterator), repr(type(x)))
        samples = [iter(str()),
                   iter(tuple()), iter(list()), iter(dict()),
                   iter(set()), iter(frozenset()),
                   iter(dict().keys()), iter(dict().items()),
                   iter(dict().values()),
                   (lambda: (yield))(),
                   (x for x in []),
                   ]
        for x in samples:
            self.failUnless(isinstance(x, Iterator), repr(x))
            self.failUnless(issubclass(type(x), Iterator), repr(type(x)))
        # Python 2 iterator protocol: the required method is next().
        self.validate_abstract_methods(Iterator, 'next')

    def test_Sized(self):
        # Generators have no len().
        non_samples = [None, 42, 3.14, 1j,
                       (lambda: (yield))(),
                       (x for x in []),
                       ]
        for x in non_samples:
            self.failIf(isinstance(x, Sized), repr(x))
            self.failIf(issubclass(type(x), Sized), repr(type(x)))
        samples = [str(),
                   tuple(), list(), set(), frozenset(), dict(),
                   dict().keys(), dict().items(), dict().values(),
                   ]
        for x in samples:
            self.failUnless(isinstance(x, Sized), repr(x))
            self.failUnless(issubclass(type(x), Sized), repr(type(x)))
        self.validate_abstract_methods(Sized, '__len__')

    def test_Container(self):
        non_samples = [None, 42, 3.14, 1j,
                       (lambda: (yield))(),
                       (x for x in []),
                       ]
        for x in non_samples:
            self.failIf(isinstance(x, Container), repr(x))
            self.failIf(issubclass(type(x), Container), repr(type(x)))
        samples = [str(),
                   tuple(), list(), set(), frozenset(), dict(),
                   dict().keys(), dict().items(),
                   ]
        for x in samples:
            self.failUnless(isinstance(x, Container), repr(x))
            self.failUnless(issubclass(type(x), Container), repr(type(x)))
        self.validate_abstract_methods(Container, '__contains__')

    def test_Callable(self):
        non_samples = [None, 42, 3.14, 1j,
                       "", "".encode('ascii'), (), [], {}, set(),
                       (lambda: (yield))(),
                       (x for x in []),
                       ]
        for x in non_samples:
            self.failIf(isinstance(x, Callable), repr(x))
            self.failIf(issubclass(type(x), Callable), repr(type(x)))
        samples = [lambda: None,
                   type, int, object,
                   len,
                   list.append, [].append,
                   ]
        for x in samples:
            self.failUnless(isinstance(x, Callable), repr(x))
            self.failUnless(issubclass(type(x), Callable), repr(type(x)))
        self.validate_abstract_methods(Callable, '__call__')

    def test_direct_subclassing(self):
        # Subclassing an ABC directly works; unrelated types stay unrelated.
        for B in Hashable, Iterable, Iterator, Sized, Container, Callable:
            class C(B):
                pass
            self.failUnless(issubclass(C, B))
            self.failIf(issubclass(int, C))

    def test_registration(self):
        # register() turns an unrelated class into a virtual subclass.
        for B in Hashable, Iterable, Iterator, Sized, Container, Callable:
            class C:
                __metaclass__ = type
                __hash__ = None  # Make sure it isn't hashable by default
            self.failIf(issubclass(C, B), B.__name__)
            B.register(C)
            self.failUnless(issubclass(C, B))
class WithSet(MutableSet):
    """Minimal concrete MutableSet backed by a plain ``set``.

    Only the five abstract methods are implemented; everything else
    (``&=``, ``pop``, set operators, ...) comes from the MutableSet mixins.
    """

    def __init__(self, it=()):
        self.data = set(it)

    def add(self, item):
        self.data.add(item)

    def discard(self, item):
        self.data.discard(item)

    def __contains__(self, item):
        return item in self.data

    def __iter__(self):
        return iter(self.data)

    def __len__(self):
        return len(self.data)
class TestCollectionABCs(ABCTestCase):
    """Tests for the container ABCs: Set, MutableSet, Mapping,
    MutableMapping, Sequence and MutableSequence.

    NOTE(review): Python 2 era code -- relies on ``basestring``,
    ``xrange`` and the legacy failIf/failUnless aliases.
    """

    # XXX For now, we only test some virtual inheritance properties.
    # We should also test the proper behavior of the collection ABCs
    # as real base classes or mix-in classes.

    def test_Set(self):
        for sample in [set, frozenset]:
            self.failUnless(isinstance(sample(), Set))
            self.failUnless(issubclass(sample, Set))
        self.validate_abstract_methods(Set, '__contains__', '__iter__', '__len__')

    def test_hash_Set(self):
        # A user-defined Set can delegate hashing to the _hash() mixin;
        # equal contents must hash equal.
        class OneTwoThreeSet(Set):
            def __init__(self):
                self.contents = [1, 2, 3]
            def __contains__(self, x):
                return x in self.contents
            def __len__(self):
                return len(self.contents)
            def __iter__(self):
                return iter(self.contents)
            def __hash__(self):
                return self._hash()
        a, b = OneTwoThreeSet(), OneTwoThreeSet()
        self.failUnless(hash(a) == hash(b))

    def test_MutableSet(self):
        self.failUnless(isinstance(set(), MutableSet))
        self.failUnless(issubclass(set, MutableSet))
        # frozenset is immutable, so it must not be a MutableSet.
        self.failIf(isinstance(frozenset(), MutableSet))
        self.failIf(issubclass(frozenset, MutableSet))
        self.validate_abstract_methods(MutableSet, '__contains__', '__iter__', '__len__',
                                       'add', 'discard')

    def test_issue_5647(self):
        # MutableSet.__iand__ mutated the set during iteration
        s = WithSet('abcd')
        s &= WithSet('cdef')  # This used to fail
        self.assertEqual(set(s), set('cd'))

    def test_issue_4920(self):
        # MutableSet.pop() method did not work
        class MySet(collections.MutableSet):
            __slots__=['__s']
            def __init__(self,items=None):
                if items is None:
                    items=[]
                self.__s=set(items)
            def __contains__(self,v):
                return v in self.__s
            def __iter__(self):
                return iter(self.__s)
            def __len__(self):
                return len(self.__s)
            def add(self,v):
                result=v not in self.__s
                self.__s.add(v)
                return result
            def discard(self,v):
                result=v in self.__s
                self.__s.discard(v)
                return result
            def __repr__(self):
                return "MySet(%s)" % repr(list(self))
        s = MySet([5,43,2,1])
        self.assertEqual(s.pop(), 1)

    def test_Mapping(self):
        for sample in [dict]:
            self.failUnless(isinstance(sample(), Mapping))
            self.failUnless(issubclass(sample, Mapping))
        self.validate_abstract_methods(Mapping, '__contains__', '__iter__', '__len__',
                                       '__getitem__')

    def test_MutableMapping(self):
        for sample in [dict]:
            self.failUnless(isinstance(sample(), MutableMapping))
            self.failUnless(issubclass(sample, MutableMapping))
        self.validate_abstract_methods(MutableMapping, '__contains__', '__iter__', '__len__',
                                       '__getitem__', '__setitem__', '__delitem__')

    def test_Sequence(self):
        for sample in [tuple, list, str]:
            self.failUnless(isinstance(sample(), Sequence))
            self.failUnless(issubclass(sample, Sequence))
        # Python 2 only: basestring and xrange also count as Sequences.
        self.failUnless(issubclass(basestring, Sequence))
        self.failUnless(isinstance(range(10), Sequence))
        self.failUnless(issubclass(xrange, Sequence))
        self.failUnless(issubclass(str, Sequence))
        self.validate_abstract_methods(Sequence, '__contains__', '__iter__', '__len__',
                                       '__getitem__')

    def test_MutableSequence(self):
        for sample in [tuple, str]:
            self.failIf(isinstance(sample(), MutableSequence))
            self.failIf(issubclass(sample, MutableSequence))
        for sample in [list]:
            self.failUnless(isinstance(sample(), MutableSequence))
            self.failUnless(issubclass(sample, MutableSequence))
        self.failIf(issubclass(basestring, MutableSequence))
        self.validate_abstract_methods(MutableSequence, '__contains__', '__iter__',
                                       '__len__', '__getitem__', '__setitem__', '__delitem__', 'insert')
import doctest, collections
def test_main(verbose=None):
    """Run the collections unit-test classes plus the module doctests."""
    named_tuple_docs = doctest.DocTestSuite(module=collections)
    suites = [TestNamedTuple, named_tuple_docs,
              TestOneTrickPonyABCs, TestCollectionABCs]
    test_support.run_unittest(*suites)
    test_support.run_doctest(collections, verbose)
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    test_main(verbose=True)
| apache-2.0 |
IONISx/edx-platform | common/djangoapps/util/tests/test_disable_rate_limit.py | 148 | 1832 | """Tests for disabling rate limiting. """
import unittest
from django.test import TestCase
from django.core.cache import cache
from django.conf import settings
import mock
from rest_framework.views import APIView
from rest_framework.throttling import BaseThrottle
from rest_framework.exceptions import Throttled
from util.disable_rate_limit import can_disable_rate_limit
from util.models import RateLimitConfiguration
class FakeThrottle(BaseThrottle):
    """DRF throttle stub that reports every request as rate-limited."""
    def allow_request(self, request, view):
        # Unconditionally throttle, so the rate-limit configuration is the
        # only thing that decides whether a request gets through.
        return False
@can_disable_rate_limit
class FakeApiView(APIView):
    """Minimal DRF view guarded only by the always-rejecting FakeThrottle,
    with throttling made switchable by ``can_disable_rate_limit``."""
    authentication_classes = []        # no authentication needed in tests
    permission_classes = []            # open to all callers
    throttle_classes = [FakeThrottle]  # always-throttling stub
@unittest.skipUnless(settings.ROOT_URLCONF == 'lms.urls', 'Test only valid in lms')
class DisableRateLimitTest(TestCase):
    """Check that we can disable rate limiting for perf testing. """
    def setUp(self):
        super(DisableRateLimitTest, self).setUp()
        # Clear the cache so a RateLimitConfiguration row cached by an
        # earlier test cannot leak into these assertions.
        cache.clear()
        self.view = FakeApiView()

    def test_enable_rate_limit(self):
        # Enable rate limiting using model-based config
        RateLimitConfiguration.objects.create(enabled=True)
        # By default, should enforce rate limiting
        # Since our fake throttle always rejects requests,
        # we should expect the request to be rejected.
        request = mock.Mock()
        with self.assertRaises(Throttled):
            self.view.check_throttles(request)

    def test_disable_rate_limit(self):
        # Disable rate limiting using model-based config
        RateLimitConfiguration.objects.create(enabled=False)
        # With rate-limiting disabled, the request
        # should get through. The `check_throttles()` call
        # should return without raising an exception.
        request = mock.Mock()
        self.view.check_throttles(request)
| agpl-3.0 |
sencha/chromium-spacewalk | native_client_sdk/src/build_tools/tests/sdktools_commands_test.py | 76 | 18779 | #!/usr/bin/env python
# Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import sys
import re
import tarfile
import tempfile
import unittest
from sdktools_test import SdkToolsTestCase
SCRIPT_DIR = os.path.dirname(os.path.abspath(__file__))
BUILD_TOOLS_DIR = os.path.dirname(SCRIPT_DIR)
TOOLS_DIR = os.path.join(os.path.dirname(BUILD_TOOLS_DIR), 'tools')
sys.path.extend([BUILD_TOOLS_DIR, TOOLS_DIR])
import manifest_util
import oshelpers
class TestCommands(SdkToolsTestCase):
    """End-to-end tests for the naclsdk command-line tool's subcommands
    (info, list, sources, update, uninstall, reinstall) run against a
    local fake server populated with dummy bundles/archives."""

    def setUp(self):
        # Standard sandbox: fake server, manifest and scratch directories.
        self.SetupDefault()

    def _AddDummyBundle(self, manifest, bundle_name):
        """Add a dummy bundle (one host-OS archive, revision 1337) to
        *manifest* and return the bundle instance the manifest stores."""
        bundle = manifest_util.Bundle(bundle_name)
        bundle.revision = 1337
        bundle.version = 23
        bundle.description = bundle_name
        bundle.stability = 'beta'
        bundle.recommended = 'no'
        bundle.repath = bundle_name
        archive = self._MakeDummyArchive(bundle_name)
        bundle.AddArchive(archive)
        manifest.SetBundle(bundle)
        # Need to get the bundle from the manifest -- it doesn't use the one we
        # gave it.
        return manifest.GetBundle(bundle_name)

    def _MakeDummyArchive(self, bundle_name, tarname=None, filename='dummy.txt'):
        """Build a .tar.bz2 archive containing a single dummy file, place
        it in the fake server's directory, and return a manifest Archive
        describing it (URL, size, SHA1)."""
        tarname = (tarname or bundle_name) + '.tar.bz2'
        temp_dir = tempfile.mkdtemp(prefix='archive')
        try:
            dummy_path = os.path.join(temp_dir, filename)
            with open(dummy_path, 'w') as stream:
                stream.write('Dummy stuff for %s' % bundle_name)
            # Build the tarfile directly into the server's directory.
            tar_path = os.path.join(self.basedir, tarname)
            tarstream = tarfile.open(tar_path, 'w:bz2')
            try:
                tarstream.add(dummy_path, os.path.join(bundle_name, filename))
            finally:
                tarstream.close()
            with open(tar_path, 'rb') as archive_stream:
                sha1, size = manifest_util.DownloadAndComputeHash(archive_stream)
            archive = manifest_util.Archive(manifest_util.GetHostOS())
            archive.url = self.server.GetURL(os.path.basename(tar_path))
            archive.size = size
            archive.checksum = sha1
            return archive
        finally:
            oshelpers.Remove(['-rf', temp_dir])

    def testInfoBasic(self):
        """The info command should display information about the given bundle."""
        self._WriteManifest()
        output = self._Run(['info', 'sdk_tools'])
        # Make sure basic information is there
        bundle = self.manifest.GetBundle('sdk_tools')
        archive = bundle.GetHostOSArchive()
        self.assertTrue(bundle.name in output)
        self.assertTrue(bundle.description in output)
        self.assertTrue(str(bundle.revision) in output)
        self.assertTrue(str(archive.size) in output)
        self.assertTrue(archive.checksum in output)
        self.assertTrue(bundle.stability in output)

    def testInfoUnknownBundle(self):
        """The info command should notify the user of unknown bundles."""
        self._WriteManifest()
        bogus_bundle = 'foobar'
        output = self._Run(['info', bogus_bundle])
        self.assertTrue(re.search(r'[uU]nknown', output))
        self.assertTrue(bogus_bundle in output)

    def testInfoMultipleBundles(self):
        """The info command should support listing multiple bundles."""
        self._AddDummyBundle(self.manifest, 'pepper_23')
        self._AddDummyBundle(self.manifest, 'pepper_24')
        self._WriteManifest()
        output = self._Run(['info', 'pepper_23', 'pepper_24'])
        self.assertTrue('pepper_23' in output)
        self.assertTrue('pepper_24' in output)
        self.assertFalse(re.search(r'[uU]nknown', output))

    def testInfoMultipleArchives(self):
        """The info command should display multiple archives."""
        bundle = self._AddDummyBundle(self.manifest, 'pepper_26')
        archive2 = self._MakeDummyArchive('pepper_26', tarname='pepper_26_more',
                                          filename='dummy2.txt')
        archive2.host_os = 'all'
        bundle.AddArchive(archive2)
        self._WriteManifest()
        output = self._Run(['info', 'pepper_26'])
        self.assertTrue('pepper_26' in output)
        self.assertTrue('pepper_26_more' in output)

    def testListBasic(self):
        """The list command should display basic information about remote
        bundles."""
        self._WriteManifest()
        output = self._Run(['list'])
        self.assertTrue(re.search('I.*?sdk_tools.*?stable', output, re.MULTILINE))
        # This line is important (it's used by the updater to determine if the
        # sdk_tools bundle needs to be updated), so let's be explicit.
        # BUG FIX: the original called assertTrue on the literal string
        # itself, which is always truthy; assert membership in the output.
        self.assertTrue('All installed bundles are up-to-date.' in output)

    def testListMultiple(self):
        """The list command should display multiple bundles."""
        self._AddDummyBundle(self.manifest, 'pepper_23')
        self._WriteManifest()
        output = self._Run(['list'])
        # Added pepper_23 to the remote manifest not the local manifest, so it
        # shouldn't be installed.
        self.assertTrue(re.search('^[^I]*pepper_23', output, re.MULTILINE))
        self.assertTrue('sdk_tools' in output)

    def testListWithRevision(self):
        """The list command should display the revision, if desired."""
        self._AddDummyBundle(self.manifest, 'pepper_23')
        self._WriteManifest()
        output = self._Run(['list', '-r'])
        self.assertTrue(re.search('pepper_23.*?r1337', output))

    def testListWithUpdatedRevision(self):
        """The list command should display when there is an update available."""
        p23bundle = self._AddDummyBundle(self.manifest, 'pepper_23')
        self._WriteCacheManifest(self.manifest)
        # Modify the remote manifest to have a newer revision.
        p23bundle.revision += 1
        self._WriteManifest()
        output = self._Run(['list', '-r'])
        # We should see a display like this:  I* pepper_23 (r1337 -> r1338)
        # The star indicates the bundle has an update.
        self.assertTrue(re.search('I\*\s+pepper_23.*?r1337.*?r1338', output))

    def testListLocalVersionNotOnRemote(self):
        """The list command should tell the user if they have a bundle installed
        that doesn't exist in the remote manifest."""
        self._WriteManifest()
        p23bundle = self._AddDummyBundle(self.manifest, 'pepper_23')
        self._WriteCacheManifest(self.manifest)
        output = self._Run(['list', '-r'])
        message = 'Bundles installed locally that are not available remotely:'
        message_loc = output.find(message)
        self.assertNotEqual(message_loc, -1)
        # Make sure pepper_23 is listed after the message above.
        self.assertTrue('pepper_23' in output[message_loc:])

    def testSources(self):
        """The sources command should allow adding/listing/removing of sources.
        When a source is added, it will provide an additional set of bundles."""
        other_manifest = manifest_util.SDKManifest()
        self._AddDummyBundle(other_manifest, 'naclmono_23')
        with open(os.path.join(self.basedir, 'source.json'), 'w') as stream:
            stream.write(other_manifest.GetDataAsString())
        source_json_url = self.server.GetURL('source.json')
        self._WriteManifest()
        output = self._Run(['sources', '--list'])
        self.assertTrue('No external sources installed.' in output)
        output = self._Run(['sources', '--add', source_json_url])
        output = self._Run(['sources', '--list'])
        self.assertTrue(source_json_url in output)
        # Should be able to get info about that bundle.
        output = self._Run(['info', 'naclmono_23'])
        self.assertTrue('Unknown bundle' not in output)
        self._Run(['sources', '--remove', source_json_url])
        output = self._Run(['sources', '--list'])
        self.assertTrue('No external sources installed.' in output)

    def testUpdateBasic(self):
        """The update command should install the contents of a bundle to the SDK."""
        self._AddDummyBundle(self.manifest, 'pepper_23')
        self._WriteManifest()
        self._Run(['update', 'pepper_23'])
        self.assertTrue(os.path.exists(
            os.path.join(self.basedir, 'nacl_sdk', 'pepper_23', 'dummy.txt')))

    def testUpdateInCacheButDirectoryRemoved(self):
        """The update command should update if the bundle directory does not exist,
        even if the bundle is already in the cache manifest."""
        self._AddDummyBundle(self.manifest, 'pepper_23')
        self._WriteCacheManifest(self.manifest)
        self._WriteManifest()
        self._Run(['update', 'pepper_23'])
        self.assertTrue(os.path.exists(
            os.path.join(self.basedir, 'nacl_sdk', 'pepper_23', 'dummy.txt')))

    def testUpdateNoNewVersion(self):
        """The update command should do nothing if the bundle is already up-to-date.
        """
        self._AddDummyBundle(self.manifest, 'pepper_23')
        self._WriteManifest()
        self._Run(['update', 'pepper_23'])
        output = self._Run(['update', 'pepper_23'])
        self.assertTrue('is already up-to-date.' in output)

    def testUpdateWithNewVersion(self):
        """The update command should update to a new version if it exists."""
        bundle = self._AddDummyBundle(self.manifest, 'pepper_23')
        self._WriteManifest()
        self._Run(['update', 'pepper_23'])
        bundle.revision += 1
        self._WriteManifest()
        output = self._Run(['update', 'pepper_23'])
        self.assertTrue('already exists, but has an update available' in output)
        # Now update using --force.
        output = self._Run(['update', 'pepper_23', '--force'])
        self.assertTrue('Updating bundle' in output)
        cache_manifest = self._ReadCacheManifest()
        num_archives = len(cache_manifest.GetBundle('pepper_23').GetArchives())
        self.assertEqual(num_archives, 1)

    def testUpdateUnknownBundles(self):
        """The update command should ignore unknown bundles and notify the user."""
        self._WriteManifest()
        output = self._Run(['update', 'foobar'])
        self.assertTrue('unknown bundle' in output)

    def testUpdateRecommended(self):
        """The update command should update only recommended bundles when run
        without args.
        """
        bundle_25 = self._AddDummyBundle(self.manifest, 'pepper_25')
        bundle_25.recommended = 'no'
        bundle_26 = self._AddDummyBundle(self.manifest, 'pepper_26')
        bundle_26.recommended = 'yes'
        self._WriteManifest()
        output = self._Run(['update'])
        # Should not try to update sdk_tools (even though it is recommended)
        self.assertTrue('Ignoring manual update request.' not in output)
        self.assertFalse(os.path.exists(
            os.path.join(self.basedir, 'nacl_sdk', 'pepper_25')))
        self.assertTrue(os.path.exists(
            os.path.join(self.basedir, 'nacl_sdk', 'pepper_26', 'dummy.txt')))

    def testUpdateCanary(self):
        """The update command should create the correct directory name for repath'd
        bundles.
        """
        bundle = self._AddDummyBundle(self.manifest, 'pepper_26')
        bundle.name = 'pepper_canary'
        self._WriteManifest()
        output = self._Run(['update', 'pepper_canary'])
        self.assertTrue(os.path.exists(
            os.path.join(self.basedir, 'nacl_sdk', 'pepper_canary', 'dummy.txt')))

    def testUpdateMultiArchive(self):
        """The update command should include download/untar multiple archives
        specified in the bundle.
        """
        bundle = self._AddDummyBundle(self.manifest, 'pepper_26')
        archive2 = self._MakeDummyArchive('pepper_26', tarname='pepper_26_more',
                                          filename='dummy2.txt')
        archive2.host_os = 'all'
        bundle.AddArchive(archive2)
        self._WriteManifest()
        output = self._Run(['update', 'pepper_26'])
        self.assertTrue(os.path.exists(
            os.path.join(self.basedir, 'nacl_sdk', 'pepper_26', 'dummy.txt')))
        self.assertTrue(os.path.exists(
            os.path.join(self.basedir, 'nacl_sdk', 'pepper_26', 'dummy2.txt')))

    def testUpdateBadSize(self):
        """If an archive has a bad size, print an error.
        """
        bundle = self._AddDummyBundle(self.manifest, 'pepper_26')
        archive = bundle.GetHostOSArchive()
        archive.size = -1
        self._WriteManifest()
        stdout = self._Run(['update', 'pepper_26'], expect_error=True)
        self.assertTrue('Size mismatch' in stdout)

    def testUpdateBadSHA(self):
        """If an archive has a bad SHA, print an error.
        """
        bundle = self._AddDummyBundle(self.manifest, 'pepper_26')
        archive = bundle.GetHostOSArchive()
        archive.checksum = 0
        self._WriteManifest()
        stdout = self._Run(['update', 'pepper_26'], expect_error=True)
        self.assertTrue('SHA1 checksum mismatch' in stdout)

    def testUninstall(self):
        """The uninstall command should remove the installed bundle, if it
        exists.
        """
        # First install the bundle.
        self._AddDummyBundle(self.manifest, 'pepper_23')
        self._WriteManifest()
        output = self._Run(['update', 'pepper_23'])
        self.assertTrue(os.path.exists(
            os.path.join(self.basedir, 'nacl_sdk', 'pepper_23', 'dummy.txt')))
        # Now remove it.
        self._Run(['uninstall', 'pepper_23'])
        self.assertFalse(os.path.exists(
            os.path.join(self.basedir, 'nacl_sdk', 'pepper_23')))
        # The bundle should not be marked as installed.
        output = self._Run(['list'])
        self.assertTrue(re.search('^[^I]*pepper_23', output, re.MULTILINE))

    def testReinstall(self):
        """The reinstall command should remove, then install, the specified
        bundles.
        """
        # First install the bundle.
        self._AddDummyBundle(self.manifest, 'pepper_23')
        self._WriteManifest()
        output = self._Run(['update', 'pepper_23'])
        dummy_txt = os.path.join(self.basedir, 'nacl_sdk', 'pepper_23', 'dummy.txt')
        self.assertTrue(os.path.exists(dummy_txt))
        with open(dummy_txt) as f:
            self.assertEqual(f.read(), 'Dummy stuff for pepper_23')
        # Change some files.
        foo_txt = os.path.join(self.basedir, 'nacl_sdk', 'pepper_23', 'foo.txt')
        with open(foo_txt, 'w') as f:
            f.write('Another dummy file. This one is not part of the bundle.')
        with open(dummy_txt, 'w') as f:
            f.write('changed dummy.txt')
        # Reinstall the bundle.
        self._Run(['reinstall', 'pepper_23'])
        self.assertFalse(os.path.exists(foo_txt))
        self.assertTrue(os.path.exists(dummy_txt))
        with open(dummy_txt) as f:
            self.assertEqual(f.read(), 'Dummy stuff for pepper_23')
        cache_manifest = self._ReadCacheManifest()
        num_archives = len(cache_manifest.GetBundle('pepper_23').GetArchives())
        self.assertEqual(num_archives, 1)

    def testReinstallWithDuplicatedArchives(self):
        """The reinstall command should only use the most recent archive if there
        are duplicated archives.

        NOTE: There was a bug where the sdk_cache/naclsdk_manifest2.json file was
        duplicating archives from different revisions. Make sure that reinstall
        ignores old archives in the bundle.
        """
        # First install the bundle.
        self._AddDummyBundle(self.manifest, 'pepper_23')
        self._WriteManifest()
        self._Run(['update', 'pepper_23'])
        manifest = self._ReadCacheManifest()
        bundle = manifest.GetBundle('pepper_23')
        self.assertEqual(len(bundle.GetArchives()), 1)
        # Now add a bogus duplicate archive
        archive2 = self._MakeDummyArchive('pepper_23', tarname='pepper_23',
                                          filename='dummy2.txt')
        bundle.AddArchive(archive2)
        self._WriteCacheManifest(manifest)
        output = self._Run(['reinstall', 'pepper_23'])
        # When updating just one file, there is no (file 1/2 - "...") output.
        self.assertFalse('file 1/' in output)
        # Should be using the last archive.
        self.assertFalse(os.path.exists(
            os.path.join(self.basedir, 'nacl_sdk', 'pepper_23', 'dummy.txt')))
        self.assertTrue(os.path.exists(
            os.path.join(self.basedir, 'nacl_sdk', 'pepper_23', 'dummy2.txt')))

    def testReinstallDoesntUpdate(self):
        """The reinstall command should not update a bundle that has an update."""
        # First install the bundle.
        bundle = self._AddDummyBundle(self.manifest, 'pepper_23')
        self._WriteManifest()
        self._Run(['update', 'pepper_23'])
        dummy_txt = os.path.join(self.basedir, 'nacl_sdk', 'pepper_23', 'dummy.txt')
        self.assertTrue(os.path.exists(dummy_txt))
        with open(dummy_txt) as f:
            self.assertEqual(f.read(), 'Dummy stuff for pepper_23')
        # Update the revision.
        bundle.revision += 1
        self._WriteManifest()
        # Change the file.  (The unused foo_txt path from a copy-paste of
        # testReinstall has been removed.)
        with open(dummy_txt, 'w') as f:
            f.write('changed dummy.txt')
        # Reinstall.
        self._Run(['reinstall', 'pepper_23'])
        # The data has been reinstalled.
        self.assertTrue(os.path.exists(dummy_txt))
        with open(dummy_txt) as f:
            self.assertEqual(f.read(), 'Dummy stuff for pepper_23')
        # ... but the version hasn't been updated.
        output = self._Run(['list', '-r'])
        self.assertTrue(re.search('I\*\s+pepper_23.*?r1337.*?r1338', output))

    def testArchiveCacheBasic(self):
        """Downloaded archives should be stored in the cache by default."""
        self._AddDummyBundle(self.manifest, 'pepper_23')
        self._WriteManifest()
        self._Run(['update', 'pepper_23'])
        archive_cache = os.path.join(self.cache_dir, 'archives')
        cache_contents = os.listdir(archive_cache)
        self.assertEqual(cache_contents, ['pepper_23'])
        cache_contents = os.listdir(os.path.join(archive_cache, 'pepper_23'))
        self.assertEqual(cache_contents, ['pepper_23.tar.bz2'])

    def testArchiveCacheEviction(self):
        archive_cache = os.path.join(self.cache_dir, 'archives')
        self._AddDummyBundle(self.manifest, 'pepper_23')
        self._AddDummyBundle(self.manifest, 'pepper_22')
        self._WriteManifest()
        # First install pepper_23
        self._Run(['update', 'pepper_23'])
        archive = os.path.join(archive_cache, 'pepper_23', 'pepper_23.tar.bz2')
        archive_size = os.path.getsize(archive)
        # Set the mtime on the pepper_23 bundle to be a few seconds in the past.
        # This is needed so that the two bundles don't end up with the same
        # timestamp which can happen on systems that don't report sub-second
        # timestamps.
        atime = os.path.getatime(archive)
        mtime = os.path.getmtime(archive)
        os.utime(archive, (atime, mtime-10))
        # Set cache limit to size of pepper archive * 1.5
        self._WriteConfig('{ "cache_max": %d }' % int(archive_size * 1.5))
        # Now install pepper_22, which should cause pepper_23 to be evicted
        self._Run(['update', 'pepper_22'])
        cache_contents = os.listdir(archive_cache)
        self.assertEqual(cache_contents, ['pepper_22'])

    def testArchiveCacheZero(self):
        """Archives should not be cached when cache_max is zero."""
        self._AddDummyBundle(self.manifest, 'pepper_23')
        self._WriteConfig('{ "cache_max": 0 }')
        self._AddDummyBundle(self.manifest, 'pepper_23')
        self._WriteManifest()
        self._Run(['update', 'pepper_23'])
        archive_cache = os.path.join(self.cache_dir, 'archives')
        # Archive folder should be completely remove by cache cleanup
        self.assertFalse(os.path.exists(archive_cache))
# Allow running these sdk-tools command tests directly.
if __name__ == '__main__':
    unittest.main()
| bsd-3-clause |
eranchetz/nupic | nupic/regions/PictureSensorExplorers/rotate_block.py | 17 | 3076 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero Public License for more details.
#
# You should have received a copy of the GNU Affero Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
"""
This file defines RotatePictureExplorer, an explorer for
PictureSensor.
"""
from nupic.regions.PictureSensor import PictureSensor
class RotatePictureExplorer(PictureSensor.PictureExplorer):
    """Explorer that visits every position in a square grid and, at each
    grid position, presents the object at every discrete 2-D rotation.

    Iteration order is: category -> grid position ("onion block") ->
    rotation index, all decoded deterministically from the global
    iteration counter.
    """

    @classmethod
    def queryRelevantParams(klass):
        """
        Returns a sequence of parameter names that are relevant to
        the operation of the explorer.

        May be extended or overridden by sub-classes as appropriate.
        """
        return super(RotatePictureExplorer, klass).queryRelevantParams() + \
               ('radialLength', 'radialStep')

    def initSequence(self, state, params):
        # First presentation of a sequence follows the same schedule.
        self._presentNextRotation(state, params)

    def updateSequence(self, state, params):
        self._presentNextRotation(state, params)

    #~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    # Internal helper method(s)

    def _presentNextRotation(self, state, params):
        """
        We will visit each grid position. For each grid position,
        we rotate the object in 2D.

        Decodes the global iteration counter into (category, grid
        position, rotation index) and writes the resulting pose into
        *state* (mutated in place).
        """
        # Number of rotations presented at each grid position.
        numRotations = 1 + int((params['maxAngularPosn'] - params['minAngularPosn'])
                               / params['minAngularVelocity'])
        # The grid is (2 * radialLength + 1) positions on a side.
        edgeLen = 2 * params['radialLength'] + 1
        numItersPerCat = edgeLen * edgeLen * numRotations
        numCats = self._getNumCategories()
        numIters = numItersPerCat * numCats  # total schedule length (informational)
        catIndex = self._getIterCount() // numItersPerCat
        index = self._getIterCount() % numItersPerCat
        # BUG FIX: use floor division here.  The original used '/', which
        # under true division (Python 3, or `from __future__ import
        # division`) yields a float, propagating float grid indices and
        # positions.  Every sibling computation in this method already
        # uses '//'.
        blockIndex = index // numRotations
        rotationIndex = index % numRotations
        # Compute position within onion block
        posnX = ((blockIndex % edgeLen) - params['radialLength']) * params['radialStep']
        posnY = ((blockIndex // edgeLen) - params['radialLength']) * params['radialStep']
        # Compute rotation angle
        angularPosn = params['maxAngularPosn'] - params['minAngularVelocity'] * rotationIndex
        # Update state
        state['posnX'] = posnX
        state['posnY'] = posnY
        state['velocityX'] = 0
        state['velocityY'] = 0
        state['angularVelocity'] = params['minAngularVelocity']
        state['angularPosn'] = angularPosn
        state['catIndex'] = catIndex
| agpl-3.0 |
kapilt/cloud-custodian | tools/c7n_gcp/c7n_gcp/resources/resource_map.py | 1 | 5770 | ResourceMap = {
"gcp.app-engine": "c7n_gcp.resources.appengine.AppEngineApp",
"gcp.app-engine-certificate": "c7n_gcp.resources.appengine.AppEngineCertificate",
"gcp.app-engine-domain": "c7n_gcp.resources.appengine.AppEngineDomain",
"gcp.app-engine-domain-mapping": "c7n_gcp.resources.appengine.AppEngineDomainMapping",
"gcp.app-engine-firewall-ingress-rule": (
"c7n_gcp.resources.appengine.AppEngineFirewallIngressRule"),
"gcp.autoscaler": "c7n_gcp.resources.compute.Autoscaler",
"gcp.bq-dataset": "c7n_gcp.resources.bigquery.DataSet",
"gcp.bq-job": "c7n_gcp.resources.bigquery.BigQueryJob",
"gcp.bq-project": "c7n_gcp.resources.bigquery.BigQueryProject",
"gcp.bq-table": "c7n_gcp.resources.bigquery.BigQueryTable",
"gcp.bucket": "c7n_gcp.resources.storage.Bucket",
"gcp.build": "c7n_gcp.resources.build.CloudBuild",
"gcp.cloudbilling-account": "c7n_gcp.resources.cloudbilling.CloudBillingAccount",
"gcp.dataflow-job": "c7n_gcp.resources.dataflow.DataflowJob",
"gcp.disk": "c7n_gcp.resources.compute.Disk",
"gcp.dm-deployment": "c7n_gcp.resources.deploymentmanager.DMDeployment",
"gcp.dns-managed-zone": "c7n_gcp.resources.dns.DnsManagedZone",
"gcp.dns-policy": "c7n_gcp.resources.dns.DnsPolicy",
"gcp.firewall": "c7n_gcp.resources.network.Firewall",
"gcp.folder": "c7n_gcp.resources.resourcemanager.Folder",
"gcp.function": "c7n_gcp.resources.function.Function",
"gcp.gke-cluster": "c7n_gcp.resources.gke.KubernetesCluster",
"gcp.gke-nodepool": "c7n_gcp.resources.gke.KubernetesClusterNodePool",
"gcp.iam-role": "c7n_gcp.resources.iam.Role",
"gcp.image": "c7n_gcp.resources.compute.Image",
"gcp.instance": "c7n_gcp.resources.compute.Instance",
"gcp.instance-template": "c7n_gcp.resources.compute.InstanceTemplate",
"gcp.interconnect": "c7n_gcp.resources.network.Interconnect",
"gcp.interconnect-attachment": "c7n_gcp.resources.network.InterconnectAttachment",
"gcp.kms-cryptokey": "c7n_gcp.resources.kms.KmsCryptoKey",
"gcp.kms-cryptokey-version": "c7n_gcp.resources.kms.KmsCryptoKeyVersion",
"gcp.kms-keyring": "c7n_gcp.resources.kms.KmsKeyRing",
"gcp.loadbalancer-address": "c7n_gcp.resources.loadbalancer.LoadBalancingAddress",
"gcp.loadbalancer-backend-bucket": "c7n_gcp.resources.loadbalancer.LoadBalancingBackendBucket",
"gcp.loadbalancer-backend-service": (
"c7n_gcp.resources.loadbalancer.LoadBalancingBackendService"),
"gcp.loadbalancer-forwarding-rule": (
"c7n_gcp.resources.loadbalancer.LoadBalancingForwardingRule"),
"gcp.loadbalancer-global-address": "c7n_gcp.resources.loadbalancer.LoadBalancingGlobalAddress",
"gcp.loadbalancer-global-forwarding-rule": (
"c7n_gcp.resources.loadbalancer.LoadBalancingGlobalForwardingRule"),
"gcp.loadbalancer-health-check": "c7n_gcp.resources.loadbalancer.LoadBalancingHealthCheck",
"gcp.loadbalancer-http-health-check": (
"c7n_gcp.resources.loadbalancer.LoadBalancingHttpHealthCheck"),
"gcp.loadbalancer-https-health-check": (
"c7n_gcp.resources.loadbalancer.LoadBalancingHttpsHealthCheck"),
"gcp.loadbalancer-ssl-certificate": (
"c7n_gcp.resources.loadbalancer.LoadBalancingSslCertificate"),
"gcp.loadbalancer-ssl-policy": "c7n_gcp.resources.loadbalancer.LoadBalancingSslPolicy",
"gcp.loadbalancer-target-http-proxy": (
"c7n_gcp.resources.loadbalancer.LoadBalancingTargetHttpProxy"),
"gcp.loadbalancer-target-https-proxy": (
"c7n_gcp.resources.loadbalancer.LoadBalancingTargetHttpsProxy"),
"gcp.loadbalancer-target-instance": (
"c7n_gcp.resources.loadbalancer.LoadBalancingTargetInstance"),
"gcp.loadbalancer-target-pool": "c7n_gcp.resources.loadbalancer.LoadBalancingTargetPool",
"gcp.loadbalancer-target-ssl-proxy": (
"c7n_gcp.resources.loadbalancer.LoadBalancingTargetSslProxy"),
"gcp.loadbalancer-target-tcp-proxy": (
"c7n_gcp.resources.loadbalancer.LoadBalancingTargetTcpProxy"),
"gcp.loadbalancer-url-map": "c7n_gcp.resources.loadbalancer.LoadBalancingUrlMap",
"gcp.log-exclusion": "c7n_gcp.resources.logging.LogExclusion",
"gcp.log-project-metric": "c7n_gcp.resources.logging.LogProjectMetric",
"gcp.log-project-sink": "c7n_gcp.resources.logging.LogProjectSink",
"gcp.logsink": "c7n_gcp.resources.logging.LogSink",
"gcp.ml-job": "c7n_gcp.resources.mlengine.MLJob",
"gcp.ml-model": "c7n_gcp.resources.mlengine.MLModel",
"gcp.organization": "c7n_gcp.resources.resourcemanager.Organization",
"gcp.project": "c7n_gcp.resources.resourcemanager.Project",
"gcp.project-role": "c7n_gcp.resources.iam.ProjectRole",
"gcp.pubsub-snapshot": "c7n_gcp.resources.pubsub.PubSubSnapshot",
"gcp.pubsub-subscription": "c7n_gcp.resources.pubsub.PubSubSubscription",
"gcp.pubsub-topic": "c7n_gcp.resources.pubsub.PubSubTopic",
"gcp.route": "c7n_gcp.resources.network.Route",
"gcp.router": "c7n_gcp.resources.network.Router",
"gcp.service": "c7n_gcp.resources.service.Service",
"gcp.service-account": "c7n_gcp.resources.iam.ServiceAccount",
"gcp.snapshot": "c7n_gcp.resources.compute.Snapshot",
"gcp.sourcerepo": "c7n_gcp.resources.source.SourceRepository",
"gcp.spanner-database-instance": "c7n_gcp.resources.spanner.SpannerDatabaseInstance",
"gcp.spanner-instance": "c7n_gcp.resources.spanner.SpannerInstance",
"gcp.sql-backup-run": "c7n_gcp.resources.sql.SqlBackupRun",
"gcp.sql-instance": "c7n_gcp.resources.sql.SqlInstance",
"gcp.sql-ssl-cert": "c7n_gcp.resources.sql.SqlSslCert",
"gcp.sql-user": "c7n_gcp.resources.sql.SqlUser",
"gcp.subnet": "c7n_gcp.resources.network.Subnet",
"gcp.vpc": "c7n_gcp.resources.network.Network"
}
| apache-2.0 |
sbesson/snoopycrimecop | test/integration/Sandbox.py | 2 | 5211 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2012-2013 University of Dundee & Open Microscopy Environment
# All Rights Reserved.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
from builtins import str
from builtins import range
from builtins import object
import os
import uuid
import shutil
import logging
import tempfile
from scc.git import get_github, get_token_or_user
from subprocess import Popen
sandbox_url = "https://github.com/ome/snoopys-sandbox.git"
class SandboxTest(object):
    """Base class for integration tests that need a real git checkout.

    For every test method, clones the snoopys-sandbox GitHub repository
    into a fresh temporary directory, authenticates against GitHub, and
    chdirs into the clone.  ``teardown_method`` removes the clone and
    restores the original working directory regardless of test outcome.
    """
    def setup_method(self, method):
        """Clone the sandbox repo into a temp dir and chdir into it.

        On any failure the temp dir is removed and the cwd restored
        before the exception is re-raised.
        """
        # Basic logging configuration so if a test fails we can see
        # the statements at WARN or ERROR at least.
        logging.basicConfig()
        self.method = method.__name__
        self.cwd = os.getcwd()
        self.token = get_token_or_user(local=False)
        self.gh = get_github(self.token, dont_ask=True)
        self.user = self.gh.get_login()
        # mkdtemp in the *current* directory ("."), then normalize.
        self.path = tempfile.mkdtemp("", "sandbox-", ".")
        self.path = os.path.abspath(self.path)
        try:
            with open(os.devnull, 'w') as dev_null:
                p = Popen(["git", "clone", "-q", sandbox_url, self.path],
                          stdout=dev_null, stderr=dev_null)
                assert p.wait() == 0
            self.sandbox = self.gh.git_repo(self.path)
            self.origin_remote = "origin"
        except Exception:
            try:
                shutil.rmtree(self.path)
            finally:
                # Return to cwd regardless.
                os.chdir(self.cwd)
            raise
        # If we succeed, then we change to this dir.
        os.chdir(self.path)
    def shortDescription(self):
        # Returning None makes unittest-style runners display the test
        # name rather than the first docstring line.
        return None
    def init_submodules(self):
        """
        Fetch submodules after cloning the repository
        """
        try:
            with open(os.devnull, 'w') as dev_null:
                p = Popen(["git", "submodule", "update", "--init"],
                          stdout=dev_null, stderr=dev_null)
                assert p.wait() == 0
        except Exception:
            # NOTE(review): chdir back to the clone on failure; presumably
            # the submodule command may have changed cwd -- confirm.
            os.chdir(self.path)
            raise
    def uuid(self):
        """
        Return a string representing a uuid.uuid4
        """
        return str(uuid.uuid4())
    def fake_branch(self, head="master", commits=None):
        """
        Return a local branch with a list of commits, defaults to a single
        commit adding a unique file
        """
        # Branch name doubles as the default file name, keeping it unique.
        name = self.uuid()
        if commits is None:
            commits = [(name, "hi")]
        self.sandbox.new_branch(name, head=head)
        for n in range(len(commits)):
            fname, txt = commits[n]
            fname = os.path.join(self.path, fname)
            with open(fname, 'w') as f:
                f.write(txt)
            self.sandbox.add(fname)
            self.sandbox.commit("%d: Writing %s" % (n, name))
        self.sandbox.get_status()
        return name
    def add_remote(self):
        """
        Add the remote of the authenticated Github user
        """
        # Token is embedded in the URL so pushes need no interactive auth.
        if self.user not in self.sandbox.list_remotes():
            remote_url = "https://%s:x-oauth-basic@github.com/%s/%s.git" \
                % (self.token, self.user, self.sandbox.origin.name)
            self.sandbox.add_remote(self.user, remote_url)
    def rename_origin_remote(self, new_name):
        """
        Rename the remote used for the upstream repository
        """
        self.sandbox.call("git", "remote", "rename", self.origin_remote,
                          new_name)
        self.origin_remote = new_name
    def push_branch(self, branch):
        """
        Push a local branch to GitHub
        """
        self.add_remote()
        self.sandbox.push_branch(branch, remote=self.user)
    def open_pr(self, branch, base, description=None):
        """
        Push a local branch and open a PR against the selected base
        """
        self.push_branch(branch)
        if description is None:
            description = ("This is a call to Sandbox.open_pr by %s" %
                           self.method)
        new_pr = self.sandbox.origin.open_pr(
            title="test %s" % branch,
            description=description,
            base=base,
            head="%s:%s" % (self.user, branch))
        return new_pr
    def teardown_method(self, method):
        """Clean up the clone and restore the working directory.

        Nested try/finally guarantees cwd restoration even when the
        sandbox cleanup or the tree removal fails.
        """
        try:
            self.sandbox.cleanup()
        finally:
            try:
                shutil.rmtree(self.path)
            finally:
                # Return to cwd regardless.
                os.chdir(self.cwd)
| gpl-2.0 |
koushikcgit/xen | tools/python/xen/xend/server/SrvDmesg.py | 51 | 1767 | #============================================================================
# This library is free software; you can redistribute it and/or
# modify it under the terms of version 2.1 of the GNU Lesser General Public
# License as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
#============================================================================
# Copyright (C) 2004, 2005 Mike Wray <mike.wray@hp.com>
# Copyright (C) 2005 XenSource Ltd
#============================================================================
from xen.xend import XendDmesg
from xen.web.SrvDir import SrvDir
class SrvDmesg(SrvDir):
    """Xen Dmesg output.

    Serves the hypervisor message buffer either as plain text (for SXP
    clients) or wrapped in a minimal HTML page.
    """

    def __init__(self):
        SrvDir.__init__(self)
        # Shared XendDmesg instance holding the message buffer.
        self.xd = XendDmesg.instance()

    def render_POST(self, req):
        """Handle POST by delegating operation dispatch to SrvDir.perform."""
        self.perform(req)

    def render_GET(self, req):
        """Write the dmesg contents to the request, as text or HTML."""
        write = req.write
        if self.use_sxp(req):
            req.setHeader("Content-Type", "text/plain")
            write(self.info())
        else:
            write('<html><head></head><body>')
            self.print_path(req)
            write('<pre>')
            write(self.info())
            write('</pre></body></html>')

    def info(self):
        """Return the current dmesg buffer contents."""
        return self.xd.info()

    def op_clear(self, _1, _2):
        """Clear the dmesg buffer; returns 0 (success)."""
        self.xd.clear()
        return 0
| gpl-2.0 |
isra17/DIE | DIE/UI/ValueViewEx.py | 8 | 9711 |
from PySide import QtGui, QtCore
import idaapi
import idautils
import idc
from idaapi import PluginForm
import DIE.Lib.DIEDb
import DIE.UI.FunctionViewEx
class ValueView(PluginForm):
    """
    DIE Value View

    IDA plugin form that lists every parsed value in the DIE database in
    a tree view, with filtering by value type and cross-navigation to the
    function view.
    """
    def __init__(self):
        super(ValueView, self).__init__()
        self.die_db = None            # DIE database handle (set in OnCreate)
        self.function_view = None     # companion FunctionView (lazy)
        # NOTE: attribute name keeps its historical spelling ("highligthed")
        # because it is part of the existing code; holds persistent indexes
        # of currently highlighted rows.
        self.highligthed_items = []
    def Show(self):
        """Show (or re-show) the persistent 'Value View' form."""
        return PluginForm.Show(self,
                               "Value View",
                               options=PluginForm.FORM_PERSIST)
    def OnCreate(self, form):
        """
        Called when the view is created
        """
        self.die_db = DIE.Lib.DIEDb.get_db()
        self.function_view = DIE.UI.FunctionViewEx.get_view()
        # Get parent widget
        self.parent = self.FormToPySideWidget(form)
        self.valueModel = QtGui.QStandardItemModel()
        self.valueTreeView = QtGui.QTreeView()
        self.valueTreeView.setExpandsOnDoubleClick(False)
        self.valueTreeView.doubleClicked.connect(self.itemDoubleClickSlot)
        self._model_builder(self.valueModel)
        self.valueTreeView.setModel(self.valueModel)
        # Toolbar
        self.value_toolbar = QtGui.QToolBar()
        # Value type combobox
        type_list = []
        if self.die_db:
            type_list = self.die_db.get_all_value_types()
        type_list.insert(0, "All Values")
        self.value_type_combo = QtGui.QComboBox()
        self.value_type_combo.addItems(type_list)
        self.value_type_combo.activated[str].connect(self.on_value_type_combobox_change)
        self.value_type_label = QtGui.QLabel("Value Type: ")
        self.value_toolbar.addWidget(self.value_type_label)
        self.value_toolbar.addWidget(self.value_type_combo)
        # Layout
        layout = QtGui.QGridLayout()
        layout.addWidget(self.value_toolbar)
        layout.addWidget(self.valueTreeView)
        self.parent.setLayout(layout)
    def isVisible(self):
        """
        Is valueview visible
        @return: True if visible, otherwise False
        """
        try:
            return self.valueTreeView.isVisible()
        # BUG FIX: was a bare "except:", which also swallowed
        # SystemExit/KeyboardInterrupt. The broad Exception catch is kept
        # because the underlying C++ widget may already be deleted.
        except Exception:
            return False
    def _model_builder(self, model):
        """
        Build the function model.
        @param model: QStandardItemModel object
        """
        model.clear()  # Clear the model
        root_node = model.invisibleRootItem()
        model.setHorizontalHeaderLabels(("Type", "Score", "Value", "Description", "Raw Value"))
        if self.die_db is None:
            return
        value_list = self.die_db.get_all_values()
        for value in value_list:
            value_data_item_list = self._make_value_item(value)
            root_node.appendRow(value_data_item_list)
    def _make_value_type_item(self, type):
        """
        Make a value item type
        @param type: item type
        """
        item_value_type = QtGui.QStandardItem(type)
        item_value_type.setEditable(False)
        return [item_value_type]
    def _make_value_item(self, value):
        """
        Make a value model item
        @param value: dbParsed_Value object
        @return: a list of items for this row.
        """
        # First column carries no text; it only stores the lookup roles
        # (value type + hash) used for filtering and find_value().
        null_item = QtGui.QStandardItem()
        null_item.setEditable(False)
        null_item.setData(value.type, role=DIE.UI.ValueType_Role)
        null_item.setData(value.__hash__(), role=DIE.UI.Value_Role)
        item_value_score = QtGui.QStandardItem(str(value.score))
        item_value_score.setEditable(False)
        item_value_data = QtGui.QStandardItem(value.data)
        ea_list = self.die_db.get_parsed_value_contexts(value)
        item_value_data.setData(ea_list, role=DIE.UI.ContextList_Role)
        item_value_data.setEditable(False)
        item_value_desc = QtGui.QStandardItem(value.description)
        item_value_desc.setEditable(False)
        item_value_raw = QtGui.QStandardItem(value.raw)
        item_value_raw.setEditable(False)
        return [null_item, item_value_score, item_value_data, item_value_desc, item_value_raw]
    ###############################################################################################
    # Highlight Items
    #
    ###############################################################################################
    def highlight_item(self, item):
        """
        Highlight a single item
        @param item: module item
        """
        try:
            item.setBackground(QtCore.Qt.GlobalColor.yellow)
            cur_font = item.font()
            cur_font.setBold(True)
            item.setFont(cur_font)
        except Exception as ex:
            idaapi.msg("Error while highlighting item: %s\n" % ex)
    def highlight_item_row(self, item):
        """
        highlight the entire row containing a table item
        @param item: table item
        """
        try:
            if not item.index().isValid():
                return
            parent = item.parent()
            if parent is None:
                parent = item
            if not parent.hasChildren():
                self.highlight_item(parent)
                return
            row = item.row()
            column_num = parent.columnCount()
            for column in xrange(0, column_num):
                if self.valueModel.hasIndex(row, column, parent.index()):
                    cur_index = self.valueModel.index(row, column, parent.index())
                    self.highlight_item(self.valueModel.itemFromIndex(cur_index))
                    # Persistent indexes survive model changes, so the
                    # highlight can be undone later in clear_highlights().
                    persistent_index = QtCore.QPersistentModelIndex(cur_index)
                    self.highligthed_items.append(persistent_index)
        except Exception as ex:
            idaapi.msg("Error while highlighting item row: %s\n" % ex)
    def clear_highlights(self):
        """
        Clear all highlighted items
        @return:
        """
        try:
            self.valueTreeView.collapseAll()
            for persistent_index in self.highligthed_items:
                if persistent_index.isValid():
                    item = self.valueModel.itemFromIndex(persistent_index)
                    item.setBackground(QtCore.Qt.GlobalColor.white)
                    cur_font = item.font()
                    cur_font.setBold(False)
                    item.setFont(cur_font)
            self.highligthed_items = []
        except Exception as ex:
            idaapi.msg("Error while clearing highlights: %s\n" % ex)
    ###############################################################################################
    # Find Items
    #
    ###############################################################################################
    def find_value(self, value):
        """
        Find and highlight a function in current module
        @param value object (of type dbParsed_Value)
        """
        try:
            root_index = self.valueModel.index(0, 0)
            if not root_index.isValid():
                return
            # Match on the stored hash role; -1 means "all matches".
            matched_items = self.valueModel.match(root_index, DIE.UI.Value_Role, value.__hash__(), -1,
                                                  QtCore.Qt.MatchFlag.MatchRecursive | QtCore.Qt.MatchFlag.MatchExactly)
            for index in matched_items:
                if not index.isValid():
                    continue
                item = self.valueModel.itemFromIndex(index)
                self.valueTreeView.expand(index)
                self.valueTreeView.scrollTo(index, QtGui.QAbstractItemView.ScrollHint.PositionAtTop)
                self.highlight_item_row(item)
        except Exception as ex:
            idaapi.msg("Error while finding value: %s\n" % ex)
    ###############################################################################################
    # Slots
    #
    ###############################################################################################
    @QtCore.Slot(QtCore.QModelIndex)
    def itemDoubleClickSlot(self, index):
        """
        TreeView DoubleClicked Slot.
        @param index: QModelIndex object of the clicked tree index item.
        @return:
        """
        func_context_list = index.data(role=DIE.UI.ContextList_Role)
        try:
            if self.function_view is None:
                self.function_view = DIE.UI.FunctionViewEx.get_view()
            if func_context_list is not None and len(func_context_list) > 0:
                if not self.function_view.isVisible():
                    self.function_view.Show()
                self.function_view.find_context_list(func_context_list)
        except Exception as ex:
            idaapi.msg("Error while loading function view: %s\n" % ex)
    def on_value_type_combobox_change(self, value_type):
        """
        Value type Combobox item changed slot.
        """
        if value_type == "All Values":
            if not self.valueTreeView.model() is self.valueModel:
                self.valueTreeView.setModel(self.valueModel)
            return
        # Filter through a proxy model on the stored value-type role.
        valuetypeProxyModel = QtGui.QSortFilterProxyModel()
        valuetypeProxyModel.setFilterRole(DIE.UI.ValueType_Role)
        valuetypeProxyModel.setFilterRegExp(value_type)
        valuetypeProxyModel.setSourceModel(self.valueModel)
        self.valueTreeView.setModel(valuetypeProxyModel)
# Singleton: module-level ValueView instance shared by the plugin.
_value_view = None
def initialize():
    """Create the module-level ValueView singleton."""
    global _value_view
    _value_view = ValueView()
def get_view():
    """Return the singleton ValueView (None until initialize() is called)."""
    return _value_view
javachengwc/hue | desktop/core/ext-py/pysaml2-2.4.0/src/saml2/extension/algsupport.py | 35 | 4007 | #!/usr/bin/env python
#
# Generated Sat Mar 8 16:15:12 2014 by parse_xsd.py version 0.5.
#
import saml2
from saml2 import SamlBase
NAMESPACE = 'urn:oasis:names:tc:SAML:metadata:algsupport'
# Auto-generated by parse_xsd.py (see module header) -- edit with care.
class DigestMethodType_(SamlBase):
    """The urn:oasis:names:tc:SAML:metadata:algsupport:DigestMethodType
    element """
    c_tag = 'DigestMethodType'
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    # XML attribute map: name -> (python attribute, XSD type, required)
    c_attributes['Algorithm'] = ('algorithm', 'anyURI', True)
    def __init__(self,
                 algorithm=None,
                 text=None,
                 extension_elements=None,
                 extension_attributes=None):
        SamlBase.__init__(self,
                          text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
        self.algorithm = algorithm
def digest_method_type__from_string(xml_string):
    """Deserialize a DigestMethodType_ instance from an XML string."""
    return saml2.create_class_from_xml_string(DigestMethodType_, xml_string)
# Auto-generated by parse_xsd.py (see module header) -- edit with care.
class SigningMethodType_(SamlBase):
    """The urn:oasis:names:tc:SAML:metadata:algsupport:SigningMethodType
    element """
    c_tag = 'SigningMethodType'
    c_namespace = NAMESPACE
    c_children = SamlBase.c_children.copy()
    c_attributes = SamlBase.c_attributes.copy()
    c_child_order = SamlBase.c_child_order[:]
    c_cardinality = SamlBase.c_cardinality.copy()
    # XML attribute map: name -> (python attribute, XSD type, required)
    c_attributes['Algorithm'] = ('algorithm', 'anyURI', True)
    c_attributes['MinKeySize'] = ('min_key_size', 'positiveInteger', False)
    c_attributes['MaxKeySize'] = ('max_key_size', 'positiveInteger', False)
    def __init__(self,
                 algorithm=None,
                 min_key_size=None,
                 max_key_size=None,
                 text=None,
                 extension_elements=None,
                 extension_attributes=None):
        SamlBase.__init__(self,
                          text=text,
                          extension_elements=extension_elements,
                          extension_attributes=extension_attributes)
        self.algorithm = algorithm
        self.min_key_size = min_key_size
        self.max_key_size = max_key_size
def signing_method_type__from_string(xml_string):
    """Deserialize a SigningMethodType_ instance from an XML string."""
    return saml2.create_class_from_xml_string(SigningMethodType_, xml_string)
# Concrete element class; inherits all behavior from DigestMethodType_.
class DigestMethod(DigestMethodType_):
    """The urn:oasis:names:tc:SAML:metadata:algsupport:DigestMethod element """
    c_tag = 'DigestMethod'
    c_namespace = NAMESPACE
    c_children = DigestMethodType_.c_children.copy()
    c_attributes = DigestMethodType_.c_attributes.copy()
    c_child_order = DigestMethodType_.c_child_order[:]
    c_cardinality = DigestMethodType_.c_cardinality.copy()
def digest_method_from_string(xml_string):
    """Deserialize a DigestMethod element from an XML string."""
    return saml2.create_class_from_xml_string(DigestMethod, xml_string)
# Concrete element class; inherits all behavior from SigningMethodType_.
class SigningMethod(SigningMethodType_):
    """The urn:oasis:names:tc:SAML:metadata:algsupport:SigningMethod element """
    c_tag = 'SigningMethod'
    c_namespace = NAMESPACE
    c_children = SigningMethodType_.c_children.copy()
    c_attributes = SigningMethodType_.c_attributes.copy()
    c_child_order = SigningMethodType_.c_child_order[:]
    c_cardinality = SigningMethodType_.c_cardinality.copy()
def signing_method_from_string(xml_string):
    """Deserialize a SigningMethod element from an XML string."""
    return saml2.create_class_from_xml_string(SigningMethod, xml_string)
# Registry: element tag -> deserializer function.
ELEMENT_FROM_STRING = {
    DigestMethod.c_tag: digest_method_from_string,
    DigestMethodType_.c_tag: digest_method_type__from_string,
    SigningMethod.c_tag: signing_method_from_string,
    SigningMethodType_.c_tag: signing_method_type__from_string,
}
# Registry: element tag -> class; consumed by factory().
ELEMENT_BY_TAG = {
    'DigestMethod': DigestMethod,
    'DigestMethodType': DigestMethodType_,
    'SigningMethod': SigningMethod,
    'SigningMethodType': SigningMethodType_,
}
def factory(tag, **kwargs):
    """Instantiate the schema class registered for *tag*.

    Raises KeyError if *tag* is not a known element name.
    """
    return ELEMENT_BY_TAG[tag](**kwargs)
| apache-2.0 |
dimagi/rapidsms-core | lib/pygsm/errors.py | 65 | 3781 | #!/usr/bin/env python
# vim: ai ts=4 sts=4 et sw=4
import serial
class GsmError(serial.SerialException):
    """Base class for all pygsm errors (extends pyserial's exception)."""
    pass
class GsmIOError(GsmError):
    """Generic I/O failure while talking to the modem."""
    pass
class GsmWriteError(GsmIOError):
    """Failed to write a command to the modem."""
    pass
class GsmReadError(GsmIOError):
    """Failed to read a response from the modem."""
    pass
class GsmReadTimeoutError(GsmReadError):
    def __init__(self, pending_data):
        # Data received before the timeout fired (per the parameter
        # name; verify against call sites).
        self.pending_data = pending_data
class GsmModemError(GsmError):
    """Error reported by the modem via a +CME or +CMS result code.

    ``type`` is the error class ("CME" = mobile equipment errors,
    "CMS" = SMS service errors); ``code`` is the numeric error code
    reported by the modem.
    """

    # Human-readable descriptions, keyed by error class then code.
    STRINGS = {
        "CME": {
            3: "Operation not allowed",
            4: "Operation not supported",
            5: "PH-SIM PIN required (SIM lock)",
            10: "SIM not inserted",
            11: "SIM PIN required",
            12: "SIM PUK required",
            13: "SIM failure",
            16: "Incorrect password",
            17: "SIM PIN2 required",
            18: "SIM PUK2 required",
            20: "Memory full",
            21: "Invalid index",
            22: "Not found",
            24: "Text string too long",
            26: "Dial string too long",
            27: "Invalid characters in dial string",
            30: "No network service",
            32: "Network not allowed. Emergency calls only",
            40: "Network personal PIN required (Network lock)",
            103: "Illegal MS (#3)",
            106: "Illegal ME (#6)",
            107: "GPRS services not allowed",
            111: "PLMN not allowed",
            112: "Location area not allowed",
            113: "Roaming not allowed in this area",
            132: "Service option not supported",
            133: "Requested service option not subscribed",
            134: "Service option temporarily out of order",
            148: "unspecified GPRS error",
            149: "PDP authentication failure",
            150: "Invalid mobile class"},
        "CMS": {
            # BUG FIX: this key was previously written as the octal
            # literal 021 (== decimal 17), so CMS error 21 never matched
            # (and 0-prefixed octals are a syntax error on Python 3).
            21: "Call Rejected (out of credit?)",
            301: "SMS service of ME reserved",
            302: "Operation not allowed",
            303: "Operation not supported",
            304: "Invalid PDU mode parameter",
            305: "Invalid text mode parameter",
            310: "SIM not inserted",
            311: "SIM PIN required",
            312: "PH-SIM PIN required",
            313: "SIM failure",
            316: "SIM PUK required",
            317: "SIM PIN2 required",
            318: "SIM PUK2 required",
            321: "Invalid memory index",
            322: "SIM memory full",
            330: "SC address unknown",
            340: "No +CNMA acknowledgement expected",
            500: "Unknown error",
            512: "MM establishment failure (for SMS)",
            513: "Lower layer failure (for SMS)",
            514: "CP error (for SMS)",
            515: "Please wait, init or command processing in progress",
            517: "SIM Toolkit facility not supported",
            518: "SIM Toolkit indication not received",
            519: "Reset product to activate or change new echo cancellation algo",
            520: "Automatic abort about get PLMN list for an incomming call",
            526: "PIN deactivation forbidden with this SIM card",
            527: "Please wait, RR or MM is busy. Retry your selection later",
            528: "Location update failure. Emergency calls only",
            529: "PLMN selection failure. Emergency calls only",
            531: "SMS not send: the <da> is not in FDN phonebook, and FDN lock is enabled (for SMS)"}}

    def __init__(self, type=None, code=None):
        # ``type`` intentionally shadows the builtin to preserve the
        # existing keyword-argument interface.
        self.type = type
        self.code = code

    def __str__(self):
        if self.type and self.code:
            # ROBUSTNESS: look up with .get() fall-backs so an unknown
            # type/code no longer raises KeyError while formatting the
            # error message.
            description = self.STRINGS.get(self.type, {}).get(
                self.code, "Unknown error")
            return "%s ERROR %d: %s" % (self.type, self.code, description)
        # no type and/or code were provided
        else: return "Unknown GSM Error"
| lgpl-3.0 |
dgiunchi/googletest | test/gtest_color_test.py | 3259 | 4911 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Verifies that Google Test correctly determines whether to use colors."""
__author__ = 'wan@google.com (Zhanyong Wan)'
import os
import gtest_test_utils
# BUG FIX: this previously read ``IS_WINDOWS = os.name = 'nt'`` -- a
# chained assignment that overwrote ``os.name`` with 'nt' on every
# platform instead of comparing against it.
IS_WINDOWS = os.name == 'nt'

COLOR_ENV_VAR = 'GTEST_COLOR'
COLOR_FLAG = 'gtest_color'
COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_')
def SetEnvVar(env_var, value):
  """Sets the env variable to 'value'; unsets it when 'value' is None."""
  if value is None:
    # Removing a variable that isn't set is a no-op.
    os.environ.pop(env_var, None)
  else:
    os.environ[env_var] = value
def UsesColor(term, color_env_var, color_flag):
  """Runs gtest_color_test_ and returns its exit code."""
  # Configure the environment the child process will inherit.
  SetEnvVar('TERM', term)
  SetEnvVar(COLOR_ENV_VAR, color_env_var)
  flag_args = [] if color_flag is None else ['--%s=%s' % (COLOR_FLAG, color_flag)]
  child = gtest_test_utils.Subprocess([COMMAND] + flag_args)
  # Treat an abnormal exit as "uses color" (truthy), otherwise report
  # the child's exit code (nonzero == uses color).
  return not child.exited or child.exit_code
class GTestColorTest(gtest_test_utils.TestCase):
  # Each assertion runs the child binary once; UsesColor's truthiness
  # reflects whether the child decided to colorize its output.
  def testNoEnvVarNoFlag(self):
    """Tests the case when there's neither GTEST_COLOR nor --gtest_color."""
    if not IS_WINDOWS:
      self.assert_(not UsesColor('dumb', None, None))
      self.assert_(not UsesColor('emacs', None, None))
      self.assert_(not UsesColor('xterm-mono', None, None))
      self.assert_(not UsesColor('unknown', None, None))
      self.assert_(not UsesColor(None, None, None))
    self.assert_(UsesColor('linux', None, None))
    self.assert_(UsesColor('cygwin', None, None))
    self.assert_(UsesColor('xterm', None, None))
    self.assert_(UsesColor('xterm-color', None, None))
    self.assert_(UsesColor('xterm-256color', None, None))
  def testFlagOnly(self):
    """Tests the case when there's --gtest_color but not GTEST_COLOR."""
    self.assert_(not UsesColor('dumb', None, 'no'))
    self.assert_(not UsesColor('xterm-color', None, 'no'))
    if not IS_WINDOWS:
      self.assert_(not UsesColor('emacs', None, 'auto'))
    self.assert_(UsesColor('xterm', None, 'auto'))
    self.assert_(UsesColor('dumb', None, 'yes'))
    self.assert_(UsesColor('xterm', None, 'yes'))
  def testEnvVarOnly(self):
    """Tests the case when there's GTEST_COLOR but not --gtest_color."""
    self.assert_(not UsesColor('dumb', 'no', None))
    self.assert_(not UsesColor('xterm-color', 'no', None))
    if not IS_WINDOWS:
      self.assert_(not UsesColor('dumb', 'auto', None))
      self.assert_(UsesColor('xterm-color', 'auto', None))
    self.assert_(UsesColor('dumb', 'yes', None))
    self.assert_(UsesColor('xterm-color', 'yes', None))
  def testEnvVarAndFlag(self):
    """Tests the case when there are both GTEST_COLOR and --gtest_color."""
    # The command-line flag takes precedence over the environment variable.
    self.assert_(not UsesColor('xterm-color', 'no', 'no'))
    self.assert_(UsesColor('dumb', 'no', 'yes'))
    self.assert_(UsesColor('xterm-color', 'no', 'auto'))
  def testAliasesOfYesAndNo(self):
    """Tests using aliases in specifying --gtest_color."""
    self.assert_(UsesColor('dumb', None, 'true'))
    self.assert_(UsesColor('dumb', None, 'YES'))
    self.assert_(UsesColor('dumb', None, 'T'))
    self.assert_(UsesColor('dumb', None, '1'))
    self.assert_(not UsesColor('xterm', None, 'f'))
    self.assert_(not UsesColor('xterm', None, 'false'))
    self.assert_(not UsesColor('xterm', None, '0'))
    self.assert_(not UsesColor('xterm', None, 'unknown'))
if __name__ == '__main__':
  # Run the test cases above through the shared gtest test harness.
  gtest_test_utils.Main()
| bsd-3-clause |
ojengwa/odoo | addons/point_of_sale/account_bank_statement.py | 313 | 2159 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2008 PC Solutions (<http://pcsol.be>). All Rights Reserved
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_journal(osv.osv):
    # Extends the core account.journal model with Point of Sale options
    # (old-API osv declarations).
    _inherit = 'account.journal'
    _columns = {
        'journal_user': fields.boolean('PoS Payment Method', help="Check this box if this journal define a payment method that can be used in point of sales."),
        'amount_authorized_diff' : fields.float('Amount Authorized Difference', help="This field depicts the maximum difference allowed between the ending balance and the theorical cash when closing a session, for non-POS managers. If this maximum is reached, the user will have an error message at the closing of his session saying that he needs to contact his manager."),
        'self_checkout_payment_method' : fields.boolean('Self Checkout Payment Method'), #FIXME : this field is obsolete
    }
    _defaults = {
        'self_checkout_payment_method' : False,
    }
class account_cash_statement(osv.osv):
    # Links bank statements back to the POS session that produced them.
    _inherit = 'account.bank.statement'
    _columns = {
        'pos_session_id' : fields.many2one('pos.session', copy=False),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
odoomrp/odoomrp-wip | mrp_operations_rejected_quantity/models/operation_time_line.py | 8 | 1038 | # -*- coding: utf-8 -*-
# (c) 2016 Alfredo de la Fuente - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
from openerp import models, fields, api
class OperationTimeLine(models.Model):
    """Extends operation.time.line with accepted/rejected quantities."""
    _inherit = 'operation.time.line'
    @api.depends('accepted_amount', 'rejected_amount')
    @api.multi
    def _compute_total_amount(self):
        # Total is simply accepted + rejected; recomputed whenever either
        # dependency changes.
        for line in self:
            line.total_amount = line.accepted_amount + line.rejected_amount
    employee_id = fields.Many2one(
        comodel_name='hr.employee', string='Employee', readonly=True)
    accepted_amount = fields.Integer(
        string='Accepted amount', default=0)
    rejected_amount = fields.Integer(
        string='Rejected amount', default=0)
    # Computed, non-stored sum of the two amounts above.
    total_amount = fields.Integer(
        string='Total amount', default=0, compute='_compute_total_amount')
    state = fields.Selection(
        [('pending', 'Pending'),
         ('processed', 'Processed'),
         ('canceled', 'Canceled')
         ], string="State", default='pending', required=True)
| agpl-3.0 |
coreynicholson/youtube-dl | youtube_dl/extractor/tube8.py | 48 | 2892 | from __future__ import unicode_literals
import re
from ..utils import (
int_or_none,
str_to_int,
)
from .keezmovies import KeezMoviesIE
class Tube8IE(KeezMoviesIE):
    """Extractor for tube8.com video pages.

    Media formats and base metadata come from KeezMoviesIE via
    _extract_info(); this class only scrapes tube8-specific fields
    (title fallback, uploader, counts, category and tags) out of the
    page HTML.
    """
    _VALID_URL = r'https?://(?:www\.)?tube8\.com/(?:[^/]+/)+(?P<display_id>[^/]+)/(?P<id>\d+)'
    _TESTS = [{
        'url': 'http://www.tube8.com/teen/kasia-music-video/229795/',
        'md5': '65e20c48e6abff62ed0c3965fff13a39',
        'info_dict': {
            'id': '229795',
            'display_id': 'kasia-music-video',
            'ext': 'mp4',
            'description': 'hot teen Kasia grinding',
            'uploader': 'unknown',
            'title': 'Kasia music video',
            'age_limit': 18,
            'duration': 230,
            'categories': ['Teen'],
            'tags': ['dancing'],
        },
    }, {
        'url': 'http://www.tube8.com/shemale/teen/blonde-cd-gets-kidnapped-by-two-blacks-and-punished-for-being-a-slutty-girl/19569151/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        webpage, info = self._extract_info(url)

        # The base class may not find a title; fall back to the JS variable.
        if not info['title']:
            info['title'] = self._html_search_regex(
                r'videoTitle\s*=\s*"([^"]+)', webpage, 'title')

        description = self._html_search_regex(
            r'>Description:</strong>\s*(.+?)\s*<', webpage, 'description', fatal=False)
        uploader = self._html_search_regex(
            r'<span class="username">\s*(.+?)\s*<',
            webpage, 'uploader', fatal=False)

        # All counts are optional (fatal=False) and default to None.
        like_count = int_or_none(self._search_regex(
            r'rupVar\s*=\s*"(\d+)"', webpage, 'like count', fatal=False))
        dislike_count = int_or_none(self._search_regex(
            r'rdownVar\s*=\s*"(\d+)"', webpage, 'dislike count', fatal=False))
        view_count = str_to_int(self._search_regex(
            r'<strong>Views: </strong>([\d,\.]+)\s*</li>',
            webpage, 'view count', fatal=False))
        comment_count = str_to_int(self._search_regex(
            r'<span id="allCommentsCount">(\d+)</span>',
            webpage, 'comment count', fatal=False))

        category = self._search_regex(
            r'Category:\s*</strong>\s*<a[^>]+href=[^>]+>([^<]+)',
            webpage, 'category', fatal=False)
        categories = [category] if category else None

        tags_str = self._search_regex(
            r'(?s)Tags:\s*</strong>(.+?)</(?!a)',
            webpage, 'tags', fatal=False)
        # IDIOM FIX: findall already returns a list; the previous
        # ``[t for t in re.findall(...)]`` identity comprehension was
        # redundant.
        tags = re.findall(
            r'<a[^>]+href=[^>]+>([^<]+)', tags_str) if tags_str else None

        info.update({
            'description': description,
            'uploader': uploader,
            'view_count': view_count,
            'like_count': like_count,
            'dislike_count': dislike_count,
            'comment_count': comment_count,
            'categories': categories,
            'tags': tags,
        })
        return info
| unlicense |
andersonsilvade/python_C | Python32/Tools/pynche/ListViewer.py | 116 | 6648 | """ListViewer class.
This class implements an input/output view on the color model. It lists every
unique color (e.g. unique r/g/b value) found in the color database. Each
color is shown by small swatch and primary color name. Some colors have
aliases -- more than one name for the same r/g/b value. These aliases are
displayed in the small listbox at the bottom of the screen.
Clicking on a color name or swatch selects that color and updates all other
windows. When a color is selected in a different viewer, the color list is
scrolled to the selected color and it is highlighted. If the selected color
is an r/g/b value without a name, no scrolling occurs.
You can turn off Update On Click if all you want to see is the alias for a
given name, without selecting the color.
"""
from tkinter import *
import ColorDB
ADDTOVIEW = 'Color %List Window...'
class ListViewer:
def __init__(self, switchboard, master=None):
self.__sb = switchboard
optiondb = switchboard.optiondb()
self.__lastbox = None
self.__dontcenter = 0
# GUI
root = self.__root = Toplevel(master, class_='Pynche')
root.protocol('WM_DELETE_WINDOW', self.withdraw)
root.title('Pynche Color List')
root.iconname('Pynche Color List')
root.bind('<Alt-q>', self.__quit)
root.bind('<Alt-Q>', self.__quit)
root.bind('<Alt-w>', self.withdraw)
root.bind('<Alt-W>', self.withdraw)
#
# create the canvas which holds everything, and its scrollbar
#
frame = self.__frame = Frame(root)
frame.pack()
canvas = self.__canvas = Canvas(frame, width=160, height=300,
borderwidth=2, relief=SUNKEN)
self.__scrollbar = Scrollbar(frame)
self.__scrollbar.pack(fill=Y, side=RIGHT)
canvas.pack(fill=BOTH, expand=1)
canvas.configure(yscrollcommand=(self.__scrollbar, 'set'))
self.__scrollbar.configure(command=(canvas, 'yview'))
self.__populate()
#
# Update on click
self.__uoc = BooleanVar()
self.__uoc.set(optiondb.get('UPONCLICK', 1))
self.__uocbtn = Checkbutton(root,
text='Update on Click',
variable=self.__uoc,
command=self.__toggleupdate)
self.__uocbtn.pack(expand=1, fill=BOTH)
#
# alias list
self.__alabel = Label(root, text='Aliases:')
self.__alabel.pack()
self.__aliases = Listbox(root, height=5,
selectmode=BROWSE)
self.__aliases.pack(expand=1, fill=BOTH)
def __populate(self):
#
# create all the buttons
colordb = self.__sb.colordb()
canvas = self.__canvas
row = 0
widest = 0
bboxes = self.__bboxes = []
for name in colordb.unique_names():
exactcolor = ColorDB.triplet_to_rrggbb(colordb.find_byname(name))
canvas.create_rectangle(5, row*20 + 5,
20, row*20 + 20,
fill=exactcolor)
textid = canvas.create_text(25, row*20 + 13,
text=name,
anchor=W)
x1, y1, textend, y2 = canvas.bbox(textid)
boxid = canvas.create_rectangle(3, row*20+3,
textend+3, row*20 + 23,
outline='',
tags=(exactcolor, 'all'))
canvas.bind('<ButtonRelease>', self.__onrelease)
bboxes.append(boxid)
if textend+3 > widest:
widest = textend+3
row += 1
canvheight = (row-1)*20 + 25
canvas.config(scrollregion=(0, 0, 150, canvheight))
for box in bboxes:
x1, y1, x2, y2 = canvas.coords(box)
canvas.coords(box, x1, y1, widest, y2)
    def __onrelease(self, event=None):
        """Handle a button release on the canvas: select the clicked color.

        Finds the hit rectangle under the pointer, recovers the color from
        its #rrggbb tag, and pushes the color to all views (or only this one
        when update-on-click is off).
        """
        canvas = self.__canvas
        # Translate window coordinates into (possibly scrolled) canvas space.
        x = canvas.canvasx(event.x)
        y = canvas.canvasy(event.y)
        ids = canvas.find_overlapping(x, y, x, y)
        for boxid in ids:
            if boxid in self.__bboxes:
                break
        else:
            # Click landed outside every selection rectangle; ignore it.
            return
        tags = self.__canvas.gettags(boxid)
        for t in tags:
            if t[0] == '#':
                break
        else:
            # Rectangle carries no #rrggbb tag; nothing to select.
            return
        red, green, blue = ColorDB.rrggbb_to_triplet(t)
        # The user clicked the row, so it is already visible; suppress the
        # auto-scrolling that update_yourself() would otherwise perform.
        self.__dontcenter = 1
        if self.__uoc.get():
            self.__sb.update_views(red, green, blue)
        else:
            self.update_yourself(red, green, blue)
        self.__red, self.__green, self.__blue = red, green, blue
    def __toggleupdate(self, event=None):
        """Checkbox callback: when update-on-click is switched on, push the
        last clicked color out to all views immediately."""
        if self.__uoc.get():
            self.__sb.update_views(self.__red, self.__green, self.__blue)
    def __quit(self, event=None):
        """Exit the application's Tk main loop."""
        self.__root.quit()
    def withdraw(self, event=None):
        """Hide the color list window (restored later via deiconify())."""
        self.__root.withdraw()
    def deiconify(self, event=None):
        """Show the color list window again after a withdraw()."""
        self.__root.deiconify()
    def update_yourself(self, red, green, blue):
        """Reflect the (red, green, blue) selection in this view.

        Outlines the matching row, refreshes the alias listbox, and -- unless
        the change originated from a click in this window -- scrolls the
        canvas so the selected row is roughly centered.
        """
        canvas = self.__canvas
        # Turn off the previously highlighted row.
        if self.__lastbox:
            canvas.itemconfigure(self.__lastbox, outline='')
        # Highlight the row whose hit rectangle is tagged with this color.
        colortag = ColorDB.triplet_to_rrggbb((red, green, blue))
        canvas.itemconfigure(colortag, outline='black')
        self.__lastbox = colortag
        # Refresh the alias list for the selected color.
        self.__aliases.delete(0, END)
        try:
            # [1:] drops the primary name, leaving only the aliases.
            aliases = self.__sb.colordb().aliases_of(red, green, blue)[1:]
        except ColorDB.BadColor:
            self.__aliases.insert(END, '<no matching color>')
            return
        if not aliases:
            self.__aliases.insert(END, '<no aliases>')
        else:
            for name in aliases:
                self.__aliases.insert(END, name)
        # Scroll so the selected row is visible -- but not when the selection
        # came from a click here (the row is already on screen).
        if self.__dontcenter:
            self.__dontcenter = 0
        else:
            ig, ig, ig, y1 = canvas.coords(colortag)
            ig, ig, ig, y2 = canvas.coords(self.__bboxes[-1])
            h = int(canvas['height']) * 0.5
            canvas.yview('moveto', (y1-h) / y2)
    def save_options(self, optiondb):
        """Persist this view's settings into the option database."""
        optiondb['UPONCLICK'] = self.__uoc.get()
    def colordb_changed(self, colordb):
        """Rebuild the list after the color database has been replaced."""
        self.__canvas.delete('all')
        self.__populate()
| mit |
pjgaudre/2016-IPSW-500px | Def500.py | 1 | 3315 | # -*- coding: utf-8 -*-
"""
Created on Wed Aug 17 10:26:55 2016
@author: davidc
"""
#!/usr/bin/python
import numpy as np
import pylab #image showing, an appendix to matplotlib
from PIL import Image, ImageChops
import pandas as pd #data package like read csv
import os
#import matplotlib.pyplot as plt #image showing
# import exifread # https://pypi.python.org/pypi/ExifRead
#import pytesseract # https://pypi.python.org/pypi/pytesseract
# https://github.com/tesseract-ocr/tesseract/wiki
'''
These pictures are duplicates
#1343 train/21556793.jpeg
#1594 train/19990265.jpeg
#3410 train/19028923.jpeg
'''
# Set the working directory so the relative 'dataset/' and CSV paths below
# resolve. NOTE(review): hard-coded to one developer's machine -- anyone
# else must edit this path before running.
os.chdir('/Users/davidc/Desktop/Research_Tools/2016_IPSW_500px')
def Photo_id_2_index(photo_id,train):
    """Return the index label of the first row of *train* whose
    'photo_id' column equals *photo_id*.

    Raises IndexError when the photo_id is not present.
    """
    matches = train['photo_id'] == photo_id
    return train['photo_id'][matches].index[0]
def ImagToJpeg(index,train):
    """Open the JPEG for row *index* of *train* and return a PIL image.

    INPUTS:
        index: integer row label into train (0, 1, ..., len(train)-1)
        train: DataFrame with an 'image_path' column relative to 'dataset/'

    Returns:
        A lazily-loaded PIL Image object.
    """
    path = 'dataset/'+ train['image_path'][index]
    # Pass the path straight to PIL: the original wrapped it in a bare
    # open(path, 'rb') whose file handle was never closed, leaking one
    # descriptor per call when sweeping the whole dataset. Image.open
    # manages (and closes) the file itself when given a path.
    img_jpg = Image.open(path)
    return img_jpg
def JpegToArray(img_jpg):
    """Convert an opened PIL image (or any array-like) to a float64 array.

    INPUTS:
        img_jpg: PIL image as returned by ImagToJpeg, or anything that
            np.asarray accepts.
    """
    # asarray avoids an extra copy when the input is already float64.
    return np.asarray(img_jpg, dtype='float64')
def ViewArray(img_arry):
    '''
    Render an image array into the current pylab figure.
    INPUTS:
        img_arry: array returned from JpegToArray
    Note: only draws via pylab.imshow; the caller controls figure display.
    '''
    pylab.imshow(img_arry)
def rgb2grey(rbg):
    """Collapse the RGB channels of an image array into a luma channel.

    Uses the ITU-R BT.601 weights (0.299 R + 0.587 G + 0.114 B); any
    channels beyond the first three (e.g. alpha) are ignored.

    NOTE: assumes the last axis holds the color channels, as produced by
    JpegToArray on an RGB image.
    """
    luma_weights = [0.299, 0.587, 0.114]
    return np.dot(rbg[..., :3], luma_weights)
def ViewImagIndex(index,train):
    '''
    Display the training image stored at row *index* of *train*.
    INPUTS:
        index: integer row label (0 .. len(train)-1)
        train: DataFrame with an 'image_path' column
    Note: draws via pylab.imshow into the current figure.
    '''
    View_img_jpg = ImagToJpeg(index,train)
    pylab.imshow(View_img_jpg)
def Is_there_a_border(index,train):
    """Heuristically decide whether image *index* has a solid border.

    The image is diffed against a flat background filled with its top-left
    pixel; the bounding box of the (contrast-boosted) difference shows how
    far the real content is inset from each edge. PIL's getbbox() returns
    (left, upper, right, lower).

    Returns:
        True when the content is inset from both the top and the bottom
        edge. (The original named these LB/RB, but bbox[1]/bbox[3] are the
        vertical coordinates.)
    """
    im = ImagToJpeg(index,train)
    bg = Image.new(im.mode, im.size, im.getpixel((0,0)))
    diff = ImageChops.difference(im, bg)
    # Amplify the difference, shifting down by its 25th percentile so weak
    # JPEG noise does not register as content.
    diff = ImageChops.add(diff, diff, 2.0, -int(np.percentile(JpegToArray(diff),25)))
    bbox = diff.getbbox()
    if bbox is None:
        # Completely uniform image: every pixel matches the corner color.
        # The original crashed here (None is not subscriptable); treat a
        # fully border-colored image as bordered.
        return True
    top_inset = not (bbox[1] == 0)              # content starts below top edge
    bottom_inset = not (bbox[3] == im.size[1])  # content stops above bottom edge
    left_inset = not (bbox[0] == 0)             # content starts right of left edge
    # Bug fix: the right edge must be compared against the image WIDTH
    # (im.size[0]); the original compared it against the height (size[1]).
    right_inset = not (bbox[2] == im.size[0])
    # Decision unchanged from the original: only the vertical insets count.
    borders = top_inset and bottom_inset
    return borders
def Is_there_a_border_vec(index,train):
    """Apply Is_there_a_border to every row label in *index*.

    INPUTS:
        index: numpy array (or pandas Index) of row labels into train
        train: DataFrame with an 'image_path' column

    Returns:
        float array aligned with *index*: 1.0 = border detected, 0.0 = not.
    """
    flags = np.zeros(index.size)
    # enumerate separates the OUTPUT position from the row LABEL. The
    # original indexed the result with the label itself (temp[i] = ...),
    # which is wrong (or out of bounds) for any index other than 0..n-1.
    for pos, row_label in enumerate(index):
        flags[pos] = Is_there_a_border(row_label, train)
    return flags
#Is_there_a_border_vec = np.vectorize(Is_there_a_border, excluded=['train'])
def CompBordList(Predicted_Border):
    '''
    Load the hand-labelled border ground truth.

    Reads 'train_borders3.csv' from the working directory and returns the
    photo_id values whose label column equals 2 (marked as bordered).

    NOTE(review): the Predicted_Border argument is accepted but never used;
    presumably this function was meant to compare predictions against the
    hand labels -- confirm intent before relying on it.

    INPUTS:
        Predicted_Border: algorithm-chosen pics with borders (unused)
    '''
    BenMar3col = pd.read_csv('train_borders3.csv')
    BM_border = BenMar3col['photo_id'][BenMar3col['label']==2]
    BM_border = BM_border.values
    return BM_border
| mit |
dylan-reeves/home_backup | backupclient-env/Lib/hmac.py | 142 | 5063 | """HMAC (Keyed-Hashing for Message Authentication) Python module.
Implements the HMAC algorithm as described by RFC 2104.
"""
import warnings as _warnings
from _operator import _compare_digest as compare_digest
import hashlib as _hashlib
# Translation tables implementing the RFC 2104 key XOR: opad = 0x5C and
# ipad = 0x36, applied bytewise via bytes.translate().
trans_5C = bytes((x ^ 0x5C) for x in range(256))
trans_36 = bytes((x ^ 0x36) for x in range(256))
# The size of the digests returned by HMAC depends on the underlying
# hashing module used. Use digest_size from the instance of HMAC instead.
digest_size = None
class HMAC:
    """RFC 2104 HMAC class. Also complies with RFC 4231.
    This supports the API for Cryptographic Hash Functions (PEP 247).
    """
    blocksize = 64  # 512-bit HMAC; can be changed in subclasses.
    def __init__(self, key, msg = None, digestmod = None):
        """Create a new HMAC object.
        key:       key for the keyed hash object (bytes or bytearray).
        msg:       Initial input for the hash, if provided.
        digestmod: A module supporting PEP 247. *OR*
                   A hashlib constructor returning a new hash object. *OR*
                   A hash name suitable for hashlib.new().
                   Defaults to hashlib.md5.
                   Implicit default to hashlib.md5 is deprecated and will be
                   removed in Python 3.6.
        Note: key and msg must be bytes or bytearray objects.
        """
        if not isinstance(key, (bytes, bytearray)):
            raise TypeError("key: expected bytes or bytearray, but got %r" % type(key).__name__)
        if digestmod is None:
            _warnings.warn("HMAC() without an explicit digestmod argument "
                           "is deprecated.", PendingDeprecationWarning, 2)
            digestmod = _hashlib.md5
        # Normalize digestmod into a hash constructor taking optional data.
        if callable(digestmod):
            self.digest_cons = digestmod
        elif isinstance(digestmod, str):
            self.digest_cons = lambda d=b'': _hashlib.new(digestmod, d)
        else:
            self.digest_cons = lambda d=b'': digestmod.new(d)
        self.outer = self.digest_cons()
        self.inner = self.digest_cons()
        self.digest_size = self.inner.digest_size
        if hasattr(self.inner, 'block_size'):
            blocksize = self.inner.block_size
            if blocksize < 16:
                _warnings.warn('block_size of %d seems too small; using our '
                               'default of %d.' % (blocksize, self.blocksize),
                               RuntimeWarning, 2)
                blocksize = self.blocksize
        else:
            _warnings.warn('No block_size attribute on given digest object; '
                           'Assuming %d.' % (self.blocksize),
                           RuntimeWarning, 2)
            blocksize = self.blocksize
        # self.blocksize is the default blocksize. self.block_size is
        # effective block size as well as the public API attribute.
        self.block_size = blocksize
        # RFC 2104: a key longer than one block is hashed down first, then
        # every key is zero-padded to exactly one block.
        if len(key) > blocksize:
            key = self.digest_cons(key).digest()
        key = key + bytes(blocksize - len(key))
        # Prime the two streams with key^opad (outer) and key^ipad (inner).
        self.outer.update(key.translate(trans_5C))
        self.inner.update(key.translate(trans_36))
        if msg is not None:
            self.update(msg)
    @property
    def name(self):
        # E.g. 'hmac-sha256' for a SHA-256 inner digest.
        return "hmac-" + self.inner.name
    def update(self, msg):
        """Update this hashing object with the bytes in msg.
        """
        self.inner.update(msg)
    def copy(self):
        """Return a separate copy of this hashing object.
        An update to this copy won't affect the original object.
        """
        # Call __new__ directly to avoid the expensive __init__.
        other = self.__class__.__new__(self.__class__)
        other.digest_cons = self.digest_cons
        other.digest_size = self.digest_size
        other.inner = self.inner.copy()
        other.outer = self.outer.copy()
        return other
    def _current(self):
        """Return a hash object for the current state.
        To be used only internally with digest() and hexdigest().
        """
        h = self.outer.copy()
        h.update(self.inner.digest())
        return h
    def digest(self):
        """Return the hash value of this hashing object.
        This returns bytes. The object is not altered in any way by this
        function; you can continue updating the object after calling
        this function.
        """
        h = self._current()
        return h.digest()
    def hexdigest(self):
        """Like digest(), but returns a string of hexadecimal digits instead.
        """
        h = self._current()
        return h.hexdigest()
def new(key, msg = None, digestmod = None):
    """Build and return a fresh HMAC object.

    key:       the secret key (bytes or bytearray).
    msg:       optional initial bytes, hashed into the object immediately.
    digestmod: hash constructor, PEP 247 module, or hashlib algorithm name.

    The returned object supports update(), digest(), hexdigest() and
    copy(), so arbitrary data can be fed in and the MAC read out at any
    point.
    """
    return HMAC(key, msg, digestmod)
| gpl-3.0 |
andreparrish/python-for-android | python-build/python-libs/gdata/build/lib/gdata/data.py | 133 | 35748 | #!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
"""Provides classes and constants for the XML in the Google Data namespace.
Documentation for the raw XML which these classes represent can be found here:
http://code.google.com/apis/gdata/docs/2.0/elements.html
"""
__author__ = 'j.s@google.com (Jeff Scudder)'
import atom.core
import atom.data
GDATA_TEMPLATE = '{http://schemas.google.com/g/2005}%s'
OPENSEARCH_TEMPLATE = '{http://a9.com/-/spec/opensearchrss/1.0/}%s'
BATCH_TEMPLATE = '{http://schemas.google.com/gdata/batch}%s'
# Labels used in batch request entries to specify the desired CRUD operation.
BATCH_INSERT = 'insert'
BATCH_UPDATE = 'update'
BATCH_DELETE = 'delete'
BATCH_QUERY = 'query'
EVENT_LOCATION = 'http://schemas.google.com/g/2005#event'
ALTERNATE_LOCATION = 'http://schemas.google.com/g/2005#event.alternate'
PARKING_LOCATION = 'http://schemas.google.com/g/2005#event.parking'
CANCELED_EVENT = 'http://schemas.google.com/g/2005#event.canceled'
CONFIRMED_EVENT = 'http://schemas.google.com/g/2005#event.confirmed'
TENTATIVE_EVENT = 'http://schemas.google.com/g/2005#event.tentative'
CONFIDENTIAL_EVENT = 'http://schemas.google.com/g/2005#event.confidential'
DEFAULT_EVENT = 'http://schemas.google.com/g/2005#event.default'
PRIVATE_EVENT = 'http://schemas.google.com/g/2005#event.private'
PUBLIC_EVENT = 'http://schemas.google.com/g/2005#event.public'
OPAQUE_EVENT = 'http://schemas.google.com/g/2005#event.opaque'
TRANSPARENT_EVENT = 'http://schemas.google.com/g/2005#event.transparent'
CHAT_MESSAGE = 'http://schemas.google.com/g/2005#message.chat'
INBOX_MESSAGE = 'http://schemas.google.com/g/2005#message.inbox'
SENT_MESSAGE = 'http://schemas.google.com/g/2005#message.sent'
SPAM_MESSAGE = 'http://schemas.google.com/g/2005#message.spam'
STARRED_MESSAGE = 'http://schemas.google.com/g/2005#message.starred'
UNREAD_MESSAGE = 'http://schemas.google.com/g/2005#message.unread'
BCC_RECIPIENT = 'http://schemas.google.com/g/2005#message.bcc'
CC_RECIPIENT = 'http://schemas.google.com/g/2005#message.cc'
SENDER = 'http://schemas.google.com/g/2005#message.from'
REPLY_TO = 'http://schemas.google.com/g/2005#message.reply-to'
TO_RECIPIENT = 'http://schemas.google.com/g/2005#message.to'
ASSISTANT_REL = 'http://schemas.google.com/g/2005#assistant'
CALLBACK_REL = 'http://schemas.google.com/g/2005#callback'
CAR_REL = 'http://schemas.google.com/g/2005#car'
COMPANY_MAIN_REL = 'http://schemas.google.com/g/2005#company_main'
FAX_REL = 'http://schemas.google.com/g/2005#fax'
HOME_REL = 'http://schemas.google.com/g/2005#home'
HOME_FAX_REL = 'http://schemas.google.com/g/2005#home_fax'
ISDN_REL = 'http://schemas.google.com/g/2005#isdn'
MAIN_REL = 'http://schemas.google.com/g/2005#main'
MOBILE_REL = 'http://schemas.google.com/g/2005#mobile'
OTHER_REL = 'http://schemas.google.com/g/2005#other'
OTHER_FAX_REL = 'http://schemas.google.com/g/2005#other_fax'
PAGER_REL = 'http://schemas.google.com/g/2005#pager'
RADIO_REL = 'http://schemas.google.com/g/2005#radio'
TELEX_REL = 'http://schemas.google.com/g/2005#telex'
TTL_TDD_REL = 'http://schemas.google.com/g/2005#tty_tdd'
WORK_REL = 'http://schemas.google.com/g/2005#work'
WORK_FAX_REL = 'http://schemas.google.com/g/2005#work_fax'
WORK_MOBILE_REL = 'http://schemas.google.com/g/2005#work_mobile'
WORK_PAGER_REL = 'http://schemas.google.com/g/2005#work_pager'
NETMEETING_REL = 'http://schemas.google.com/g/2005#netmeeting'
OVERALL_REL = 'http://schemas.google.com/g/2005#overall'
PRICE_REL = 'http://schemas.google.com/g/2005#price'
QUALITY_REL = 'http://schemas.google.com/g/2005#quality'
EVENT_REL = 'http://schemas.google.com/g/2005#event'
EVENT_ALTERNATE_REL = 'http://schemas.google.com/g/2005#event.alternate'
EVENT_PARKING_REL = 'http://schemas.google.com/g/2005#event.parking'
AIM_PROTOCOL = 'http://schemas.google.com/g/2005#AIM'
MSN_PROTOCOL = 'http://schemas.google.com/g/2005#MSN'
YAHOO_MESSENGER_PROTOCOL = 'http://schemas.google.com/g/2005#YAHOO'
SKYPE_PROTOCOL = 'http://schemas.google.com/g/2005#SKYPE'
QQ_PROTOCOL = 'http://schemas.google.com/g/2005#QQ'
GOOGLE_TALK_PROTOCOL = 'http://schemas.google.com/g/2005#GOOGLE_TALK'
ICQ_PROTOCOL = 'http://schemas.google.com/g/2005#ICQ'
JABBER_PROTOCOL = 'http://schemas.google.com/g/2005#JABBER'
REGULAR_COMMENTS = 'http://schemas.google.com/g/2005#regular'
REVIEW_COMMENTS = 'http://schemas.google.com/g/2005#reviews'
MAIL_BOTH = 'http://schemas.google.com/g/2005#both'
MAIL_LETTERS = 'http://schemas.google.com/g/2005#letters'
MAIL_PARCELS = 'http://schemas.google.com/g/2005#parcels'
MAIL_NEITHER = 'http://schemas.google.com/g/2005#neither'
GENERAL_ADDRESS = 'http://schemas.google.com/g/2005#general'
LOCAL_ADDRESS = 'http://schemas.google.com/g/2005#local'
OPTIONAL_ATENDEE = 'http://schemas.google.com/g/2005#event.optional'
REQUIRED_ATENDEE = 'http://schemas.google.com/g/2005#event.required'
ATTENDEE_ACCEPTED = 'http://schemas.google.com/g/2005#event.accepted'
ATTENDEE_DECLINED = 'http://schemas.google.com/g/2005#event.declined'
ATTENDEE_INVITED = 'http://schemas.google.com/g/2005#event.invited'
ATTENDEE_TENTATIVE = 'http://schemas.google.com/g/2005#event.tentative'
class Error(Exception):
  """Base class for exceptions raised by this module."""
  pass
class MissingRequiredParameters(Error):
  """Raised when a batch operation receives neither an entry nor an id URL."""
  pass
class LinkFinder(atom.data.LinkFinder):
  """Mixin that adds typed link lookups to GData feeds and entries.

  Wraps the generic find_url/get_link helpers inherited from
  atom.data.LinkFinder with convenience accessors for the link relations
  (post, ACL, feed, previous, HTML alternate) shared across the Google
  Data APIs.
  """

  def find_html_link(self):
    """Return the href of the first rel='alternate' text/html link, or None."""
    candidates = (a_link.href for a_link in self.link
                  if a_link.rel == 'alternate' and a_link.type == 'text/html')
    return next(candidates, None)

  FindHtmlLink = find_html_link

  def get_html_link(self):
    """Return the first rel='alternate' text/html link element, or None."""
    candidates = (a_link for a_link in self.link
                  if a_link.rel == 'alternate' and a_link.type == 'text/html')
    return next(candidates, None)

  GetHtmlLink = get_html_link

  def find_post_link(self):
    """Return the URL to which new entries should be POSTed, or None."""
    return self.find_url('http://schemas.google.com/g/2005#post')

  FindPostLink = find_post_link

  def get_post_link(self):
    """Return the link element used for inserting new entries, or None."""
    return self.get_link('http://schemas.google.com/g/2005#post')

  GetPostLink = get_post_link

  def find_acl_link(self):
    """Return the URL of this resource's access control list, or None."""
    return self.find_url(
        'http://schemas.google.com/acl/2007#accessControlList')

  FindAclLink = find_acl_link

  def get_acl_link(self):
    """Return the link element for the access control list, or None."""
    return self.get_link(
        'http://schemas.google.com/acl/2007#accessControlList')

  GetAclLink = get_acl_link

  def find_feed_link(self):
    """Return the URL of the logically nested feed, or None."""
    return self.find_url('http://schemas.google.com/g/2005#feed')

  FindFeedLink = find_feed_link

  def get_feed_link(self):
    """Return the link element pointing at the nested feed, or None."""
    return self.get_link('http://schemas.google.com/g/2005#feed')

  GetFeedLink = get_feed_link

  def find_previous_link(self):
    """Return the URL of the previous page of results, or None."""
    return self.find_url('previous')

  FindPreviousLink = find_previous_link

  def get_previous_link(self):
    """Return the link element for the previous page of results, or None."""
    return self.get_link('previous')

  GetPreviousLink = get_previous_link
class TotalResults(atom.core.XmlElement):
  """The opensearch:totalResults element of a GData feed.
  Its text content is the total number of results matching the query,
  independent of paging.
  """
  _qname = OPENSEARCH_TEMPLATE % 'totalResults'
class StartIndex(atom.core.XmlElement):
  """The opensearch:startIndex element in a GData feed.
  Its text content is the 1-based index of the first result in this page.
  """
  _qname = OPENSEARCH_TEMPLATE % 'startIndex'
class ItemsPerPage(atom.core.XmlElement):
  """The opensearch:itemsPerPage element in a GData feed.
  Its text content is the maximum number of entries per result page.
  """
  _qname = OPENSEARCH_TEMPLATE % 'itemsPerPage'
class ExtendedProperty(atom.core.XmlElement):
  """The Google Data extendedProperty element.
  Used to store arbitrary key-value information specific to your
  application. The value can either be a text string stored as an XML
  attribute (.value), or an XML node (XmlBlob) as a child element.
  This element is used in the Google Calendar data API and the Google
  Contacts data API.
  """
  _qname = GDATA_TEMPLATE % 'extendedProperty'
  # XML attributes: the property key and its (optional) string value.
  name = 'name'
  value = 'value'
  def get_xml_blob(self):
    """Returns the XML blob as an atom.core.XmlElement.
    Returns:
      An XmlElement representing the blob's XML, or None if no
      blob was set.
    """
    # The blob, when present, is the single (first) child element.
    if self._other_elements:
      return self._other_elements[0]
    else:
      return None
  GetXmlBlob = get_xml_blob
  def set_xml_blob(self, blob):
    """Sets the contents of the extendedProperty to XML as a child node.
    Since the extendedProperty is only allowed one child element as an XML
    blob, setting the XML blob will erase any preexisting member elements
    in this object.
    Args:
      blob: str or atom.core.XmlElement representing the XML blob stored in
          the extendedProperty.
    """
    # Replacing the child list erases any previous blob / child nodes.
    if isinstance(blob, atom.core.XmlElement):
      self._other_elements = [blob]
    else:
      self._other_elements = [atom.core.parse(str(blob))]
  SetXmlBlob = set_xml_blob
class GDEntry(atom.data.Entry, LinkFinder):
  """Extends Atom Entry to provide GData-specific processing."""
  # GData v2 carries the entity tag as an attribute in the gd namespace.
  etag = '{http://schemas.google.com/g/2005}etag'
  def get_id(self):
    """Returns the entry's atom id text (whitespace-stripped) or None."""
    if self.id is not None and self.id.text is not None:
      return self.id.text.strip()
    return None
  GetId = get_id
  def is_media(self):
    """Returns True when this entry is a media entry (has an edit-media link)."""
    if self.find_media_edit_link():
      return True
    return False
  IsMedia = is_media
  def find_media_link(self):
    """Returns the URL to the media content, if the entry is a media entry.
    Otherwise returns None.
    """
    if self.is_media():
      return self.content.src
    return None
  FindMediaLink = find_media_link
class GDFeed(atom.data.Feed, LinkFinder):
  """A Feed from a GData service."""
  etag = '{http://schemas.google.com/g/2005}etag'
  # opensearch paging metadata and the entry type for this feed.
  total_results = TotalResults
  start_index = StartIndex
  items_per_page = ItemsPerPage
  entry = [GDEntry]

  def get_id(self):
    """Returns the feed's atom id text (whitespace-stripped) or None."""
    if self.id is not None and self.id.text is not None:
      return self.id.text.strip()
    return None

  GetId = get_id

  def get_generator(self):
    """Returns the feed generator's text (whitespace-stripped) or None."""
    if self.generator and self.generator.text:
      return self.generator.text.strip()
    return None

  # Consistency fix: every other accessor in this module exposes a
  # CamelCase alias (GetId, FindPostLink, ...); get_generator was the one
  # method missing its alias.
  GetGenerator = get_generator
class BatchId(atom.core.XmlElement):
  """Identifies a single operation in a batch request.
  The text content is an opaque ID echoed back by the server so clients
  can match response entries to request entries.
  """
  _qname = BATCH_TEMPLATE % 'id'
class BatchOperation(atom.core.XmlElement):
  """The CRUD operation which this batch entry represents."""
  _qname = BATCH_TEMPLATE % 'operation'
  # XML attribute: one of BATCH_INSERT, BATCH_UPDATE, BATCH_DELETE,
  # BATCH_QUERY (defined at module level above).
  type = 'type'
class BatchStatus(atom.core.XmlElement):
  """The batch:status element present in a batch response entry.
  A status element contains the code (HTTP response code) and
  reason as elements. In a single request these fields would
  be part of the HTTP response, but in a batch request each
  Entry operation has a corresponding Entry in the response
  feed which includes status information.
  See http://code.google.com/apis/gdata/batch.html#Handling_Errors
  """
  _qname = BATCH_TEMPLATE % 'status'
  # XML attributes mirroring the per-operation HTTP response.
  code = 'code'
  reason = 'reason'
  content_type = 'content-type'
class BatchEntry(GDEntry):
  """An atom:entry for use in batch requests.
  The BatchEntry contains additional members to specify the operation to be
  performed on this entry and a batch ID so that the server can reference
  individual operations in the response feed. For more information, see:
  http://code.google.com/apis/gdata/batch.html
  """
  # Child elements: the requested operation, the client-chosen ID, and
  # (in responses) the per-operation status.
  batch_operation = BatchOperation
  batch_id = BatchId
  batch_status = BatchStatus
class BatchInterrupted(atom.core.XmlElement):
  """The batch:interrupted element sent if batch request was interrupted.
  Only appears in a feed if some of the batch entries could not be processed.
  See: http://code.google.com/apis/gdata/batch.html#Handling_Errors
  """
  _qname = BATCH_TEMPLATE % 'interrupted'
  # XML attributes summarizing how far processing got.
  reason = 'reason'
  success = 'success'
  failures = 'failures'
  parsed = 'parsed'
class BatchFeed(GDFeed):
  """A feed containing a list of batch request entries.
  Each entry carries a batch ID and a CRUD operation type; the server's
  response feed mirrors the IDs so results can be matched to requests.
  """
  # Present only in response feeds whose processing was cut short.
  interrupted = BatchInterrupted
  entry = [BatchEntry]
  def add_batch_entry(self, entry=None, id_url_string=None,
                      batch_id_string=None, operation_string=None):
    """Logic for populating members of a BatchEntry and adding to the feed.
    If the entry is not a BatchEntry, it is converted to a BatchEntry so
    that the batch specific members will be present.
    The id_url_string can be used in place of an entry if the batch operation
    applies to a URL. For example query and delete operations require just
    the URL of an entry, no body is sent in the HTTP request. If an
    id_url_string is sent instead of an entry, a BatchEntry is created and
    added to the feed.
    This method also assigns the desired batch id to the entry so that it
    can be referenced in the server's response. If the batch_id_string is
    None, this method will assign a batch_id to be the index at which this
    entry will be in the feed's entry list.
    Args:
      entry: BatchEntry, atom.data.Entry, or another Entry flavor (optional)
          The entry which will be sent to the server as part of the batch
          request. The item must have a valid atom id so that the server
          knows which entry this request references.
      id_url_string: str (optional) The URL of the entry to be acted on. You
          can find this URL in the text member of the atom id for an entry.
          If an entry is not sent, this id will be used to construct a new
          BatchEntry which will be added to the request feed.
      batch_id_string: str (optional) The batch ID to be used to reference
          this batch operation in the results feed. If this parameter is
          None, the current length of the feed's entry array will be used as
          a count. Note that batch_ids should either always be specified or
          never, mixing could potentially result in duplicate batch ids.
      operation_string: str (optional) The desired batch operation which will
          set the batch_operation.type member of the entry. Options are
          'insert', 'update', 'delete', and 'query'
    Raises:
      MissingRequiredParameters: Raised if neither an id_url_string nor an
          entry are provided in the request.
    Returns:
      The added entry.
    """
    if entry is None and id_url_string is None:
      raise MissingRequiredParameters('supply either an entry or URL string')
    if entry is None and id_url_string is not None:
      entry = BatchEntry(id=atom.data.Id(text=id_url_string))
    if batch_id_string is not None:
      entry.batch_id = BatchId(text=batch_id_string)
    elif entry.batch_id is None or entry.batch_id.text is None:
      # Default batch id: the entry's position in the feed.
      entry.batch_id = BatchId(text=str(len(self.entry)))
    if operation_string is not None:
      entry.batch_operation = BatchOperation(type=operation_string)
    self.entry.append(entry)
    return entry
  AddBatchEntry = add_batch_entry
  def add_insert(self, entry, batch_id_string=None):
    """Add an insert request to the operations in this batch request feed.
    If the entry doesn't yet have an operation or a batch id, these will
    be set to the insert operation and a batch_id specified as a parameter.
    Args:
      entry: BatchEntry The entry which will be sent in the batch feed as an
          insert request.
      batch_id_string: str (optional) The batch ID for this operation; see
          add_batch_entry for the defaulting rules.
    """
    self.add_batch_entry(entry=entry, batch_id_string=batch_id_string,
                         operation_string=BATCH_INSERT)
  AddInsert = add_insert
  def add_update(self, entry, batch_id_string=None):
    """Add an update request to the list of batch operations in this feed.
    Sets the operation type of the entry to update if it is not already set
    and assigns the desired batch id to the entry so that it can be
    referenced in the server's response.
    Args:
      entry: BatchEntry The entry which will be sent to the server as an
          update (HTTP PUT) request. The item must have a valid atom id
          so that the server knows which entry to replace.
      batch_id_string: str (optional) The batch ID for this operation; see
          add_batch_entry for the defaulting rules.
    """
    self.add_batch_entry(entry=entry, batch_id_string=batch_id_string,
                         operation_string=BATCH_UPDATE)
  AddUpdate = add_update
  def add_delete(self, url_string=None, entry=None, batch_id_string=None):
    """Adds a delete request to the batch request feed.
    This method takes either the url_string which is the atom id of the item
    to be deleted, or the entry itself. The atom id of the entry must be
    present so that the server knows which entry should be deleted.
    Args:
      url_string: str (optional) The URL of the entry to be deleted. You can
          find this URL in the text member of the atom id for an entry.
      entry: BatchEntry (optional) The entry to be deleted.
      batch_id_string: str (optional) The batch ID for this operation.
    Raises:
      MissingRequiredParameters: Raised if neither a url_string nor an entry
          are provided in the request.
    """
    self.add_batch_entry(entry=entry, id_url_string=url_string,
        batch_id_string=batch_id_string, operation_string=BATCH_DELETE)
  AddDelete = add_delete
  def add_query(self, url_string=None, entry=None, batch_id_string=None):
    """Adds a query request to the batch request feed.
    This method takes either the url_string which is the query URL
    whose results will be added to the result feed. The query URL will
    be encapsulated in a BatchEntry, and you may pass in the BatchEntry
    with a query URL instead of sending a url_string.
    Args:
      url_string: str (optional) The query URL to be executed.
      entry: BatchEntry (optional) An entry whose id carries the query URL.
      batch_id_string: str (optional) The batch ID for this operation.
    Raises:
      MissingRequiredParameters: if neither url_string nor entry is given.
    """
    self.add_batch_entry(entry=entry, id_url_string=url_string,
        batch_id_string=batch_id_string, operation_string=BATCH_QUERY)
  AddQuery = add_query
  def find_batch_link(self):
    """Returns the URL to which this batch feed should be POSTed, or None."""
    return self.find_url('http://schemas.google.com/g/2005#batch')
  FindBatchLink = find_batch_link
class EntryLink(atom.core.XmlElement):
  """The gd:entryLink element.
  Represents a logically nested entry. For example, a <gd:who>
  representing a contact might have a nested entry from a contact feed.
  """
  _qname = GDATA_TEMPLATE % 'entryLink'
  # Child element: the nested entry itself.
  entry = GDEntry
  # XML attributes.
  rel = 'rel'
  read_only = 'readOnly'
  href = 'href'
class FeedLink(atom.core.XmlElement):
  """The gd:feedLink element.
  Represents a logically nested feed. For example, a calendar feed might
  have a nested feed representing all comments on entries.
  """
  _qname = GDATA_TEMPLATE % 'feedLink'
  # Child element: the nested feed itself.
  feed = GDFeed
  # XML attributes; countHint hints at the nested feed's entry count.
  rel = 'rel'
  read_only = 'readOnly'
  count_hint = 'countHint'
  href = 'href'
class AdditionalName(atom.core.XmlElement):
  """The gd:additionalName element.
  Specifies additional (eg. middle) name of the person.
  Contains an attribute for the phonetic representation of the name.
  """
  _qname = GDATA_TEMPLATE % 'additionalName'
  # Phonetic (Yomigana) reading of the name.
  yomi = 'yomi'
class Comments(atom.core.XmlElement):
  """The gd:comments element.
  Contains a comments feed for the enclosing entry (such as a calendar event).
  """
  _qname = GDATA_TEMPLATE % 'comments'
  # rel identifies the comment kind; presumably one of REGULAR_COMMENTS /
  # REVIEW_COMMENTS defined above -- confirm against the API docs.
  rel = 'rel'
  feed_link = FeedLink
class Country(atom.core.XmlElement):
  """The gd:country element.
  Country name along with optional country code. The country code is
  given in accordance with ISO 3166-1 alpha-2:
  http://www.iso.org/iso/iso-3166-1_decoding_table
  """
  _qname = GDATA_TEMPLATE % 'country'
  # Two-letter ISO 3166-1 alpha-2 code (XML attribute).
  code = 'code'
class EmailImParent(atom.core.XmlElement):
  """Shared XML attributes for the gd:email and gd:im elements."""
  address = 'address'
  label = 'label'
  rel = 'rel'
  primary = 'primary'
class Email(EmailImParent):
  """The gd:email element.
  An email address associated with the containing entity (which is
  usually an entity representing a person or a location).
  """
  _qname = GDATA_TEMPLATE % 'email'
  # Name shown alongside the address (XML attribute).
  display_name = 'displayName'
class FamilyName(atom.core.XmlElement):
  """The gd:familyName element.
  Specifies family name of the person, eg. "Smith".
  """
  _qname = GDATA_TEMPLATE % 'familyName'
  # Phonetic (Yomigana) reading of the name.
  yomi = 'yomi'
class Im(EmailImParent):
  """The gd:im element.
  An instant messaging address associated with the containing entity.
  """
  _qname = GDATA_TEMPLATE % 'im'
  # Messaging protocol identifier, e.g. AIM_PROTOCOL or JABBER_PROTOCOL
  # defined at module level above.
  protocol = 'protocol'
class GivenName(atom.core.XmlElement):
  """The gd:givenName element.
  Specifies given name of the person, eg. "John".
  """
  _qname = GDATA_TEMPLATE % 'givenName'
  # Phonetic (Yomigana) reading of the name.
  yomi = 'yomi'
class NamePrefix(atom.core.XmlElement):
  """The gd:namePrefix element.
  Honorific prefix, eg. 'Mr' or 'Mrs'. Text content holds the value.
  """
  _qname = GDATA_TEMPLATE % 'namePrefix'
class NameSuffix(atom.core.XmlElement):
  """The gd:nameSuffix element.
  Honorific suffix, eg. 'san' or 'III'. Text content holds the value.
  """
  _qname = GDATA_TEMPLATE % 'nameSuffix'
class FullName(atom.core.XmlElement):
  """The gd:fullName element.
  Unstructured representation of the name. Text content holds the value.
  """
  _qname = GDATA_TEMPLATE % 'fullName'
class Name(atom.core.XmlElement):
  """The gd:name element.
  Allows storing person's name in a structured way. Consists of
  given name, additional name, family name, prefix, suffix and full name.
  """
  _qname = GDATA_TEMPLATE % 'name'
  # Child elements for each structured name component.
  given_name = GivenName
  additional_name = AdditionalName
  family_name = FamilyName
  name_prefix = NamePrefix
  name_suffix = NameSuffix
  full_name = FullName
class OrgDepartment(atom.core.XmlElement):
  """The gd:orgDepartment element.
  Describes a department within an organization. Must appear within a
  gd:organization element.
  """
  _qname = GDATA_TEMPLATE % 'orgDepartment'
class OrgJobDescription(atom.core.XmlElement):
  """The gd:orgJobDescription element.
  Describes a job within an organization. Must appear within a
  gd:organization element.
  """
  _qname = GDATA_TEMPLATE % 'orgJobDescription'
class OrgName(atom.core.XmlElement):
  """The gd:orgName element.
  The name of the organization. Must appear within a gd:organization
  element.
  Contains a Yomigana attribute (Japanese reading aid) for the
  organization name.
  """
  _qname = GDATA_TEMPLATE % 'orgName'
  # Phonetic (Yomigana) reading of the organization name.
  yomi = 'yomi'
class OrgSymbol(atom.core.XmlElement):
  """The gd:orgSymbol element.
  Provides a symbol of an organization. Must appear within a
  gd:organization element.
  """
  _qname = GDATA_TEMPLATE % 'orgSymbol'
class OrgTitle(atom.core.XmlElement):
  """The gd:orgTitle element.
  The title of a person within an organization. Must appear within a
  gd:organization element.
  """
  _qname = GDATA_TEMPLATE % 'orgTitle'
class Organization(atom.core.XmlElement):
  """The gd:organization element.
  An organization, typically associated with a contact.
  """
  _qname = GDATA_TEMPLATE % 'organization'
  # XML attributes.
  label = 'label'
  primary = 'primary'
  rel = 'rel'
  # Child elements describing the organization.
  department = OrgDepartment
  job_description = OrgJobDescription
  name = OrgName
  symbol = OrgSymbol
  title = OrgTitle
class When(atom.core.XmlElement):
  """The gd:when element.
  Represents a period of time or an instant.
  """
  _qname = GDATA_TEMPLATE % 'when'
  # XML attributes (Python name -> XML attribute name).
  end = 'endTime'
  start = 'startTime'
  value = 'valueString'
class OriginalEvent(atom.core.XmlElement):
"""The gd:originalEvent element.
Equivalent to the Recurrence ID property specified in section 4.8.4.4
of RFC 2445. Appears in every instance of a recurring event, to identify
the original event.
Contains a <gd:when> element specifying the original start time of the
instance that has become an exception.
"""
_qname = GDATA_TEMPLATE % 'originalEvent'
id = 'id'
href = 'href'
when = When
class PhoneNumber(atom.core.XmlElement):
    """The gd:phoneNumber element.

    A phone number associated with the containing entity (which is usually
    an entity representing a person or a location).
    """
    _qname = GDATA_TEMPLATE % 'phoneNumber'
    # XML attributes.
    label = 'label'
    rel = 'rel'
    uri = 'uri'
    primary = 'primary'


class PostalAddress(atom.core.XmlElement):
    """The gd:postalAddress element (unstructured postal address)."""
    _qname = GDATA_TEMPLATE % 'postalAddress'
    # XML attributes.
    label = 'label'
    rel = 'rel'
    uri = 'uri'
    primary = 'primary'


class Rating(atom.core.XmlElement):
    """The gd:rating element.

    Represents a numeric rating of the enclosing entity, such as a
    comment. Each rating supplies its own scale, although it may be
    normalized by a service; for example, some services might convert all
    ratings to a scale from 1 to 5.
    """
    _qname = GDATA_TEMPLATE % 'rating'
    # XML attributes describing the rating and its scale.
    average = 'average'
    max = 'max'
    min = 'min'
    num_raters = 'numRaters'
    rel = 'rel'
    value = 'value'
class Recurrence(atom.core.XmlElement):
    """The gd:recurrence element.

    Represents the dates and times when a recurring event takes place.

    The text content follows the iCalendar standard (RFC 2445): usually a
    DTSTART property giving the start of the first instance, often a DTEND
    or DURATION property for its end, then RRULE, RDATE, EXRULE and/or
    EXDATE properties which collectively define the recurrence and its
    exceptions (see RFC 2445 section 4.8.5), and finally a VTIMEZONE
    component providing detailed timezone rules for any timezone ID
    mentioned in the preceding properties.

    Google services like Google Calendar don't generally generate EXRULE
    and EXDATE properties to represent exceptions to recurring events;
    instead, they generate <gd:recurrenceException> elements. However,
    imported events may still carry EXRULE/EXDATE properties, in which
    case they are passed through in the <gd:recurrence> element.

    Note that because of the use of <gd:recurrenceException>, a
    <gd:recurrence> element alone does not reveal whether any exceptions
    exist. To find all exceptions, look for <gd:recurrenceException>
    elements in the feed, and use their <gd:originalEvent> elements to
    match them up with <gd:recurrence> elements.
    """
    _qname = GDATA_TEMPLATE % 'recurrence'
class RecurrenceException(atom.core.XmlElement):
    """The gd:recurrenceException element.

    Represents an event that's an exception to a recurring event -- that
    is, an instance of a recurring event in which one or more aspects of
    the recurring event (such as attendance list, time, or location) have
    been changed.

    Contains a <gd:originalEvent> element that specifies the original
    recurring event that this event is an exception to.

    When you change an instance of a recurring event, that instance
    becomes an exception. It then behaves in one of two ways when the
    original recurring event is changed:

    - If you add, change, or remove comments, attendees, or attendee
      responses, the exception remains tied to the original event, and
      changes to the original event also change the exception.
    - If you make any other change (such as time or location), the
      instance becomes "specialized": changes to the original event no
      longer propagate to it.

    Regardless of whether an exception is specialized, if the instance it
    was derived from is deleted, the exception is deleted too. Note that
    changing the day or time of a recurring event deletes all instances
    (including specialized ones) and creates new ones.

    If a particular instance of a recurring event is deleted, that
    instance appears as a <gd:recurrenceException> containing a
    <gd:entryLink> whose <gd:eventStatus> is set to
    "http://schemas.google.com/g/2005#event.canceled". (For more
    information about canceled events, see RFC 2445.)
    """
    _qname = GDATA_TEMPLATE % 'recurrenceException'
    # XML attribute: 'true' if this exception is specialized.
    specialized = 'specialized'
    # Child elements.
    entry_link = EntryLink
    original_event = OriginalEvent
class Reminder(atom.core.XmlElement):
    """The gd:reminder element.

    A time interval, indicating how long before the containing entity's start
    time or due time attribute a reminder should be issued. Alternatively,
    may specify an absolute time at which a reminder should be issued. Also
    specifies a notification method, indicating what medium the system
    should use to remind the user.
    """
    _qname = GDATA_TEMPLATE % 'reminder'
    # XML attributes: either an absolute time or a relative offset
    # (days/hours/minutes), plus the notification method.
    absolute_time = 'absoluteTime'
    method = 'method'
    days = 'days'
    hours = 'hours'
    minutes = 'minutes'
class Agent(atom.core.XmlElement):
    """The gd:agent element.

    The agent who actually receives the mail. Used in work addresses.
    Also for 'in care of' or 'c/o'.
    """
    _qname = GDATA_TEMPLATE % 'agent'


class HouseName(atom.core.XmlElement):
    """The gd:housename element.

    Used in places where houses or buildings have names (and not
    necessarily numbers), eg. "The Pillars".
    """
    _qname = GDATA_TEMPLATE % 'housename'


class Street(atom.core.XmlElement):
    """The gd:street element.

    Can be street, avenue, road, etc. This element also includes the
    house number and room/apartment/flat/floor number.
    """
    _qname = GDATA_TEMPLATE % 'street'


class PoBox(atom.core.XmlElement):
    """The gd:pobox element.

    Covers actual P.O. boxes, drawers, locked bags, etc. This is usually
    but not always mutually exclusive with street.
    """
    _qname = GDATA_TEMPLATE % 'pobox'


class Neighborhood(atom.core.XmlElement):
    """The gd:neighborhood element.

    This is used to disambiguate a street address when a city contains more
    than one street with the same name, or to specify a small place whose
    mail is routed through a larger postal town. In China it could be a
    county or a minor city.
    """
    _qname = GDATA_TEMPLATE % 'neighborhood'


class City(atom.core.XmlElement):
    """The gd:city element.

    Can be city, village, town, borough, etc. This is the postal town and
    not necessarily the place of residence or place of business.
    """
    _qname = GDATA_TEMPLATE % 'city'


class Subregion(atom.core.XmlElement):
    """The gd:subregion element.

    Handles administrative districts such as U.S. or U.K. counties that are
    not used for mail addressing purposes. Subregion is not intended for
    delivery addresses.
    """
    _qname = GDATA_TEMPLATE % 'subregion'


class Region(atom.core.XmlElement):
    """The gd:region element.

    A state, province, county (in Ireland), Land (in Germany),
    departement (in France), etc.
    """
    _qname = GDATA_TEMPLATE % 'region'


class Postcode(atom.core.XmlElement):
    """The gd:postcode element.

    Postal code. Usually country-wide, but sometimes specific to the
    city (e.g. "2" in "Dublin 2, Ireland" addresses).
    """
    _qname = GDATA_TEMPLATE % 'postcode'


class Country(atom.core.XmlElement):
    """The gd:country element.

    The name or code of the country.
    """
    _qname = GDATA_TEMPLATE % 'country'


class FormattedAddress(atom.core.XmlElement):
    """The gd:formattedAddress element.

    The full, unstructured postal address.
    """
    _qname = GDATA_TEMPLATE % 'formattedAddress'
class StructuredPostalAddress(atom.core.XmlElement):
    """The gd:structuredPostalAddress element.

    Postal address split into components. It allows to store the address
    in locale independent format. The fields can be interpreted and used
    to generate formatted, locale dependent address. The following elements
    represent parts of the address: agent, house name, street, P.O. box,
    neighborhood, city, subregion, region, postal code, country. The
    subregion element is not used for postal addresses, it is provided for
    extended uses of addresses only. In order to store postal address in an
    unstructured form formatted address field is provided.
    """
    _qname = GDATA_TEMPLATE % 'structuredPostalAddress'
    # XML attributes.
    rel = 'rel'
    mail_class = 'mailClass'
    usage = 'usage'
    label = 'label'
    primary = 'primary'
    # Child elements, one per address component.
    agent = Agent
    house_name = HouseName
    street = Street
    po_box = PoBox
    neighborhood = Neighborhood
    city = City
    subregion = Subregion
    region = Region
    postcode = Postcode
    country = Country
    formatted_address = FormattedAddress
class Where(atom.core.XmlElement):
    """The gd:where element.

    A place (such as an event location) associated with the containing
    entity. The type of the association is determined by the rel attribute;
    the details of the location are contained in an embedded or linked-to
    Contact entry.

    A <gd:where> element is more general than a <gd:geoPt> element. The
    former identifies a place using a text description and/or a Contact
    entry, while the latter identifies a place using a specific geographic
    location.
    """
    _qname = GDATA_TEMPLATE % 'where'
    # XML attributes.
    label = 'label'
    rel = 'rel'
    value = 'valueString'
    # Child element pointing at the Contact entry with location details.
    entry_link = EntryLink
class AttendeeType(atom.core.XmlElement):
    """The gd:attendeeType element (e.g. required vs. optional)."""
    _qname = GDATA_TEMPLATE % 'attendeeType'
    value = 'value'


class AttendeeStatus(atom.core.XmlElement):
    """The gd:attendeeStatus element (the attendee's response status)."""
    _qname = GDATA_TEMPLATE % 'attendeeStatus'
    value = 'value'


class Who(atom.core.XmlElement):
    """The gd:who element.

    A person associated with the containing entity. The type of the
    association is determined by the rel attribute; the details about the
    person are contained in an embedded or linked-to Contact entry.

    The <gd:who> element can be used to specify email senders and
    recipients, calendar event organizers, and so on.
    """
    _qname = GDATA_TEMPLATE % 'who'
    # XML attributes.
    email = 'email'
    rel = 'rel'
    value = 'valueString'
    # Child elements.
    attendee_status = AttendeeStatus
    attendee_type = AttendeeType
    entry_link = EntryLink
| apache-2.0 |
matthaywardwebdesign/rethinkdb | external/v8_3.30.33.16/build/gyp/test/subdirectory/gyptest-SYMROOT-default.py | 399 | 1260 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies building a target and a subsidiary dependent target from a
.gyp file in a subdirectory, without specifying an explicit output build
directory, and using the generated solution or project file at the top
of the tree as the entry point.
The configuration sets the Xcode SYMROOT variable and uses --depth=
to make Xcode behave like the other build tools--that is, put all
built targets in a single output build directory at the top of the tree.
"""
import TestGyp
test = TestGyp.TestGyp()
test.run_gyp('prog1.gyp', '-Dset_symroot=1', '--depth=.', chdir='src')
test.relocate('src', 'relocate/src')
# Suppress the test infrastructure's setting SYMROOT on the command line.
test.build('prog1.gyp', SYMROOT=None, chdir='relocate/src')
test.run_built_executable('prog1',
stdout="Hello from prog1.c\n",
chdir='relocate/src')
test.run_built_executable('prog2',
stdout="Hello from prog2.c\n",
chdir='relocate/src')
test.pass_test()
| agpl-3.0 |
cpadavis/SpaceWarps | analysis/swap/shannon.py | 3 | 5216 | #============================================================================
"""
NAME
shannon.py
PURPOSE
Methods for calculating various information gains during binary
classification.
COMMENTS
Copied from informationgain.py at
https://github.com/CitizenScienceInAstronomyWorkshop/Bureaucracy
METHODS
shannon(x):
expectedInformationGain(p0, M_ll, M_nn)
informationGain(p0, M_ll, M_nn, c)
BUGS
AUTHORS
The code in this file was written by Edwin Simpson and Phil Marshall
during the Citizen Science in Astronomy Workshop at ASIAA, Taipei,
in March 2014, hosted by Meg Schwamb.
This file is part of the Space Warps project, which is distributed
under the MIT license by the Space Warps Science Team.
http://spacewarps.org/
HISTORY
2014-05-21 Incorporated into SWAP code Baumer & Davis (KIPAC)
LICENCE
The MIT License (MIT)
Copyright (c) 2014 CitizenScienceInAstronomyWorkshop
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
#============================================================================
from numpy import log2, ndarray
# ----------------------------------------------------------------------------
# The Shannon function:
def shannon(x):
    """Return x*log2(x), the Shannon information term.

    Accepts either a scalar or a numpy array. By the usual convention
    0*log2(0) == 0, so zero entries contribute exactly 0.0.

    Fix over the original: the array branch no longer mutates the
    caller's array in place (it previously overwrote zero entries
    with 1.0 as a side effect).
    """
    if not isinstance(x, ndarray):
        return x * log2(x) if x > 0 else 0.0
    # Work on a copy so the caller's array is untouched; mapping the
    # zero entries to 1 makes x*log2(x) evaluate to 0 for them.
    safe = x.copy()
    safe[safe == 0] = 1.0
    return safe * log2(safe)
# ----------------------------------------------------------------------------
# The Shannon entropy, S
def shannonEntropy(x):
    """Return the binary Shannon entropy S(x) = -x*log2(x) - (1-x)*log2(1-x).

    Accepts a scalar probability or a numpy array of probabilities;
    entries equal to 0 or 1 contribute zero entropy.

    Fixes over the original:
    - it referenced ``np.ndarray``/``np.log2`` although this module only
      does ``from numpy import log2, ndarray`` (a NameError at runtime);
    - the array branch no longer mutates the caller's array in place.
    """
    if not isinstance(x, ndarray):
        if 0 < x < 1:
            return -x * log2(x) - (1. - x) * log2(1. - x)
        return 0.0
    # Copy so the input is not modified; substitute the degenerate
    # p=0 / p=1 entries so each log term evaluates to 0 for them.
    p = x.copy()
    p[p == 0] = 1.0
    res = -p * log2(p)
    p[p == 1] = 0.0
    res = res - (1. - p) * log2(1. - p)
    return res
# ----------------------------------------------------------------------------
# Expectation value of the information that would be contributed by an
# agent defined by confusion matrix M when presented with a subject
# having probability p0, over both possible truths and both
# possible classifications:
def expectedInformationGain(p0, M_ll, M_nn):
    """Expected information contributed by one classification.

    The agent is described by its confusion-matrix entries M_ll and
    M_nn; the subject arrives with probability p0. The expectation is
    taken over both possible truths and both possible classifications.
    """
    p1 = 1 - p0
    conditional = (p0 * (shannon(M_ll) + shannon(1 - M_ll))
                   + p1 * (shannon(M_nn) + shannon(1 - M_nn)))
    marginal = (shannon(M_ll * p0 + (1 - M_nn) * p1)
                + shannon((1 - M_ll) * p0 + M_nn * p1))
    return conditional - marginal
# ----------------------------------------------------------------------------
# The information gain (relative entropy) contributed by an agent, defined by
# confusion matrix M, having classified a subject, that arrived having
# probability 'p0', as being 'c' (lens/not = true/false):
def informationGain(p0, M_ll, M_nn, lens):
    """Information gain (relative entropy) from a single classification.

    The agent (confusion-matrix entries M_ll, M_nn) has classified a
    subject that arrived with probability p0 as being a lens
    (lens=True) or not (lens=False).
    """
    p1 = 1 - p0
    if lens:
        M_cl, M_cn = M_ll, 1 - M_nn
    else:
        M_cl, M_cn = 1 - M_ll, M_nn
    # Probability of this classification, marginalized over the truth.
    pc = M_cl * p0 + M_cn * p1
    return p0 * shannon(M_cl / pc) + p1 * shannon(M_cn / pc)
return I
# ----------------------------------------------------------------------------
# Bayesian update of the probability of a subject by an agent whose
# confusion matrix is defined by M
def update(p0, M_ll, M_nn, lens):
    """Bayesian update of a subject's probability after one classification.

    The agent is characterized by its confusion-matrix entries M_ll and
    M_nn; ``lens`` is the classification it made (True = "lens").
    Returns the posterior probability of the subject being a lens.
    """
    if lens:
        likelihood_lens = M_ll
        likelihood_not = 1.0 - M_nn
    else:
        likelihood_lens = 1.0 - M_ll
        likelihood_not = M_nn
    evidence = p0 * likelihood_lens + (1.0 - p0) * likelihood_not
    return p0 * likelihood_lens / evidence
# ----------------------------------------------------------------------------
# The change in subject entropy transmitted by an agent, having classified a
# subject, that arrived having probability 'p0' and has new
# probability 'p1'
def entropyChange(p0, M_ll, M_nn, c):
    """Change in subject entropy transmitted by one classification.

    The agent (confusion-matrix entries M_ll, M_nn) classified as ``c``
    a subject that arrived with probability ``p0``.
    """
    posterior = update(p0, M_ll, M_nn, c)
    return mutualInformation(p0, posterior)
# ----------------------------------------------------------------------------
# The mutual information between states with probability 'p0' and 'p1'
def mutualInformation(p0, p1):
    """Entropy difference S(p0) - S(p1) between two probability states."""
    before = shannonEntropy(p0)
    after = shannonEntropy(p1)
    return before - after
#============================================================================
| mit |
mverzett/rootpy | examples/stats/plot_quantiles.py | 7 | 1986 | #!/usr/bin/env python
"""
=================================================
Draw a Quantile-Quantile Plot and Confidence Band
=================================================
This is an example of drawing a quantile-quantile plot with a confidence level
(CL) band.
"""
print(__doc__)

import ROOT

from rootpy.interactive import wait
from rootpy.plotting import Hist, Canvas, Legend, set_style
from rootpy.plotting.contrib.quantiles import qqgraph
from rootpy.extern.six.moves import range

set_style('ATLAS')

# One wide canvas split into two pads: left for the two histograms,
# right for their quantile-quantile comparison.
c = Canvas(width=1200, height=600)
c.Divide(2, 1, 1e-3, 1e-3)

# Fill two histograms with Gaussians of different widths.
rand = ROOT.TRandom3()
h1 = Hist(100, -5, 5, name="h1", title="Histogram 1",
          linecolor='red', legendstyle='l')
h2 = Hist(100, -5, 5, name="h2", title="Histogram 2",
          linecolor='blue', legendstyle='l')
for ievt in range(10000):
    h1.Fill(rand.Gaus(0, 0.8))
    h2.Fill(rand.Gaus(0, 1))

# Left pad: overlay the histograms with a legend.
pad = c.cd(1)
h1.Draw('hist')
h2.Draw('hist same')
leg = Legend([h1, h2], pad=pad, leftmargin=0.5,
             topmargin=0.11, rightmargin=0.05,
             textsize=20)
leg.Draw()

# Right pad: QQ graph of h1 vs h2 with a confidence-level band.
pad = c.cd(2)
gr = qqgraph(h1, h2)
gr.xaxis.title = h1.title
gr.yaxis.title = h2.title
gr.fillcolor = 17
gr.fillstyle = 'solid'
gr.linecolor = 17
gr.markercolor = 'darkred'
gr.markerstyle = 20
gr.title = "QQ with CL"
gr.Draw("ap")

# NOTE(review): y_min/y_max are read from GetXaxis() (not GetYaxis())
# and none of these four values are used afterwards -- confirm intent.
x_min = gr.GetXaxis().GetXmin()
x_max = gr.GetXaxis().GetXmax()
y_min = gr.GetXaxis().GetXmin()
y_max = gr.GetXaxis().GetXmax()
# Redraw: '3' paints the filled CL band, then the points on top.
gr.Draw('a3')
gr.Draw('Xp same')

# a straight line y=x to be a reference
f_dia = ROOT.TF1("f_dia", "x",
                 h1.GetXaxis().GetXmin(),
                 h1.GetXaxis().GetXmax())
f_dia.SetLineColor(9)
f_dia.SetLineWidth(2)
f_dia.SetLineStyle(2)
f_dia.Draw("same")

leg = Legend(3, pad=pad, leftmargin=0.45,
             topmargin=0.45, rightmargin=0.05,
             textsize=20)
leg.AddEntry(gr, "QQ points", "p")
leg.AddEntry(gr, "68% CL band", "f")
leg.AddEntry(f_dia, "Diagonal line", "l")
leg.Draw()

c.Modified()
c.Update()
c.Draw()
# Keep the canvas open until the user closes it.
wait()
| gpl-3.0 |
xujun10110/golismero | thirdparty_libs/django/utils/formats.py | 104 | 7799 | import decimal
import datetime
from django.conf import settings
from django.utils import dateformat, numberformat, datetime_safe
from django.utils.importlib import import_module
from django.utils.encoding import force_str
from django.utils.functional import lazy
from django.utils.safestring import mark_safe
from django.utils import six
from django.utils.translation import get_language, to_locale, check_for_language
# format_cache is a mapping from (format_type, lang) to the format string.
# By using the cache, it is possible to avoid running get_format_modules
# repeatedly. A cached value of None means "no format module defines it".
_format_cache = {}
# Mapping from lang to the list of imported format modules for it.
_format_modules_cache = {}

# ISO 8601 input formats that are always accepted in addition to the
# locale-specific ones, keyed by the settings name they extend.
ISO_INPUT_FORMATS = {
    'DATE_INPUT_FORMATS': ('%Y-%m-%d',),
    'TIME_INPUT_FORMATS': ('%H:%M:%S', '%H:%M'),
    'DATETIME_INPUT_FORMATS': (
        '%Y-%m-%d %H:%M:%S',
        '%Y-%m-%d %H:%M:%S.%f',
        '%Y-%m-%d %H:%M',
        '%Y-%m-%d'
    ),
}
def reset_format_cache():
    """Clear any cached formats.

    This method is provided primarily for testing purposes,
    so that the effects of cached formats can be removed.
    """
    global _format_cache, _format_modules_cache
    _format_cache = {}
    _format_modules_cache = {}
def iter_format_modules(lang):
    """
    Does the heavy lifting of finding format modules.

    Yields the importable ``formats`` modules for ``lang``. Yields
    nothing if ``lang`` is not an available language.
    """
    if check_for_language(lang):
        format_locations = ['django.conf.locale.%s']
        if settings.FORMAT_MODULE_PATH:
            format_locations.append(settings.FORMAT_MODULE_PATH + '.%s')
        # Reversed so that a project-supplied FORMAT_MODULE_PATH is
        # yielded (and therefore consulted) before Django's bundled one.
        format_locations.reverse()
        locale = to_locale(lang)
        # Try the full locale first (e.g. 'pt_BR'), then the bare
        # language ('pt') as a fallback.
        locales = [locale]
        if '_' in locale:
            locales.append(locale.split('_')[0])
        for location in format_locations:
            for loc in locales:
                try:
                    yield import_module('.formats', location % loc)
                except ImportError:
                    # No formats module at this location; try the next.
                    pass
def get_format_modules(lang=None, reverse=False):
    """Returns a list of the format modules found"""
    if lang is None:
        lang = get_language()
    # setdefault keeps the first list computed for each language.
    cached = _format_modules_cache.setdefault(
        lang, list(iter_format_modules(lang)))
    return list(reversed(cached)) if reverse else cached
def get_format(format_type, lang=None, use_l10n=None):
    """
    For a specific format type, returns the format for the current
    language (locale), defaults to the format in the settings.
    format_type is the name of the format, e.g. 'DATE_FORMAT'

    If use_l10n is provided and is not None, that will force the value to
    be localized (or not), overriding the value of settings.USE_L10N.
    """
    format_type = force_str(format_type)
    if use_l10n or (use_l10n is None and settings.USE_L10N):
        if lang is None:
            lang = get_language()
        cache_key = (format_type, lang)
        try:
            cached = _format_cache[cache_key]
            if cached is not None:
                return cached
            else:
                # A cached None means no format module defines this
                # format type: fall back to the general setting.
                return getattr(settings, format_type)
        except KeyError:
            # Not cached yet: ask each format module in precedence order.
            for module in get_format_modules(lang):
                try:
                    val = getattr(module, format_type)
                    # Make sure the ISO fallback input formats are always
                    # accepted in addition to the locale-specific ones.
                    for iso_input in ISO_INPUT_FORMATS.get(format_type, ()):
                        if iso_input not in val:
                            if isinstance(val, tuple):
                                val = list(val)
                            val.append(iso_input)
                    _format_cache[cache_key] = val
                    return val
                except AttributeError:
                    # This module does not define the format; try the next.
                    pass
            # No module defined it; cache that fact (None sentinel).
            _format_cache[cache_key] = None
    return getattr(settings, format_type)
# Lazy variant of get_format: the lookup is deferred until the result is
# first used (it may resolve to text, a list, or a tuple).
get_format_lazy = lazy(get_format, six.text_type, list, tuple)
def date_format(value, format=None, use_l10n=None):
    """
    Formats a datetime.date or datetime.datetime object using a
    localizable format.

    If use_l10n is provided and is not None, that will force the value to
    be localized (or not), overriding the value of settings.USE_L10N.
    """
    format_string = get_format(format or 'DATE_FORMAT', use_l10n=use_l10n)
    return dateformat.format(value, format_string)
def time_format(value, format=None, use_l10n=None):
    """
    Formats a datetime.time object using a localizable format.

    If use_l10n is provided and is not None, that will force the value to
    be localized (or not), overriding the value of settings.USE_L10N.
    """
    format_string = get_format(format or 'TIME_FORMAT', use_l10n=use_l10n)
    return dateformat.time_format(value, format_string)
def number_format(value, decimal_pos=None, use_l10n=None, force_grouping=False):
    """
    Formats a numeric value using localization settings.

    If use_l10n is provided and is not None, that will force the value to
    be localized (or not), overriding the value of settings.USE_L10N.
    """
    localized = use_l10n or (use_l10n is None and settings.USE_L10N)
    lang = get_language() if localized else None
    return numberformat.format(
        value,
        get_format('DECIMAL_SEPARATOR', lang, use_l10n=use_l10n),
        decimal_pos,
        get_format('NUMBER_GROUPING', lang, use_l10n=use_l10n),
        get_format('THOUSAND_SEPARATOR', lang, use_l10n=use_l10n),
        force_grouping=force_grouping
    )
def localize(value, use_l10n=None):
    """
    Checks if value is a localizable type (date, number...) and returns it
    formatted as a string using current locale format.

    If use_l10n is provided and is not None, that will force the value to
    be localized (or not), overriding the value of settings.USE_L10N.
    """
    # bool is checked before the numeric types because bool is a
    # subclass of int and would otherwise be formatted as a number.
    if isinstance(value, bool):
        return mark_safe(six.text_type(value))
    elif isinstance(value, (decimal.Decimal, float) + six.integer_types):
        return number_format(value, use_l10n=use_l10n)
    # datetime is checked before date because it is a subclass of date.
    elif isinstance(value, datetime.datetime):
        return date_format(value, 'DATETIME_FORMAT', use_l10n=use_l10n)
    elif isinstance(value, datetime.date):
        return date_format(value, use_l10n=use_l10n)
    elif isinstance(value, datetime.time):
        return time_format(value, 'TIME_FORMAT', use_l10n=use_l10n)
    else:
        # Not a localizable type: return the value unchanged.
        return value
def localize_input(value, default=None):
    """
    Checks if an input value is a localizable type and returns it
    formatted with the appropriate formatting string of the current locale.
    """
    if isinstance(value, (decimal.Decimal, float) + six.integer_types):
        return number_format(value)
    # datetime before date: datetime is a subclass of date.
    elif isinstance(value, datetime.datetime):
        # datetime_safe wraps strftime to cope with dates that the plain
        # datetime implementation rejects (years before 1900).
        value = datetime_safe.new_datetime(value)
        format = force_str(default or get_format('DATETIME_INPUT_FORMATS')[0])
        return value.strftime(format)
    elif isinstance(value, datetime.date):
        value = datetime_safe.new_date(value)
        format = force_str(default or get_format('DATE_INPUT_FORMATS')[0])
        return value.strftime(format)
    elif isinstance(value, datetime.time):
        format = force_str(default or get_format('TIME_INPUT_FORMATS')[0])
        return value.strftime(format)
    # Anything else is passed through unchanged.
    return value
def sanitize_separators(value):
    """
    Sanitizes a value according to the current decimal and
    thousand separator setting. Used with form field input.

    Converts a locale-formatted number string (e.g. '1.234,56') into a
    Python-parsable one ('1234.56'). Non-string values and values with
    USE_L10N disabled are returned unchanged.
    """
    if settings.USE_L10N:
        decimal_separator = get_format('DECIMAL_SEPARATOR')
        if isinstance(value, six.string_types):
            parts = []
            if decimal_separator in value:
                # Split off the decimal part; only the first occurrence
                # of the separator is treated as the decimal point.
                value, decimals = value.split(decimal_separator, 1)
                parts.append(decimals)
            if settings.USE_THOUSAND_SEPARATOR:
                # Strip the grouping separators from the integer part.
                parts.append(value.replace(get_format('THOUSAND_SEPARATOR'), ''))
            else:
                parts.append(value)
            # parts is [decimals, integer]; reverse and join with '.'.
            value = '.'.join(reversed(parts))
    return value
| gpl-2.0 |
kenshay/ImageScript | ProgramData/SystemFiles/Python/Lib/mutex.py | 21 | 1878 | """Mutual exclusion -- for use with module sched
A mutex has two pieces of state -- a 'locked' bit and a queue.
When the mutex is not locked, the queue is empty.
Otherwise, the queue contains 0 or more (function, argument) pairs
representing functions (or methods) waiting to acquire the lock.
When the mutex is unlocked while the queue is not empty,
the first queue entry is removed and its function(argument) pair called,
implying it now has the lock.
Of course, no multi-threading is implied -- hence the funny interface
for lock, where a function is called once the lock is acquired.
"""
# Python 2 only: emit a py3k DeprecationWarning (under `python -3`)
# because this module was removed in Python 3.0.
from warnings import warnpy3k
warnpy3k("the mutex module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
from collections import deque
class mutex:
    """A single-threaded mutual-exclusion lock with a wait queue.

    The state is a ``locked`` flag plus a ``queue`` of
    (function, argument) pairs waiting to acquire the lock. Unlocking
    while the queue is non-empty pops the first pair and calls
    function(argument), which then implicitly holds the lock. No
    multi-threading is implied.
    """

    def __init__(self):
        """Create a new mutex -- initially unlocked."""
        self.locked = False
        self.queue = deque()

    def test(self):
        """Return the current state of the locked bit."""
        return self.locked

    def testandset(self):
        """Grab the lock if it is free; return True on success."""
        if self.locked:
            return False
        self.locked = True
        return True

    def lock(self, function, argument):
        """Acquire the lock and call function(argument).

        If the mutex is already held, queue the (function, argument)
        pair instead; it will be invoked by a later unlock().
        """
        if self.testandset():
            function(argument)
        else:
            self.queue.append((function, argument))

    def unlock(self):
        """Release the lock, handing it to the next queued call if any."""
        if not self.queue:
            self.locked = False
            return
        function, argument = self.queue.popleft()
        function(argument)
| gpl-3.0 |
ravibhure/ansible | lib/ansible/modules/windows/win_iis_virtualdirectory.py | 47 | 2485 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, Henrik Wallström <henrik@wallstroms.nu>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: win_iis_virtualdirectory
version_added: "2.0"
short_description: Configures a virtual directory in IIS.
description:
- Creates, Removes and configures a virtual directory in IIS.
options:
name:
description:
- The name of the virtual directory to create or remove
required: true
state:
description:
- Whether to add or remove the specified virtual directory
choices:
- absent
- present
required: false
default: present
site:
description:
- The site name under which the virtual directory is created or exists.
required: true
application:
description:
- The application under which the virtual directory is created or exists.
required: false
default: null
physical_path:
description:
- The physical path to the folder in which the new virtual directory is created. The specified folder must already exist.
required: false
default: null
author: Henrik Wallström
'''
EXAMPLES = r'''
- name: Create a virtual directory if it does not exist
win_iis_virtualdirectory:
name: somedirectory
site: somesite
state: present
physical_path: c:\virtualdirectory\some
- name: Remove a virtual directory if it exists
win_iis_virtualdirectory:
name: somedirectory
site: somesite
state: absent
- name: Create a virtual directory on an application if it does not exist
win_iis_virtualdirectory:
name: somedirectory
site: somesite
application: someapp
state: present
physical_path: c:\virtualdirectory\some
'''
| gpl-3.0 |
cleinias/Homeo | src/VREP/vrep.py | 1 | 52447 | # This file is part of the REMOTE API
#
# Copyright 2006-2014 Dr. Marc Andreas Freese. All rights reserved.
# marc@coppeliarobotics.com
# www.coppeliarobotics.com
#
# The REMOTE API is licensed under the terms of GNU GPL:
#
# -------------------------------------------------------------------
# The REMOTE API is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# The REMOTE API is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with the REMOTE API. If not, see <http://www.gnu.org/licenses/>.
# -------------------------------------------------------------------
#
# This file was automatically created for V-REP release V3.1.0 on January 20th 2014
import platform
import struct
from ctypes import *
from vrepConst import *
# Load the platform-specific V-REP remote API shared library via ctypes.
# NOTE(review): the library paths are relative to the current working
# directory, not to this file's directory -- confirm callers chdir first.
libsimx = None
if platform.system() == 'Windows':
    libsimx = CDLL("./remoteApi.dll")
elif platform.system() == 'Darwin':
    # Mac OS X
    libsimx = CDLL("./remoteApi.dylib")
else:
    # Assume Linux / other POSIX.
    libsimx = CDLL("./remoteApi.so")
#ctypes wrapper prototypes
c_GetJointPosition = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_float), c_int32)(("simxGetJointPosition", libsimx))
c_SetJointPosition = CFUNCTYPE(c_int32,c_int32, c_int32, c_float, c_int32)(("simxSetJointPosition", libsimx))
c_GetJointMatrix = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_float), c_int32)(("simxGetJointMatrix", libsimx))
c_SetSphericalJointMatrix = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_float), c_int32)(("simxSetSphericalJointMatrix", libsimx))
c_SetJointTargetVelocity = CFUNCTYPE(c_int32,c_int32, c_int32, c_float, c_int32)(("simxSetJointTargetVelocity", libsimx))
c_SetJointTargetPosition = CFUNCTYPE(c_int32,c_int32, c_int32, c_float, c_int32)(("simxSetJointTargetPosition", libsimx))
c_JointGetForce = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_float), c_int32)(("simxJointGetForce", libsimx))
c_SetJointForce = CFUNCTYPE(c_int32,c_int32, c_int32, c_float, c_int32)(("simxSetJointForce", libsimx))
c_ReadForceSensor = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_ubyte), POINTER(c_float), POINTER(c_float), c_int32)(("simxReadForceSensor", libsimx))
c_BreakForceSensor = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32)(("simxBreakForceSensor", libsimx))
c_ReadVisionSensor = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_ubyte), POINTER(POINTER(c_float)), POINTER(POINTER(c_int32)), c_int32)(("simxReadVisionSensor", libsimx))
c_GetObjectHandle = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), POINTER(c_int32), c_int32)(("simxGetObjectHandle", libsimx))
c_GetVisionSensorImage = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_int32), POINTER(POINTER(c_byte)), c_ubyte, c_int32)(("simxGetVisionSensorImage", libsimx))
c_SetVisionSensorImage = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_byte), c_int32, c_ubyte, c_int32)(("simxSetVisionSensorImage", libsimx))
c_GetVisionSensorDepthBuffer= CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_int32), POINTER(POINTER(c_float)), c_int32)(("simxGetVisionSensorDepthBuffer", libsimx))
c_GetObjectChild = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, POINTER(c_int32), c_int32)(("simxGetObjectChild", libsimx))
c_GetObjectParent = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_int32), c_int32)(("simxGetObjectParent", libsimx))
c_ReadProximitySensor = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_ubyte), POINTER(c_float), POINTER(c_int32), POINTER(c_float), c_int32)(("simxReadProximitySensor", libsimx))
c_LoadModel = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), c_ubyte, POINTER(c_int32), c_int32)(("simxLoadModel", libsimx))
c_LoadUI = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), c_ubyte, POINTER(c_int32), POINTER(POINTER(c_int32)), c_int32)(("simxLoadUI", libsimx))
c_LoadScene = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), c_ubyte, c_int32)(("simxLoadScene", libsimx))
c_StartSimulation = CFUNCTYPE(c_int32,c_int32, c_int32)(("simxStartSimulation", libsimx))
c_PauseSimulation = CFUNCTYPE(c_int32,c_int32, c_int32)(("simxPauseSimulation", libsimx))
c_StopSimulation = CFUNCTYPE(c_int32,c_int32, c_int32)(("simxStopSimulation", libsimx))
c_GetUIHandle = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), POINTER(c_int32), c_int32)(("simxGetUIHandle", libsimx))
c_GetUISlider = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, POINTER(c_int32), c_int32)(("simxGetUISlider", libsimx))
c_SetUISlider = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, c_int32, c_int32)(("simxSetUISlider", libsimx))
c_GetUIEventButton = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_int32), POINTER(c_int32), c_int32)(("simxGetUIEventButton", libsimx))
c_GetUIButtonProperty = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, POINTER(c_int32), c_int32)(("simxGetUIButtonProperty", libsimx))
c_SetUIButtonProperty = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, c_int32, c_int32)(("simxSetUIButtonProperty", libsimx))
c_AddStatusbarMessage = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), c_int32)(("simxAddStatusbarMessage", libsimx))
c_AuxiliaryConsoleOpen = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), c_int32, c_int32, POINTER(c_int32), POINTER(c_int32), POINTER(c_float), POINTER(c_float), POINTER(c_int32), c_int32)(("simxAuxiliaryConsoleOpen", libsimx))
c_AuxiliaryConsoleClose = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32)(("simxAuxiliaryConsoleClose", libsimx))
c_AuxiliaryConsolePrint = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_char), c_int32)(("simxAuxiliaryConsolePrint", libsimx))
c_AuxiliaryConsoleShow = CFUNCTYPE(c_int32,c_int32, c_int32, c_ubyte, c_int32)(("simxAuxiliaryConsoleShow", libsimx))
c_GetObjectOrientation = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, POINTER(c_float), c_int32)(("simxGetObjectOrientation", libsimx))
c_GetObjectPosition = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, POINTER(c_float), c_int32)(("simxGetObjectPosition", libsimx))
c_SetObjectOrientation = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, POINTER(c_float), c_int32)(("simxSetObjectOrientation", libsimx))
c_SetObjectPosition = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, POINTER(c_float), c_int32)(("simxSetObjectPosition", libsimx))
c_SetObjectParent = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, c_ubyte, c_int32)(("simxSetObjectParent", libsimx))
c_SetUIButtonLabel = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, POINTER(c_char), POINTER(c_char), c_int32)(("simxSetUIButtonLabel", libsimx))
c_GetLastErrors = CFUNCTYPE(c_int32,c_int32, POINTER(c_int32), POINTER(POINTER(c_char)), c_int32)(("simxGetLastErrors", libsimx))
c_GetArrayParameter = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_float), c_int32)(("simxGetArrayParameter", libsimx))
c_SetArrayParameter = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_float), c_int32)(("simxSetArrayParameter", libsimx))
c_GetBooleanParameter = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_ubyte), c_int32)(("simxGetBooleanParameter", libsimx))
c_SetBooleanParameter = CFUNCTYPE(c_int32,c_int32, c_int32, c_ubyte, c_int32)(("simxSetBooleanParameter", libsimx))
c_GetIntegerParameter = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_int32), c_int32)(("simxGetIntegerParameter", libsimx))
c_SetIntegerParameter = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, c_int32)(("simxSetIntegerParameter", libsimx))
c_GetFloatingParameter = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_float), c_int32)(("simxGetFloatingParameter", libsimx))
c_SetFloatingParameter = CFUNCTYPE(c_int32,c_int32, c_int32, c_float, c_int32)(("simxSetFloatingParameter", libsimx))
c_GetStringParameter = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(POINTER(c_char)), c_int32)(("simxGetStringParameter", libsimx))
c_GetCollisionHandle = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), POINTER(c_int32), c_int32)(("simxGetCollisionHandle", libsimx))
c_GetDistanceHandle = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), POINTER(c_int32), c_int32)(("simxGetDistanceHandle", libsimx))
c_ReadCollision = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_ubyte), c_int32)(("simxReadCollision", libsimx))
c_ReadDistance = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_float), c_int32)(("simxReadDistance", libsimx))
c_RemoveObject = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32)(("simxRemoveObject", libsimx))
c_RemoveUI = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32)(("simxRemoveUI", libsimx))
c_CloseScene = CFUNCTYPE(c_int32,c_int32, c_int32)(("simxCloseScene", libsimx))
c_GetObjects = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_int32), POINTER(POINTER(c_int32)), c_int32)(("simxGetObjects", libsimx))
c_DisplayDialog = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), POINTER(c_char), c_int32, POINTER(c_char), POINTER(c_float), POINTER(c_float), POINTER(c_int32), POINTER(c_int32), c_int32)(("simxDisplayDialog", libsimx))
c_EndDialog = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32)(("simxEndDialog", libsimx))
c_GetDialogInput = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(POINTER(c_char)), c_int32)(("simxGetDialogInput", libsimx))
c_GetDialogResult = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_int32), c_int32)(("simxGetDialogResult", libsimx))
c_CopyPasteObjects = CFUNCTYPE(c_int32,c_int32, POINTER(c_int32), c_int32, POINTER(POINTER(c_int32)), POINTER(c_int32), c_int32)(("simxCopyPasteObjects", libsimx))
c_GetObjectSelection = CFUNCTYPE(c_int32,c_int32, POINTER(POINTER(c_int32)), POINTER(c_int32), c_int32)(("simxGetObjectSelection", libsimx))
c_SetObjectSelection = CFUNCTYPE(c_int32,c_int32, POINTER(c_int32), c_int32, c_int32)(("simxSetObjectSelection", libsimx))
c_ClearFloatSignal = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), c_int32)(("simxClearFloatSignal", libsimx))
c_ClearIntegerSignal = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), c_int32)(("simxClearIntegerSignal", libsimx))
c_ClearStringSignal = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), c_int32)(("simxClearStringSignal", libsimx))
c_GetFloatSignal = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), POINTER(c_float), c_int32)(("simxGetFloatSignal", libsimx))
c_GetIntegerSignal = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), POINTER(c_int32), c_int32)(("simxGetIntegerSignal", libsimx))
c_GetStringSignal = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), POINTER(POINTER(c_ubyte)), POINTER(c_int32), c_int32)(("simxGetStringSignal", libsimx))
c_SetFloatSignal = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), c_float, c_int32)(("simxSetFloatSignal", libsimx))
c_SetIntegerSignal = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), c_int32, c_int32)(("simxSetIntegerSignal", libsimx))
c_SetStringSignal = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), POINTER(c_ubyte), c_int32, c_int32)(("simxSetStringSignal", libsimx))
c_AppendStringSignal = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), POINTER(c_ubyte), c_int32, c_int32)(("simxAppendStringSignal", libsimx))
c_GetObjectFloatParameter = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, POINTER(c_float), c_int32)(("simxGetObjectFloatParameter", libsimx))
c_SetObjectFloatParameter = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, c_float, c_int32)(("simxSetObjectFloatParameter", libsimx))
c_GetObjectIntParameter = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, POINTER(c_int32), c_int32)(("simxGetObjectIntParameter", libsimx))
c_SetObjectIntParameter = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, c_int32, c_int32)(("simxSetObjectIntParameter", libsimx))
c_GetModelProperty = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_int32), c_int32)(("simxGetModelProperty", libsimx))
c_SetModelProperty = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, c_int32)(("simxSetModelProperty", libsimx))
c_Start = CFUNCTYPE(c_int32,POINTER(c_char), c_int32, c_ubyte, c_ubyte, c_int32, c_int32)(("simxStart", libsimx))
c_Finish = CFUNCTYPE(None, c_int32)(("simxFinish", libsimx))
c_GetPingTime = CFUNCTYPE(c_int32,c_int32, POINTER(c_int32))(("simxGetPingTime", libsimx))
c_GetLastCmdTime = CFUNCTYPE(c_int32,c_int32)(("simxGetLastCmdTime", libsimx))
c_SynchronousTrigger = CFUNCTYPE(c_int32,c_int32)(("simxSynchronousTrigger", libsimx))
c_Synchronous = CFUNCTYPE(c_int32,c_int32, c_ubyte)(("simxSynchronous", libsimx))
c_PauseCommunication = CFUNCTYPE(c_int32,c_int32, c_ubyte)(("simxPauseCommunication", libsimx))
c_GetInMessageInfo = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_int32))(("simxGetInMessageInfo", libsimx))
c_GetOutMessageInfo = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_int32))(("simxGetOutMessageInfo", libsimx))
c_GetConnectionId = CFUNCTYPE(c_int32,c_int32)(("simxGetConnectionId", libsimx))
c_CreateBuffer = CFUNCTYPE(POINTER(c_ubyte), c_int32)(("simxCreateBuffer", libsimx))
c_ReleaseBuffer = CFUNCTYPE(None, c_void_p)(("simxReleaseBuffer", libsimx))
c_TransferFile = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), POINTER(c_char), c_int32, c_int32)(("simxTransferFile", libsimx))
c_EraseFile = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), c_int32)(("simxEraseFile", libsimx))
c_GetAndClearStringSignal = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), POINTER(POINTER(c_ubyte)), POINTER(c_int32), c_int32)(("simxGetAndClearStringSignal", libsimx))
c_CreateDummy = CFUNCTYPE(c_int32,c_int32, c_float, POINTER(c_ubyte), POINTER(c_int32), c_int32)(("simxCreateDummy", libsimx))
c_Query = CFUNCTYPE(c_int32,c_int32, POINTER(c_char), POINTER(c_ubyte), c_int32, POINTER(c_char), POINTER(POINTER(c_ubyte)), POINTER(c_int32), c_int32)(("simxQuery", libsimx))
c_GetObjectGroupData = CFUNCTYPE(c_int32,c_int32, c_int32, c_int32, POINTER(c_int32), POINTER(POINTER(c_int32)), POINTER(c_int32), POINTER(POINTER(c_int32)), POINTER(c_int32), POINTER(POINTER(c_float)), POINTER(c_int32), POINTER(POINTER(c_char)), c_int32)(("simxGetObjectGroupData", libsimx))
c_GetObjectVelocity = CFUNCTYPE(c_int32,c_int32, c_int32, POINTER(c_float), POINTER(c_float), c_int32)(("simxGetObjectVelocity", libsimx))
#API functions
def simxGetJointPosition(clientID, jointHandle, operationMode):
    '''
    Retrieve the intrinsic position of a joint.
    Returns (returnCode, position). See the V-REP user manual for details.
    '''
    pos = c_float()
    ret = c_GetJointPosition(clientID, jointHandle, byref(pos), operationMode)
    return ret, pos.value
def simxSetJointPosition(clientID, jointHandle, position, operationMode):
    '''
    Thin pass-through to the remote API call simxSetJointPosition; returns the
    call's integer return code. See the V-REP user manual for details.
    '''
    return c_SetJointPosition(clientID, jointHandle, position, operationMode)
def simxGetJointMatrix(clientID, jointHandle, operationMode):
    '''
    Retrieve the intrinsic transformation matrix of a joint.
    Returns (returnCode, matrix) where matrix is a flat list of 12 floats.
    See the V-REP user manual for details.
    '''
    raw = (c_float*12)()
    ret = c_GetJointMatrix(clientID, jointHandle, raw, operationMode)
    return ret, list(raw)
def simxSetSphericalJointMatrix(clientID, jointHandle, matrix, operationMode):
    '''
    Set the intrinsic transformation matrix of a spherical joint.
    *matrix* is a sequence of 12 floats. See the V-REP user manual for details.
    '''
    c_matrix = (c_float*12)(*matrix)
    return c_SetSphericalJointMatrix(clientID, jointHandle, c_matrix, operationMode)
def simxSetJointTargetVelocity(clientID, jointHandle, targetVelocity, operationMode):
    '''
    Thin pass-through to the remote API call simxSetJointTargetVelocity; returns
    the call's integer return code. See the V-REP user manual for details.
    '''
    return c_SetJointTargetVelocity(clientID, jointHandle, targetVelocity, operationMode)
def simxSetJointTargetPosition(clientID, jointHandle, targetPosition, operationMode):
    '''
    Thin pass-through to the remote API call simxSetJointTargetPosition; returns
    the call's integer return code. See the V-REP user manual for details.
    '''
    return c_SetJointTargetPosition(clientID, jointHandle, targetPosition, operationMode)
def simxJointGetForce(clientID, jointHandle, operationMode):
    '''
    Retrieve the force/torque applied to a joint.
    Returns (returnCode, force). See the V-REP user manual for details.
    '''
    out_force = c_float()
    ret = c_JointGetForce(clientID, jointHandle, byref(out_force), operationMode)
    return ret, out_force.value
def simxSetJointForce(clientID, jointHandle, force, operationMode):
    '''
    Thin pass-through to the remote API call simxSetJointForce; returns the
    call's integer return code. See the V-REP user manual for details.
    '''
    return c_SetJointForce(clientID, jointHandle, force, operationMode)
def simxReadForceSensor(clientID, forceSensorHandle, operationMode):
    '''
    Read the value of a force sensor.
    Returns (returnCode, state, forceVector, torqueVector) where state is the
    raw sensor-state byte as an int and the vectors are 3-element float lists.
    See the V-REP user manual for details.
    '''
    state = c_ubyte()
    forceVector = (c_float*3)()
    torqueVector = (c_float*3)()
    ret = c_ReadForceSensor(clientID, forceSensorHandle, byref(state), forceVector, torqueVector, operationMode)
    arr1 = []
    for i in range(3):
        arr1.append(forceVector[i])
    arr2 = []
    for i in range(3):
        arr2.append(torqueVector[i])
    # BUG FIX: c_ubyte.value is already an int, so ord(state.value) raised
    # TypeError on every call; return the integer state byte directly.
    return ret, state.value, arr1, arr2
def simxBreakForceSensor(clientID, forceSensorHandle, operationMode):
    '''
    Thin pass-through to the remote API call simxBreakForceSensor; returns the
    call's integer return code. See the V-REP user manual for details.
    '''
    return c_BreakForceSensor(clientID, forceSensorHandle, operationMode)
def simxReadVisionSensor(clientID, sensorHandle, operationMode):
    '''
    Read the detection state and auxiliary packet values of a vision sensor.
    Returns (returnCode, detectionState, auxValues) where auxValues is a list
    of per-packet value slices. See the V-REP user manual for details.
    '''
    detectionState = c_ubyte()
    auxValues = pointer(c_float())
    auxValuesCount = pointer(c_int())
    ret = c_ReadVisionSensor(clientID, sensorHandle, byref(detectionState), byref(auxValues), byref(auxValuesCount), operationMode)
    auxValues2 = []
    if ret == 0:
        s = 0
        # auxValuesCount[0] is the number of packets; auxValuesCount[i+1] is
        # the value count of packet i, stored contiguously in auxValues.
        for i in range(auxValuesCount[0]):
            auxValues2.append(auxValues[s:s+auxValuesCount[i+1]])
            s += auxValuesCount[i+1]
        #free C buffers
        c_ReleaseBuffer(auxValues)
        c_ReleaseBuffer(auxValuesCount)
    # BUG FIX: detectionState.value is an int; comparing it to the string '\0'
    # was always True, so the detection state was always reported as True.
    return ret, bool(detectionState.value != 0), auxValues2
def simxGetObjectHandle(clientID, objectName, operationMode):
    '''
    Look up the handle of a scene object by name.
    Returns (returnCode, handle). See the V-REP user manual for details.
    '''
    obj_handle = c_int()
    ret = c_GetObjectHandle(clientID, objectName, byref(obj_handle), operationMode)
    return ret, obj_handle.value
def simxGetVisionSensorImage(clientID, sensorHandle, options, operationMode):
    '''
    Retrieve the image of a vision sensor.
    Returns (returnCode, resolution, image) where resolution is [x, y] and
    image is a flat list of signed byte values (1 byte/pixel when bit 0 of
    *options* is set, else 3 bytes/pixel RGB). See the V-REP user manual.
    '''
    resolution = (c_int*2)()
    c_image = pointer(c_byte())
    # BUG FIX: 'options and 1' is a logical AND (truthy for ANY non-zero
    # options value); bit 0 selects greyscale, so a bitwise AND is required.
    if (options & 1) != 0:
        bytesPerPixel = 1
    else:
        bytesPerPixel = 3
    ret = c_GetVisionSensorImage(clientID, sensorHandle, resolution, byref(c_image), options, operationMode)
    reso = []
    image = []
    if (ret == 0):
        image = [None]*resolution[0]*resolution[1]*bytesPerPixel
        for i in range(resolution[0] * resolution[1] * bytesPerPixel):
            image[i] = c_image[i]
        for i in range(2):
            reso.append(resolution[i])
    return ret, reso, image
def simxSetVisionSensorImage(clientID, sensorHandle, image, options, operationMode):
    '''
    Push an image to a vision sensor. *image* is a flat sequence of byte
    values. Returns the call's integer return code. See the V-REP user manual.
    '''
    size = len(image)
    buf = (c_byte*size)(*image)
    return c_SetVisionSensorImage(clientID, sensorHandle, buf, size, options, operationMode)
def simxGetVisionSensorDepthBuffer(clientID, sensorHandle, operationMode):
    '''
    Retrieve the depth buffer of a vision sensor.
    Returns (returnCode, resolution, depthBuffer); resolution is [x, y] and
    depthBuffer holds resolution[0]*resolution[1] floats (empty on failure).
    See the V-REP user manual for details.
    '''
    c_depth = pointer(c_float())
    resolution = (c_int*2)()
    ret = c_GetVisionSensorDepthBuffer(clientID, sensorHandle, resolution, byref(c_depth), operationMode)
    reso = []
    depth = []
    if ret == 0:
        pixel_count = resolution[0] * resolution[1]
        depth = [c_depth[i] for i in range(pixel_count)]
        reso = [resolution[0], resolution[1]]
    return ret, reso, depth
def simxGetObjectChild(clientID, parentObjectHandle, childIndex, operationMode):
    '''
    Retrieve the handle of a child object of a given parent.
    Returns (returnCode, childObjectHandle). See the V-REP user manual.
    '''
    child = c_int()
    ret = c_GetObjectChild(clientID, parentObjectHandle, childIndex, byref(child), operationMode)
    return ret, child.value
def simxGetObjectParent(clientID, childObjectHandle, operationMode):
    '''
    Retrieve the handle of an object's parent.
    Returns (returnCode, parentObjectHandle). See the V-REP user manual.
    '''
    parent = c_int()
    ret = c_GetObjectParent(clientID, childObjectHandle, byref(parent), operationMode)
    return ret, parent.value
def simxReadProximitySensor(clientID, sensorHandle, operationMode):
    '''
    Read the state of a proximity sensor.
    Returns (returnCode, detectionState, detectedPoint, detectedObjectHandle,
    detectedSurfaceNormalVector). See the V-REP user manual for details.
    '''
    detectionState = c_ubyte()
    detectedObjectHandle = c_int()
    detectedPoint = (c_float*3)()
    detectedSurfaceNormalVector = (c_float*3)()
    ret = c_ReadProximitySensor(clientID, sensorHandle, byref(detectionState), detectedPoint, byref(detectedObjectHandle), detectedSurfaceNormalVector, operationMode)
    arr1 = []
    for i in range(3):
        arr1.append(detectedPoint[i])
    arr2 = []
    for i in range(3):
        arr2.append(detectedSurfaceNormalVector[i])
    # BUG FIX: detectionState.value is an int; comparing it to the string '\0'
    # was always True, so a detection was always reported.
    return ret, bool(detectionState.value != 0), arr1, detectedObjectHandle.value, arr2
def simxLoadModel(clientID, modelPathAndName, options, operationMode):
    '''
    Load a model file into the scene.
    Returns (returnCode, baseHandle). See the V-REP user manual for details.
    '''
    base = c_int()
    ret = c_LoadModel(clientID, modelPathAndName, options, byref(base), operationMode)
    return ret, base.value
def simxLoadUI(clientID, uiPathAndName, options, operationMode):
    '''
    Load a custom user interface file.
    Returns (returnCode, handles) where handles lists the loaded UI handles
    (empty on failure). See the V-REP user manual for details.
    '''
    count = c_int()
    c_handles = pointer(c_int())
    ret = c_LoadUI(clientID, uiPathAndName, options, byref(count), byref(c_handles), operationMode)
    handles = []
    if ret == 0:
        handles = [c_handles[i] for i in range(count.value)]
        # free the C-side buffer now that all values are copied out
        c_ReleaseBuffer(c_handles)
    return ret, handles
def simxLoadScene(clientID, scenePathAndName, options, operationMode):
    '''
    Thin pass-through to the remote API call simxLoadScene; returns the
    call's integer return code. See the V-REP user manual for details.
    '''
    return c_LoadScene(clientID, scenePathAndName, options, operationMode)
def simxStartSimulation(clientID, operationMode):
    '''
    Thin pass-through to the remote API call simxStartSimulation; returns the
    call's integer return code. See the V-REP user manual for details.
    '''
    return c_StartSimulation(clientID, operationMode)
def simxPauseSimulation(clientID, operationMode):
    '''
    Thin pass-through to the remote API call simxPauseSimulation; returns the
    call's integer return code. See the V-REP user manual for details.
    '''
    return c_PauseSimulation(clientID, operationMode)
def simxStopSimulation(clientID, operationMode):
    '''
    Thin pass-through to the remote API call simxStopSimulation; returns the
    call's integer return code. See the V-REP user manual for details.
    '''
    return c_StopSimulation(clientID, operationMode)
def simxGetUIHandle(clientID, uiName, operationMode):
    '''
    Look up the handle of a custom UI by name.
    Returns (returnCode, handle). See the V-REP user manual for details.
    '''
    ui_handle = c_int()
    ret = c_GetUIHandle(clientID, uiName, byref(ui_handle), operationMode)
    return ret, ui_handle.value
def simxGetUISlider(clientID, uiHandle, uiButtonID, operationMode):
    '''
    Retrieve the position of a UI slider button.
    Returns (returnCode, position). See the V-REP user manual for details.
    '''
    slider_pos = c_int()
    ret = c_GetUISlider(clientID, uiHandle, uiButtonID, byref(slider_pos), operationMode)
    return ret, slider_pos.value
def simxSetUISlider(clientID, uiHandle, uiButtonID, position, operationMode):
    '''
    Thin pass-through to the remote API call simxSetUISlider; returns the
    call's integer return code. See the V-REP user manual for details.
    '''
    return c_SetUISlider(clientID, uiHandle, uiButtonID, position, operationMode)
def simxGetUIEventButton(clientID, uiHandle, operationMode):
    '''
    Retrieve the ID of the last button event of a custom UI.
    Returns (returnCode, uiEventButtonID, auxValues) where auxValues is a
    2-element int list. See the V-REP user manual for details.
    '''
    event_button = c_int()
    aux = (c_int*2)()
    ret = c_GetUIEventButton(clientID, uiHandle, byref(event_button), aux, operationMode)
    return ret, event_button.value, list(aux)
def simxGetUIButtonProperty(clientID, uiHandle, uiButtonID, operationMode):
    '''
    Retrieve the property flags of a UI button.
    Returns (returnCode, prop). See the V-REP user manual for details.
    '''
    button_prop = c_int()
    ret = c_GetUIButtonProperty(clientID, uiHandle, uiButtonID, byref(button_prop), operationMode)
    return ret, button_prop.value
def simxSetUIButtonProperty(clientID, uiHandle, uiButtonID, prop, operationMode):
    '''
    Thin pass-through to the remote API call simxSetUIButtonProperty; returns
    the call's integer return code. See the V-REP user manual for details.
    '''
    return c_SetUIButtonProperty(clientID, uiHandle, uiButtonID, prop, operationMode)
def simxAddStatusbarMessage(clientID, message, operationMode):
    '''
    Thin pass-through to the remote API call simxAddStatusbarMessage; returns
    the call's integer return code. See the V-REP user manual for details.
    '''
    return c_AddStatusbarMessage(clientID, message, operationMode)
def simxAuxiliaryConsoleOpen(clientID, title, maxLines, mode, position, size, textColor, backgroundColor, operationMode):
    '''
    Open an auxiliary console window.
    position/size are optional 2-element sequences; textColor/backgroundColor
    are optional 3-element float sequences (None leaves the server default).
    Returns (returnCode, consoleHandle). See the V-REP user manual for details.
    '''
    consoleHandle = c_int()
    # Idiom fix: compare against None with 'is not None' rather than '!='.
    if position is not None:
        c_position = (c_int*2)(*position)
    else:
        c_position = None
    if size is not None:
        c_size = (c_int*2)(*size)
    else:
        c_size = None
    if textColor is not None:
        c_textColor = (c_float*3)(*textColor)
    else:
        c_textColor = None
    if backgroundColor is not None:
        c_backgroundColor = (c_float*3)(*backgroundColor)
    else:
        c_backgroundColor = None
    return c_AuxiliaryConsoleOpen(clientID, title, maxLines, mode, c_position, c_size, c_textColor, c_backgroundColor, byref(consoleHandle), operationMode), consoleHandle.value
def simxAuxiliaryConsoleClose(clientID, consoleHandle, operationMode):
    '''
    Thin pass-through to the remote API call simxAuxiliaryConsoleClose; returns
    the call's integer return code. See the V-REP user manual for details.
    '''
    return c_AuxiliaryConsoleClose(clientID, consoleHandle, operationMode)
def simxAuxiliaryConsolePrint(clientID, consoleHandle, txt, operationMode):
    '''
    Thin pass-through to the remote API call simxAuxiliaryConsolePrint; returns
    the call's integer return code. See the V-REP user manual for details.
    '''
    return c_AuxiliaryConsolePrint(clientID, consoleHandle, txt, operationMode)
def simxAuxiliaryConsoleShow(clientID, consoleHandle, showState, operationMode):
    '''
    Thin pass-through to the remote API call simxAuxiliaryConsoleShow; returns
    the call's integer return code. See the V-REP user manual for details.
    '''
    return c_AuxiliaryConsoleShow(clientID, consoleHandle, showState, operationMode)
def simxGetObjectOrientation(clientID, objectHandle, relativeToObjectHandle, operationMode):
    '''
    Retrieve the orientation (Euler angles) of an object.
    Returns (returnCode, eulerAngles) where eulerAngles is a 3-float list.
    See the V-REP user manual for details.
    '''
    euler = (c_float*3)()
    ret = c_GetObjectOrientation(clientID, objectHandle, relativeToObjectHandle, euler, operationMode)
    return ret, list(euler)
def simxGetObjectPosition(clientID, objectHandle, relativeToObjectHandle, operationMode):
    '''
    Retrieve the position of an object.
    Returns (returnCode, position) where position is a 3-float list.
    See the V-REP user manual for details.
    '''
    pos = (c_float*3)()
    ret = c_GetObjectPosition(clientID, objectHandle, relativeToObjectHandle, pos, operationMode)
    return ret, list(pos)
def simxSetObjectOrientation(clientID, objectHandle, relativeToObjectHandle, eulerAngles, operationMode):
    '''
    Set the orientation (Euler angles) of an object.
    *eulerAngles* is a 3-float sequence. See the V-REP user manual for details.
    '''
    c_angles = (c_float*3)(*eulerAngles)
    return c_SetObjectOrientation(clientID, objectHandle, relativeToObjectHandle, c_angles, operationMode)
def simxSetObjectPosition(clientID, objectHandle, relativeToObjectHandle, position, operationMode):
    '''
    Set the position of an object.
    *position* is a 3-float sequence. See the V-REP user manual for details.
    '''
    pos_arr = (c_float*3)(*position)
    return c_SetObjectPosition(clientID, objectHandle, relativeToObjectHandle, pos_arr, operationMode)
def simxSetObjectParent(clientID, objectHandle, parentObject, keepInPlace, operationMode):
    '''
    Thin pass-through to the remote API call simxSetObjectParent; returns the
    call's integer return code. See the V-REP user manual for details.
    '''
    return c_SetObjectParent(clientID, objectHandle, parentObject, keepInPlace, operationMode)
def simxSetUIButtonLabel(clientID, uiHandle, uiButtonID, upStateLabel, downStateLabel, operationMode):
    '''
    Thin pass-through to the remote API call simxSetUIButtonLabel; returns the
    call's integer return code. See the V-REP user manual for details.
    '''
    return c_SetUIButtonLabel(clientID, uiHandle, uiButtonID, upStateLabel, downStateLabel, operationMode)
def simxGetLastErrors(clientID, operationMode):
    '''
    Retrieve the last errors reported by the remote API server.
    Returns (returnCode, errors) where errors is a list of error strings
    decoded from a NUL-separated C buffer.
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    errors =[]
    errorCnt = c_int()
    errorStrings = pointer(c_char())
    ret = c_GetLastErrors(clientID, byref(errorCnt), byref(errorStrings), operationMode)
    if ret == 0:
        s = 0
        for i in range(errorCnt.value):
            a = bytearray()
            # NOTE(review): indexing a c_char pointer yields 1-char str objects
            # on Python 2 (this file's apparent target); on Python 3 it yields
            # bytes, so this comparison/append would need adapting — confirm.
            while errorStrings[s] != '\0':
                a.append(errorStrings[s])
                s += 1
            s += 1 #skip the NUL terminator between consecutive error strings
            errors.append(str(a))
    return ret, errors
def simxGetArrayParameter(clientID, paramIdentifier, operationMode):
    '''
    Retrieve an array (3-float) parameter value.
    Returns (returnCode, paramValues). See the V-REP user manual for details.
    '''
    values = (c_float*3)()
    ret = c_GetArrayParameter(clientID, paramIdentifier, values, operationMode)
    return ret, list(values)
def simxSetArrayParameter(clientID, paramIdentifier, paramValues, operationMode):
    '''
    Set an array (3-float) parameter value.
    *paramValues* is a 3-float sequence. See the V-REP user manual for details.
    '''
    values = (c_float*3)(*paramValues)
    return c_SetArrayParameter(clientID, paramIdentifier, values, operationMode)
def simxGetBooleanParameter(clientID, paramIdentifier, operationMode):
    '''
    Retrieve a boolean parameter value.
    Returns (returnCode, paramValue) with paramValue as a bool.
    See the V-REP user manual for details.
    '''
    paramValue = c_ubyte()
    ret = c_GetBooleanParameter(clientID, paramIdentifier, byref(paramValue), operationMode)
    # BUG FIX: paramValue.value is an int; comparing it to the string '\0'
    # was always True, so the parameter always read as True.
    return ret, bool(paramValue.value != 0)
def simxSetBooleanParameter(clientID, paramIdentifier, paramValue, operationMode):
    '''
    Thin pass-through to the remote API call simxSetBooleanParameter; returns
    the call's integer return code. See the V-REP user manual for details.
    '''
    return c_SetBooleanParameter(clientID, paramIdentifier, paramValue, operationMode)
def simxGetIntegerParameter(clientID, paramIdentifier, operationMode):
    '''
    Retrieve an integer parameter value.
    Returns (returnCode, paramValue). See the V-REP user manual for details.
    '''
    value = c_int()
    ret = c_GetIntegerParameter(clientID, paramIdentifier, byref(value), operationMode)
    return ret, value.value
def simxSetIntegerParameter(clientID, paramIdentifier, paramValue, operationMode):
    '''
    Thin pass-through to the remote API call simxSetIntegerParameter; returns
    the call's integer return code. See the V-REP user manual for details.
    '''
    return c_SetIntegerParameter(clientID, paramIdentifier, paramValue, operationMode)
def simxGetFloatingParameter(clientID, paramIdentifier, operationMode):
    '''
    Retrieve a floating-point parameter value.
    Returns (returnCode, paramValue). See the V-REP user manual for details.
    '''
    value = c_float()
    ret = c_GetFloatingParameter(clientID, paramIdentifier, byref(value), operationMode)
    return ret, value.value
def simxSetFloatingParameter(clientID, paramIdentifier, paramValue, operationMode):
    '''
    Thin pass-through to the remote API call simxSetFloatingParameter; returns
    the call's integer return code. See the V-REP user manual for details.
    '''
    return c_SetFloatingParameter(clientID, paramIdentifier, paramValue, operationMode)
def simxGetStringParameter(clientID, paramIdentifier, operationMode):
    '''
    Retrieve a string parameter value.
    Returns (returnCode, paramValue) with the NUL-terminated C string copied
    into a Python string.
    Please have a look at the function description/documentation in the V-REP user manual
    '''
    paramValue = pointer(c_char())
    ret = c_GetStringParameter(clientID, paramIdentifier, byref(paramValue), operationMode)
    a = bytearray()
    if ret == 0:
        i = 0
        # NOTE(review): this char-by-char scan assumes Python 2 semantics of a
        # c_char pointer (1-char str per index) — confirm target version.
        while paramValue[i] != '\0':
            a.append(paramValue[i])
            i=i+1
    return ret, str(a)
def simxGetCollisionHandle(clientID, collisionObjectName, operationMode):
    '''
    Look up the handle of a collision object by name.
    Returns (returnCode, handle). See the V-REP user manual for details.
    '''
    coll_handle = c_int()
    ret = c_GetCollisionHandle(clientID, collisionObjectName, byref(coll_handle), operationMode)
    return ret, coll_handle.value
def simxGetDistanceHandle(clientID, distanceObjectName, operationMode):
    '''
    Look up the handle of a distance object by name.
    Returns (returnCode, handle). See the V-REP user manual for details.
    '''
    dist_handle = c_int()
    ret = c_GetDistanceHandle(clientID, distanceObjectName, byref(dist_handle), operationMode)
    return ret, dist_handle.value
def simxReadCollision(clientID, collisionObjectHandle, operationMode):
    '''
    Read the state of a collision object.
    Returns (returnCode, collisionState) with collisionState as a bool.
    See the V-REP user manual for details.
    '''
    collisionState = c_ubyte()
    ret = c_ReadCollision(clientID, collisionObjectHandle, byref(collisionState), operationMode)
    # BUG FIX: collisionState.value is an int; comparing it to the string '\0'
    # was always True, so a collision was always reported.
    return ret, bool(collisionState.value != 0)
def simxReadDistance(clientID, distanceObjectHandle, operationMode):
    '''
    Read the value of a distance object.
    Returns (returnCode, minimumDistance). See the V-REP user manual.
    '''
    min_dist = c_float()
    ret = c_ReadDistance(clientID, distanceObjectHandle, byref(min_dist), operationMode)
    return ret, min_dist.value
def simxRemoveObject(clientID, objectHandle, operationMode):
    '''
    Thin pass-through to the remote API call simxRemoveObject; returns the
    call's integer return code. See the V-REP user manual for details.
    '''
    return c_RemoveObject(clientID, objectHandle, operationMode)
def simxRemoveUI(clientID, uiHandle, operationMode):
    '''
    Thin pass-through to the remote API call simxRemoveUI; returns the
    call's integer return code. See the V-REP user manual for details.
    '''
    return c_RemoveUI(clientID, uiHandle, operationMode)
def simxCloseScene(clientID, operationMode):
    '''
    Thin pass-through to the remote API call simxCloseScene; returns the
    call's integer return code. See the V-REP user manual for details.
    '''
    return c_CloseScene(clientID, operationMode)
def simxGetObjects(clientID, objectType, operationMode):
    '''
    Retrieve the handles of all objects of a given type.
    Returns (returnCode, handles); handles is empty on failure.
    See the V-REP user manual for details.
    '''
    count = c_int()
    c_handles = pointer(c_int())
    ret = c_GetObjects(clientID, objectType, byref(count), byref(c_handles), operationMode)
    handles = []
    if ret == 0:
        handles = [c_handles[i] for i in range(count.value)]
    return ret, handles
def simxDisplayDialog(clientID, titleText, mainText, dialogType, initialText, titleColors, dialogColors, operationMode):
    '''
    Display a generic dialog box.
    titleColors/dialogColors are optional 6-float sequences (None keeps the
    server default). Returns (returnCode, dialogHandle, uiHandle).
    See the V-REP user manual for details.
    '''
    # Idiom fix: compare against None with 'is not None' rather than '!='.
    if titleColors is not None:
        c_titleColors = (c_float*6)(*titleColors)
    else:
        c_titleColors = None
    if dialogColors is not None:
        c_dialogColors = (c_float*6)(*dialogColors)
    else:
        c_dialogColors = None
    c_dialogHandle = c_int()
    c_uiHandle = c_int()
    return c_DisplayDialog(clientID, titleText, mainText, dialogType, initialText, c_titleColors, c_dialogColors, byref(c_dialogHandle), byref(c_uiHandle), operationMode), c_dialogHandle.value, c_uiHandle.value
def simxEndDialog(clientID, dialogHandle, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_EndDialog(clientID, dialogHandle, operationMode)
def simxGetDialogInput(clientID, dialogHandle, operationMode):
    '''
    Retrieve the text a user typed into a dialog.

    Please have a look at the function description/documentation in the V-REP user manual
    '''
    # Out-parameter: the C layer fills this with a pointer to a
    # null-terminated character buffer.
    inputText = pointer(c_char())
    ret = c_GetDialogInput(clientID, dialogHandle, byref(inputText), operationMode)
    a = bytearray()
    if ret == 0:
        # Copy bytes one by one until the terminating NUL.
        # NOTE(review): this is Python-2-only code -- on Python 3 indexing a
        # c_char pointer yields bytes, so the comparison with the str '\0'
        # would never be true; confirm target interpreter before porting.
        i = 0
        while inputText[i] != '\0':
            a.append(inputText[i])
            i = i+1
    # str(bytearray) is the decoded text on Python 2.
    return ret, str(a)
def simxGetDialogResult(clientID, dialogHandle, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
result = c_int()
return c_GetDialogResult(clientID, dialogHandle, byref(result), operationMode), result.value
def simxCopyPasteObjects(clientID, objectHandles, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
c_objectHandles = (c_int*len(objectHandles))(*objectHandles)
newObjectCount = c_int()
newObjectHandles = pointer(c_int())
ret = c_CopyPasteObjects(clientID, c_objectHandles, len(objectHandles), byref(newObjectHandles), byref(newObjectCount), operationMode)
newobj = []
if ret == 0:
for i in range(newObjectCount.value):
newobj.append(newObjectHandles[i])
return ret, newobj
def simxGetObjectSelection(clientID, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
objectCount = c_int()
objectHandles = pointer(c_int())
ret = c_GetObjectSelection(clientID, byref(objectHandles), byref(objectCount), operationMode)
newobj = []
if ret == 0:
for i in range(objectCount.value):
newobj.append(objectHandles[i])
return ret, newobj
def simxSetObjectSelection(clientID, objectHandles, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
c_objectHandles = (c_int*len(objectHandles))(*objectHandles)
return c_SetObjectSelection(clientID, c_objectHandles, len(objectHandles), operationMode)
def simxClearFloatSignal(clientID, signalName, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_ClearFloatSignal(clientID, signalName, operationMode)
def simxClearIntegerSignal(clientID, signalName, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_ClearIntegerSignal(clientID, signalName, operationMode)
def simxClearStringSignal(clientID, signalName, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_ClearStringSignal(clientID, signalName, operationMode)
def simxGetFloatSignal(clientID, signalName, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
signalValue = c_float()
return c_GetFloatSignal(clientID, signalName, byref(signalValue), operationMode), signalValue.value
def simxGetIntegerSignal(clientID, signalName, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
signalValue = c_int()
return c_GetIntegerSignal(clientID, signalName, byref(signalValue), operationMode), signalValue.value
def simxGetStringSignal(clientID, signalName, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
signalLength = c_int();
signalValue = pointer(c_ubyte())
ret = c_GetStringSignal(clientID, signalName, byref(signalValue), byref(signalLength), operationMode)
a = bytearray()
if ret == 0:
for i in range(signalLength.value):
a.append(signalValue[i])
return ret, str(a)
def simxGetAndClearStringSignal(clientID, signalName, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
signalLength = c_int();
signalValue = pointer(c_ubyte())
ret = c_GetAndClearStringSignal(clientID, signalName, byref(signalValue), byref(signalLength), operationMode)
a = bytearray()
if ret == 0:
for i in range(signalLength.value):
a.append(signalValue[i])
return ret, str(a)
def simxSetFloatSignal(clientID, signalName, signalValue, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_SetFloatSignal(clientID, signalName, signalValue, operationMode)
def simxSetIntegerSignal(clientID, signalName, signalValue, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_SetIntegerSignal(clientID, signalName, signalValue, operationMode)
def simxSetStringSignal(clientID, signalName, signalValue, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_SetStringSignal(clientID, signalName, signalValue, len(signalValue), operationMode)
def simxAppendStringSignal(clientID, signalName, signalValue, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_AppendStringSignal(clientID, signalName, signalValue, len(signalValue), operationMode)
def simxGetObjectFloatParameter(clientID, objectHandle, parameterID, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
parameterValue = c_float()
return c_GetObjectFloatParameter(clientID, objectHandle, parameterID, byref(parameterValue), operationMode), parameterValue.value
def simxSetObjectFloatParameter(clientID, objectHandle, parameterID, parameterValue, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_SetObjectFloatParameter(clientID, objectHandle, parameterID, parameterValue, operationMode)
def simxGetObjectIntParameter(clientID, objectHandle, parameterID, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
parameterValue = c_int()
return c_GetObjectIntParameter(clientID, objectHandle, parameterID, byref(parameterValue), operationMode), parameterValue.value
def simxSetObjectIntParameter(clientID, objectHandle, parameterID, parameterValue, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_SetObjectIntParameter(clientID, objectHandle, parameterID, parameterValue, operationMode)
def simxGetModelProperty(clientID, objectHandle, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
prop = c_int()
return c_GetModelProperty(clientID, objectHandle, byref(prop), operationMode), prop.value
def simxSetModelProperty(clientID, objectHandle, prop, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_SetModelProperty(clientID, objectHandle, prop, operationMode)
def simxStart(connectionAddress, connectionPort, waitUntilConnected, doNotReconnectOnceDisconnected, timeOutInMs, commThreadCycleInMs):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_Start(connectionAddress, connectionPort, waitUntilConnected, doNotReconnectOnceDisconnected, timeOutInMs, commThreadCycleInMs)
def simxFinish(clientID):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_Finish(clientID)
def simxGetPingTime(clientID):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
pingTime = c_int()
return c_GetPingTime(clientID, byref(pingTime)), pingTime.value
def simxGetLastCmdTime(clientID):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_GetLastCmdTime(clientID)
def simxSynchronousTrigger(clientID):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_SynchronousTrigger(clientID)
def simxSynchronous(clientID, enable):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_Synchronous(clientID, enable)
def simxPauseCommunication(clientID, enable):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_PauseCommunication(clientID, enable)
def simxGetInMessageInfo(clientID, infoType):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
info = c_int()
return c_GetInMessageInfo(clientID, infoType, byref(info)), info.value
def simxGetOutMessageInfo(clientID, infoType):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
info = c_int()
return c_GetOutMessageInfo(clientID, infoType, byref(info)), info.value
def simxGetConnectionId(clientID):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_GetConnectionId(clientID)
def simxCreateBuffer(bufferSize):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_CreateBuffer(bufferSize)
def simxReleaseBuffer(buffer):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_ReleaseBuffer(buffer)
def simxTransferFile(clientID, filePathAndName, fileName_serverSide, timeOut, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_TransferFile(clientID, filePathAndName, fileName_serverSide, timeOut, operationMode)
def simxEraseFile(clientID, fileName_serverSide, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
return c_EraseFile(clientID, fileName_serverSide, operationMode)
def simxCreateDummy(clientID, size, color, operationMode):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
handle = c_int()
if color != None:
c_color = (c_ubyte*12)(*color)
else:
c_color = None
return c_CreateDummy(clientID, size, c_color, byref(handle), operationMode), handle.value
def simxQuery(clientID, signalName, signalValue, retSignalName, timeOutInMs):
'''
Please have a look at the function description/documentation in the V-REP user manual
'''
retSignalLength = c_int();
retSignalValue = pointer(c_ubyte())
ret = c_Query(clientID, signalName, signalValue, len(signalValue), retSignalName, byref(retSignalValue), byref(retSignalLength), timeOutInMs)
a = bytearray()
if ret == 0:
for i in range(retSignalLength.value):
a.append(retSignalValue[i])
return ret, str(a)
def simxGetObjectGroupData(clientID, objectType, dataType, operationMode):
    '''
    Retrieve bulk data (handles plus int/float/string payloads) for a group
    of objects in a single remote call.

    Please have a look at the function description/documentation in the V-REP user manual
    '''
    handles =[]
    intData =[]
    floatData =[]
    stringData =[]
    # Count/pointer pairs acting as out-parameters for the four arrays.
    handlesC = c_int()
    handlesP = pointer(c_int())
    intDataC = c_int()
    intDataP = pointer(c_int())
    floatDataC = c_int()
    floatDataP = pointer(c_float())
    stringDataC = c_int()
    stringDataP = pointer(c_char())
    ret = c_GetObjectGroupData(clientID, objectType, dataType, byref(handlesC), byref(handlesP), byref(intDataC), byref(intDataP), byref(floatDataC), byref(floatDataP), byref(stringDataC), byref(stringDataP), operationMode)
    if ret == 0:
        for i in range(handlesC.value):
            handles.append(handlesP[i])
        for i in range(intDataC.value):
            intData.append(intDataP[i])
        for i in range(floatDataC.value):
            floatData.append(floatDataP[i])
        # The string payload is a sequence of NUL-terminated strings packed
        # back-to-back; `s` is a cursor advancing over the whole buffer.
        s = 0
        for i in range(stringDataC.value):
            a = bytearray()
            while stringDataP[s] != '\0':
                a.append(stringDataP[s])
                s += 1
            s += 1 #skip null
            stringData.append(str(a))
    return ret, handles, intData, floatData, stringData
def simxGetObjectVelocity(clientID, objectHandle, operationMode):
    '''
    Retrieve the linear and angular velocity of an object.

    Please have a look at the function description/documentation in the V-REP user manual

    Returns (returnCode, linearVelocity, angularVelocity) where both
    velocities are 3-element lists of floats.
    '''
    linearVel = (c_float*3)()
    angularVel = (c_float*3)()
    # Bug fix: this wrapper previously called c_ReadForceSensor (a
    # copy/paste error), which queries a force sensor instead of the
    # object's velocity.
    ret = c_GetObjectVelocity(clientID, objectHandle, linearVel, angularVel, operationMode)
    arr1 = []
    for i in range(3):
        arr1.append(linearVel[i])
    arr2 = []
    for i in range(3):
        arr2.append(angularVel[i])
    return ret, arr1, arr2
def simxPackInts(intList):
    '''
    Pack a list of integers into a little-endian binary string.

    Please have a look at the function description/documentation in the V-REP user manual
    '''
    # Join the packed chunks instead of repeated '+=' concatenation
    # (which is quadratic), and start from a bytes literal so this also
    # works on Python 3, where struct.pack returns bytes rather than str.
    # On Python 2, b'' == '' so the return type is unchanged.
    return b''.join(struct.pack('<i', value) for value in intList)
def simxUnpackInts(intsPackedInString):
    '''
    Unpack a little-endian binary string into a list of integers.

    Please have a look at the function description/documentation in the V-REP user manual
    '''
    # Use floor division so the function keeps working under Python 3,
    # where '/' on two ints yields a float (and range() would then fail).
    return [struct.unpack('<i', intsPackedInString[4 * i:4 * (i + 1)])[0]
            for i in range(len(intsPackedInString) // 4)]
def simxPackFloats(floatList):
    '''
    Pack a list of floats into a little-endian binary string.

    Please have a look at the function description/documentation in the V-REP user manual
    '''
    # Join the packed chunks instead of repeated '+=' concatenation
    # (which is quadratic), and start from a bytes literal so this also
    # works on Python 3, where struct.pack returns bytes rather than str.
    return b''.join(struct.pack('<f', value) for value in floatList)
def simxUnpackFloats(floatsPackedInString):
    '''
    Unpack a little-endian binary string into a list of floats.

    Please have a look at the function description/documentation in the V-REP user manual
    '''
    # Use floor division so the function keeps working under Python 3,
    # where '/' on two ints yields a float (and range() would then fail).
    return [struct.unpack('<f', floatsPackedInString[4 * i:4 * (i + 1)])[0]
            for i in range(len(floatsPackedInString) // 4)]
| gpl-3.0 |
probcomp/cgpm | src/uncorrelated/linear.py | 1 | 1827 | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2016 MIT Probabilistic Computing Project
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from scipy.stats import norm
from cgpm.uncorrelated.undirected import UnDirectedXyGpm
from cgpm.utils import mvnormal as multivariate_normal
class Linear(UnDirectedXyGpm):
    """Bivariate Gaussian X-Y GPM whose correlation is ``1 - noise``."""

    def simulate_joint(self):
        """Draw one (x, y) sample from the joint bivariate normal."""
        rho = 1 - self.noise
        return self.rng.multivariate_normal([0, 0], [[1, rho], [rho, 1]])

    def simulate_conditional(self, z):
        """Sample w ~ p(w | z) from the conditional Gaussian."""
        loc = self.conditional_mean(z)
        scale = np.sqrt(self.conditional_variance(z))
        return self.rng.normal(loc=loc, scale=scale)

    def logpdf_joint(self, x, y):
        """Evaluate log p(x, y) under the joint bivariate normal."""
        rho = 1 - self.noise
        point = np.array([x, y])
        mean = np.array([0, 0])
        cov = np.array([[1, rho], [rho, 1]])
        return multivariate_normal.logpdf(point, mean, cov)

    def logpdf_marginal(self, z):
        """Evaluate log p(z); each marginal is standard normal."""
        return norm.logpdf(z, scale=1)

    def logpdf_conditional(self, w, z):
        """Evaluate log p(w | z) under the conditional Gaussian."""
        loc = self.conditional_mean(z)
        scale = np.sqrt(self.conditional_variance(z))
        return norm.logpdf(w, loc=loc, scale=scale)

    def conditional_mean(self, z):
        """Conditional mean of w given z, i.e. (1 - noise) * z."""
        return (1-self.noise)*z

    def conditional_variance(self, z):
        """Conditional variance of w given z, i.e. 1 - (1 - noise)**2."""
        return (1-(1-self.noise)**2)

    def mutual_information(self):
        """Mutual information of a bivariate normal: -log(1 - rho^2) / 2."""
        rho = 1 - self.noise
        return -.5 * np.log(1 - rho ** 2)
| apache-2.0 |
Permutatrix/servo | etc/ci/performance/download_buildbot_timings.py | 20 | 6836 | #!/usr/bin/env python3
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import argparse
import csv
from datetime import datetime, date
import json
from math import floor
import os
from urllib.request import urlopen, HTTPError
SCRIPT_PATH = os.path.split(__file__)[0]
def main():
    """Download buildbot build metadata and export per-month CSV step timings."""
    default_output_dir = os.path.join(SCRIPT_PATH, 'output')
    default_cache_dir = os.path.join(SCRIPT_PATH, '.cache')
    parser = argparse.ArgumentParser(
        description="Download buildbot metadata"
    )
    parser.add_argument("--index-url",
                        type=str,
                        default='http://build.servo.org/json',
                        help="the URL to get the JSON index data index from. "
                        "Default: http://build.servo.org/json")
    parser.add_argument("--build-url",
                        type=str,
                        default='http://build.servo.org/json/builders/{}/builds/{}',
                        help="the URL to get the JSON build data from. "
                        "Default: http://build.servo.org/json/builders/{}/builds/{}")
    parser.add_argument("--cache-dir",
                        type=str,
                        default=default_cache_dir,
                        help="the directory to cache JSON files in. Default: " + default_cache_dir)
    parser.add_argument("--cache-name",
                        type=str,
                        default='build-{}-{}.json',
                        help="the filename to cache JSON data in. "
                        "Default: build-{}-{}.json")
    parser.add_argument("--output-dir",
                        type=str,
                        default=default_output_dir,
                        help="the directory to save the CSV data to. Default: " + default_output_dir)
    parser.add_argument("--output-name",
                        type=str,
                        default='builds-{}-{}.csv',
                        help="the filename to save the CSV data to. "
                        "Default: builds-{}-{}.csv")
    parser.add_argument("--verbose", "-v",
                        action='store_true',
                        help="print every HTTP request")
    args = parser.parse_args()

    os.makedirs(args.cache_dir, exist_ok=True)
    os.makedirs(args.output_dir, exist_ok=True)

    # Get the index to find out the list of builder names
    # Note: this isn't cached
    if args.verbose:
        print("Downloading index {}.".format(args.index_url))
    with urlopen(args.index_url) as response:
        index = json.loads(response.read().decode('utf-8'))

    builds = []

    for builder in sorted(index["builders"]):

        # The most recent build is at offset -1
        # Fetch it to find out the build number
        # Note: this isn't cached
        recent_build_url = args.build_url.format(builder, -1)
        if args.verbose:
            print("Downloading recent build {}.".format(recent_build_url))
        with urlopen(recent_build_url) as response:
            recent_build = json.loads(response.read().decode('utf-8'))
        recent_build_number = recent_build["number"]

        # Download each build, and convert to CSV
        # NOTE(review): range(0, recent_build_number) excludes the most
        # recent build itself -- presumably because it may still be running;
        # confirm that is the intent.
        for build_number in range(0, recent_build_number):

            # Rather annoyingly, we can't just use the Python http cache,
            # because it doesn't cache 404 responses. So we roll our own.
            cache_json_name = args.cache_name.format(builder, build_number)
            cache_json = os.path.join(args.cache_dir, cache_json_name)
            if os.path.isfile(cache_json):
                # Cache hit: reuse the previously downloaded JSON.
                with open(cache_json) as f:
                    build = json.load(f)

            else:
                # Get the build data
                build_url = args.build_url.format(builder, build_number)
                if args.verbose:
                    print("Downloading build {}.".format(build_url))
                try:
                    with urlopen(build_url) as response:
                        build = json.loads(response.read().decode('utf-8'))
                except HTTPError as e:
                    # A 404 means the build was purged; cache it as empty
                    # so we don't re-request it every run.
                    if e.code == 404:
                        build = {}
                    else:
                        raise

                # Don't cache current builds.
                if build.get('currentStep'):
                    continue

                with open(cache_json, 'w+') as f:
                    json.dump(build, f)

            # Builds without timing data (e.g. purged 404 stubs) are skipped.
            if 'times' in build:
                builds.append(build)

    # Bucket builds by (year, month) of their start timestamp.
    years = {}
    for build in builds:
        build_date = date.fromtimestamp(build['times'][0])
        years.setdefault(build_date.year, {}).setdefault(build_date.month, []).append(build)

    for year, months in years.items():
        for month, builds in months.items():

            output_name = args.output_name.format(year, month)
            output = os.path.join(args.output_dir, output_name)

            # Create the CSV file.
            if args.verbose:
                print('Creating file {}.'.format(output))
            with open(output, 'w+') as output_file:
                output_csv = csv.writer(output_file)

                # The CSV column names
                output_csv.writerow([
                    'builder',
                    'buildNumber',
                    'buildTimestamp',
                    'stepName',
                    'stepText',
                    'stepNumber',
                    'stepStart',
                    'stepFinish'
                ])

                for build in builds:
                    builder = build["builderName"]
                    build_number = build["number"]
                    build_timestamp = datetime.fromtimestamp(build["times"][0]).replace(microsecond=0)

                    # Write out the timing data for each step
                    for step in build["steps"]:
                        if step["isFinished"]:
                            step_name = step["name"]
                            step_text = ' '.join(step["text"])
                            step_number = step["step_number"]
                            step_start = floor(step["times"][0])
                            step_finish = floor(step["times"][1])
                            output_csv.writerow([
                                builder,
                                build_number,
                                build_timestamp,
                                step_name,
                                step_text,
                                step_number,
                                step_start,
                                step_finish
                            ])
if __name__ == "__main__":
main()
| mpl-2.0 |
strauzen/haxe-sublime-bundle | tests/test.py | 2 | 2341 | import sublime, os, time
from functools import partial
from unittest import TestCase
version = sublime.version()
if version >= "3000":
from UnitTesting.unittesting import DeferrableTestCase
else:
from unittesting import DeferrableTestCase
# path to the Haxe package folder
root_path = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
class TestHxml(DeferrableTestCase):
    """Integration tests that build a simple hxml-based Haxe project."""

    def setUp(self):
        self.window = sublime.active_window()
        if version >= "3000":
            # Remember the project data so tearDown can restore it.
            self.ori_project_data = self.window.project_data()

    def tearDown(self):
        if version >= "3000":
            # restore the original project data
            self.window.set_project_data(self.ori_project_data)
        # show the test result
        self.window.open_file(os.path.join(root_path, "tests", "result.txt"))

    def set_project_folder(self, path):
        """Point the current window's project at the given folder."""
        folders = [{
            "follow_symlinks": True,
            "path": path
        }]
        project_data = self.window.project_data()
        if project_data:
            project_data["folders"] = folders
        else:
            project_data = {
                "folders": folders
            }
        self.window.set_project_data(project_data)

    def assertTrueWait(self, expect, timeout_sec=5):
        """Yield until ``expect()`` is truthy or the timeout elapses, then assert it."""
        # Bug fix: time.clock() was removed in Python 3.8, so this method
        # crashed on modern Sublime Text builds.  Prefer perf_counter()
        # and fall back to clock() on old (Python 2 / ST2) interpreters.
        timer = getattr(time, 'perf_counter', None) or time.clock
        start = timer()
        while timer() - start < timeout_sec and not expect():
            yield
        self.assertTrue(expect())

    def test_hxml_simple(self):
        hxml_simple_path = os.path.join(root_path, "tests", "projects", "hxml_simple")
        self.set_project_folder(hxml_simple_path)
        view = self.window.open_file(os.path.join(hxml_simple_path, "Main.hx"))
        # syntax should be Haxe
        self.assertTrue("Haxe" in view.settings().get('syntax'))
        output_path = os.path.join(hxml_simple_path, "Main.n")
        if os.path.exists(output_path):
            os.remove(output_path)
        # test build (Command+B and Ctrl+Enter)
        expect = partial(os.path.exists, output_path)
        for cmd in ["build", "haxe_run_build", "haxe_save_all_and_build"]:
            self.window.run_command(cmd)
            for _ in self.assertTrueWait(expect):
                yield
            os.remove(output_path)
        # clean up
        self.window.focus_view(view)
        self.window.run_command("close_file")
TensorVision/MediSeg | AP3/basic_local_classifier.py | 1 | 14922 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""A basic classifier which uses only local features."""
import os.path
from PIL import Image
import scipy.misc
import scipy.ndimage
import logging
import sys
import time
import numpy as np
import json
logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
level=logging.INFO,
stream=sys.stdout)
from keras.models import Sequential
from keras.layers import Dense, Dropout
import keras.optimizers
import sklearn
from keras.models import model_from_yaml
from keras.preprocessing.image import img_to_array
from skimage.segmentation import quickshift, slic
from tensorvision.utils import load_segmentation_mask
import sys
sys.path.append(
os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from utils import get_file_list
import analyze
from seg_utils import get_image, get_class_weight
def get_features(x, y, image, model_nr=2):
    """Get the feature vector for pixel (x, y) of ``image``.

    Parameters
    ----------
    x, y : int
        Pixel position (column, row).
    image : numpy array of shape (height, width, 3)
        RGB image.
    model_nr : int or str
        Selects which feature set is extracted.

    Returns
    -------
    tuple
        Feature tuple; its length depends on ``model_nr``.
    """
    height, width, _ = image.shape
    p = get_pos_colors(image, x, y)
    if model_nr in [1, "1.1"]:
        # Color only.
        return p
    elif model_nr in [2, 3]:
        # Color plus pixel position.
        return (p[0], p[1], p[2], x, y)
    elif model_nr in [4]:
        # Color, color of the left neighbor, and pixel position.
        left = get_pos_colors(image, x - 1, y)
        return (p[0], p[1], p[2], left[0], left[1], left[2], x, y)
    elif model_nr in [5]:
        # Color plus the colors of the 4-neighborhood.
        left = get_pos_colors(image, x - 1, y)
        right = get_pos_colors(image, x + 1, y)
        top = get_pos_colors(image, x, y + 1)
        bottom = get_pos_colors(image, x, y - 1)
        return (p[0], p[1], p[2],
                left[0], left[1], left[2],
                right[0], right[1], right[2],
                top[0], top[1], top[2],
                bottom[0], bottom[1], bottom[2])
    else:
        print("model_nr '%s' unknown" % str(model_nr))
        sys.exit(-1)


def get_pos_colors(image, x, y):
    """Get the color at a position or 0-vector, if the position is invalid."""
    # Bug fix: the previous check used 'x > 0 and y > 0', which wrongly
    # treated the valid first row/column (x == 0 or y == 0) as out of
    # bounds and returned (0, 0, 0) for real pixels there.
    if x >= 0 and y >= 0 and len(image) > y and len(image[0]) > x:
        return (image[y][x][0], image[y][x][1], image[y][x][2])
    else:
        return (0, 0, 0)
def inputs(hypes, _, phase, data_dir):
    """
    Get data.

    Parameters
    ----------
    hypes : dict
    _ : ignore this
    phase : {'train', 'val'}
    data_dir : str

    Returns
    -------
    tuple
        (xs, ys), where xs and ys are lists of the same length.
        xs are paths to the input images and ys are paths to the expected
        output
    """
    # NOTE(review): despite the docstring, xs holds per-pixel feature
    # tuples and ys the per-pixel labels, and the 'train' split is always
    # loaded (phase and data_dir are unused) -- confirm intended behavior.
    x_files, y_files = get_file_list(hypes, 'train')
    x_files, y_files = sklearn.utils.shuffle(x_files,
                                             y_files,
                                             random_state=0)

    xs, ys = [], []
    for x, y in zip(x_files, y_files):
        logging.info("Read '%s' for data...", x)
        image = get_image(x, 'RGB')
        label = load_segmentation_mask(hypes, y)
        im = Image.open(x, 'r')
        width, height = im.size
        # NOTE(review): the inner loops reuse `x` and `y` as pixel
        # coordinates, shadowing the file-path loop variables above.
        for x in range(width):
            for y in range(height):
                image_val = get_features(x, y, image, hypes['model_nr'])
                label_val = label[y][x]
                xs.append(image_val)
                ys.append(label_val)
    return xs, np.array(ys, dtype=int)
def shuffle_in_unison_inplace(a, b):
    """Apply one shared random permutation to both ``a`` and ``b``."""
    assert len(a) == len(b)
    order = np.random.permutation(len(a))
    return a[order], b[order]
def generate_training_data(hypes, x_files, y_files):
    """
    Generate training data.

    Parameters
    ----------
    hypes : dict
        Hyperparameters
    x_files : list
        Paths to raw data files
    y_files : list
        Paths to segmentation masks

    Yields
    ------
    tuple
        (xs, ys) - training batch of feature list xs and label list ys
    """
    x_files, y_files = sklearn.utils.shuffle(x_files,
                                             y_files,
                                             random_state=0)
    # `i` cycles over the files; `xs`/`ys` act as a carry-over buffer of
    # per-pixel samples from which fixed-size batches are sliced.
    i = 0
    xs, ys = get_traindata_single_file(hypes, x_files[i], y_files[i])
    i = (i + 1) % len(x_files)
    while True:
        # Refill the buffer until at least one full batch is available.
        while len(xs) < hypes['solver']['batch_size']:
            xs_tmp, ys_tmp = get_traindata_single_file(hypes,
                                                       x_files[i],
                                                       y_files[i])
            i = (i + 1) % len(x_files)
            xs = np.concatenate((xs, xs_tmp), axis=0)
            ys = np.concatenate((ys, ys_tmp), axis=0)
        # Optionally balance the classes before slicing the batch.
        if hypes['training']['make_equal']:
            xs, ys = reduce_data_equal(xs, ys)

        # xs, ys = shuffle_in_unison_inplace(xs, ys)
        # print("sum(ys)=%i / %i" % (np.sum(ys), len(ys) - np.sum(ys)))
        # print("sum(ys[s])=%i" % np.sum(ys[:hypes['solver']['batch_size']]))
        # Emit one batch and keep the remainder for the next iteration.
        yield (xs[:hypes['solver']['batch_size']],
               ys[:hypes['solver']['batch_size']])
        xs = xs[hypes['solver']['batch_size']:]
        ys = ys[hypes['solver']['batch_size']:]
def get_traindata_single_file(hypes, x, y):
    """Get training data for a single image file x with segmentation file y.

    Parameters
    ----------
    hypes : dict
        Hyperparameters; ``hypes['model_nr']`` selects the feature set.
    x : str
        Path to the raw image.
    y : str
        Path to the segmentation mask.

    Returns
    -------
    tuple
        (features, labels) as numpy arrays with one entry per pixel.
    """
    xs, ys = [], []
    logging.info("Read '%s' for data...", x)
    image = get_image(x, 'RGB')
    label = load_segmentation_mask(hypes, y)
    im = Image.open(x, 'r')
    width, height = im.size
    # Use distinct loop variables: the previous code reused the parameters
    # `x` and `y` (the file paths) as pixel coordinates, shadowing them.
    for px in range(width):
        for py in range(height):
            xs.append(get_features(px, py, image, hypes['model_nr']))
            ys.append(label[py][px])
    return np.array(xs), np.array(ys, dtype=int)
def get_segmentation(hypes, image_path, model):
    """
    Get a segmentation.

    Parameters
    ----------
    hypes : dict
        Hyperparameters (model specific information)
    image_path : str
        Path to a file which gets segmented.
    model : object
        Trained classifier with a ``predict_classes`` method.

    Returns
    -------
    Numpy array of the same width and height as input.
    """
    image = get_image(image_path, 'RGB')

    # Preprocess
    # import skimage.exposure
    # image = skimage.exposure.equalize_hist(image)
    # image = Image.fromarray(image, 'RGB')
    # converter = PIL.ImageEnhance.Color(image)
    # image = converter.enhance(2)
    # image = img_to_array(image)
    # scipy.misc.imshow(image)

    im = Image.open(image_path, 'r')
    width, height = im.size
    segmentation = np.zeros((height, width), dtype=int)

    # Classify every pixel from its local feature vector.
    x_test = []
    for x in range(width):
        for y in range(height):
            x_test.append(get_features(x, y, image, hypes['model_nr']))

    classes = model.predict_classes(np.array(x_test, dtype=int),
                                    batch_size=1024)
    # Unflatten the predictions in the same (column-major) order in which
    # the features were collected.
    i = 0
    for x in range(width):
        for y in range(height):
            segmentation[y][x] = classes[i]
            i += 1
    # Bug fix: this condition previously read
    # `hypes['model_nr'] == [3, "1.1"]`, which compares a scalar with a
    # list and is therefore always False; membership (`in`) was intended.
    if hypes['model_nr'] in [3, "1.1"]:
        segmentation = morphological_operations(segmentation)
    if hypes['segmenter']['invert']:
        # Set all labels which are 1 to 0 and vice versa.
        segmentation = np.invert(segmentation.astype(bool)).astype(int)
    # segmentation = superpixel_majority_vote(image, segmentation)
    return segmentation
def superpixel_majority_vote(image, segmentation):
    """Mark superpixels by majority vote.

    Each quickshift superpixel receives the binary label that the majority
    of its pixels carry in ``segmentation``.
    """
    image = image.astype(float)
    segments = quickshift(image, ratio=0.5, max_dist=10, sigma=1.0)
    # Alternative segmentation strategies that were considered:
    #   segments = slic(image, n_segments=50, compactness=20)
    #   watershed:
    #     http://scikit-image.org/docs/dev/auto_examples/segmentation/
    #     plot_marked_watershed.html
    #   http://scikit-image.org/docs/dev/auto_examples/
    # (Bug fix: the notes above were previously left as no-op string
    # expression statements in the function body; they are comments now.)
    height, width = segments.shape
    # First pass: count the label occurrences inside every superpixel.
    segment_count = {}
    for x in range(width):
        for y in range(height):
            s = segments[y][x]
            if s not in segment_count:
                segment_count[s] = {0: 0, 1: 0}  # binary
            segment_count[s][segmentation[y][x]] += 1
    # Second pass: overwrite each pixel with its superpixel's majority label.
    for x in range(width):
        for y in range(height):
            s = segments[y][x]
            class_ = int(segment_count[s][1] > segment_count[s][0])
            segmentation[y][x] = class_
    return segmentation
def morphological_operations(segmentation):
    """Apply morphological operations to improve the segmentation.

    Performs a binary opening (erosion followed by dilation, 3 iterations
    each), which removes small isolated foreground specks.
    """
    iterations = 3
    eroded = scipy.ndimage.binary_erosion(segmentation,
                                          iterations=iterations)
    return scipy.ndimage.binary_dilation(eroded,
                                         iterations=iterations)
def main(hypes_file, data_dir, override):
    """Orchestrate.

    Train a pixel-wise classifier (or load an existing one) and evaluate it.

    Parameters
    ----------
    hypes_file : str
        Path to the JSON hyperparameter file.
    data_dir : str
        Output directory for evaluation artifacts.
    override : bool
        Retrain and overwrite an existing model file.
    """
    with open(hypes_file, 'r') as f:
        hypes = json.load(f)
    # Fill in defaults for optional hyperparameters.
    if 'training' not in hypes:
        hypes['training'] = {}
    if 'make_equal' not in hypes['training']:
        hypes['training']['make_equal'] = False
    # Model architecture and weights are stored next to the hypes file.
    base = os.path.dirname(hypes_file)
    model_file_path = os.path.join(base, '%s.yaml' % hypes['model']['name'])
    model_file_path = os.path.abspath(model_file_path)
    weights_file_path = os.path.join(base, '%s.hdf5' % hypes['model']['name'])
    weights_file_path = os.path.abspath(weights_file_path)

    if not os.path.isfile(model_file_path) or override:
        if not os.path.isfile(model_file_path):
            logging.info("Did not find '%s'. Start training...",
                         model_file_path)
        else:
            logging.info("Override '%s'. Start training...",
                         model_file_path)

        # Get data
        # x_files, y_files = inputs(hypes, None, 'train', data_dir)
        x_files, y_files = get_file_list(hypes, 'train')
        x_files, y_files = sklearn.utils.shuffle(x_files,
                                                 y_files,
                                                 random_state=0)

        # One file is loaded eagerly to determine the feature vector size
        # and to serve as validation data during training.
        x_train, y_train = get_traindata_single_file(hypes,
                                                     x_files[0],
                                                     y_files[0])

        nb_features = x_train[0].shape[0]
        logging.info("Input gets %i features", nb_features)

        # Make model
        model = Sequential()
        model.add(Dense(64,
                        input_dim=nb_features,
                        init='uniform',
                        activation='sigmoid'))
        model.add(Dropout(0.5))
        model.add(Dense(64, activation='relu'))
        model.add(Dropout(0.5))
        model.add(Dense(1, activation='sigmoid'))
        model.compile(loss='binary_crossentropy',
                      optimizer='adagrad',  # rmsprop
                      metrics=['accuracy'])

        generator = generate_training_data(hypes, x_files, y_files)
        t0 = time.time()
        sep = hypes['solver']['samples_per_epoch']
        # The `if True` keeps the alternative `.fit` code path around for
        # experimentation; only the generator-based path is active.
        if True:
            class_weight = get_class_weight(hypes)
            logging.info("class_weights = %s", class_weight)
            model.fit_generator(generator,
                                samples_per_epoch=sep,
                                nb_epoch=hypes['solver']['epochs'],
                                verbose=1,
                                validation_data=(x_train, y_train),
                                class_weight=class_weight)
        else:
            logging.info("Fit with .fit")
            x_train, y_train = inputs(hypes, None, 'train', data_dir)
            model.fit(x_train, y_train, batch_size=128, nb_epoch=1)
        t1 = time.time()
        print("Training Time: %0.4f" % (t1 - t0))

        # save as YAML
        yaml_string = model.to_yaml()
        with open(model_file_path, 'w+') as f:
            f.write(yaml_string)
        model.save_weights(weights_file_path)

        # Evaluate
        data = get_file_list(hypes, 'test')
        logging.info("Start segmentation")
        analyze.evaluate(hypes,
                         data,
                         data_dir,
                         model,
                         elements=[0, 1],
                         get_segmentation=get_segmentation)
    else:
        # Reuse the previously trained model: load architecture + weights.
        logging.info("## Found '%s'.", model_file_path)
        with open(model_file_path) as f:
            yaml_string = f.read()
        model = model_from_yaml(yaml_string)
        model.load_weights(weights_file_path)
        model.compile(optimizer='adagrad', loss='binary_crossentropy')
        data = get_file_list(hypes, 'test')
        analyze.evaluate(hypes,
                         data,
                         data_dir,
                         model,
                         elements=[0, 1],
                         get_segmentation=get_segmentation)
def reduce_data_equal(x_train, y_train, max_per_class=None):
    """
    Reduce the amount of data to get the same number per class.

    This script assumes that y_train is a list of binary labels {0, 1}.
    """
    # Target count per class: the size of the smaller class, optionally
    # capped by max_per_class.
    n = min(sum(y_train), abs(len(y_train) - sum(y_train)))
    if max_per_class is not None:
        n = min(n, max_per_class)
    counts = {0: 0, 1: 0}
    kept_x, kept_y = [], []
    # Keep the first n samples of each class, preserving the input order.
    for features, label in zip(list(x_train), list(y_train)):
        if label in counts and counts[label] < n:
            kept_x.append(features)
            kept_y.append(label)
            counts[label] += 1
    return np.array(kept_x), np.array(kept_y)
def is_valid_file(parser, arg):
    """Validate that *arg* names an existing path and return its absolute form.

    Intended as an argparse ``type=`` callback: if the path does not exist the
    given *parser*'s ``error()`` is invoked (which normally exits the program).

    Parameters
    ----------
    parser : argparse.ArgumentParser
    arg : str

    Returns
    -------
    str
        Absolute path, when the path exists.
    """
    absolute = os.path.abspath(arg)
    if os.path.exists(absolute):
        return absolute
    # parser.error() raises SystemExit in standard argparse usage
    parser.error("The file %s does not exist!" % absolute)
def get_parser():
    """Build the command-line parser for the basic local classifier.

    Returns
    -------
    argparse.ArgumentParser
        Parser with required ``--out`` and ``--hypes`` options and an
        optional ``--override`` flag.
    """
    from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter

    parser = ArgumentParser(description=__doc__,
                            formatter_class=ArgumentDefaultsHelpFormatter)
    parser.add_argument("--out",
                        dest="data",
                        required=True,
                        help=("output directory"))
    parser.add_argument("--hypes",
                        dest="hypes_file",
                        metavar="FILE",
                        required=True,
                        # validate existence eagerly while parsing
                        type=lambda x: is_valid_file(parser, x),
                        help=("Configuration file in JSON format"))
    parser.add_argument("--override",
                        action="store_true", dest="override", default=False,
                        help="override old model, if it exists")
    return parser
if __name__ == "__main__":
    # Parse CLI options and run the training/evaluation entry point
    # (``main`` is defined earlier in this module).
    args = get_parser().parse_args()
    main(args.hypes_file, args.data, args.override)
| mit |
anushbmx/kitsune | kitsune/questions/management/commands/update_weekly_votes.py | 1 | 1135 | from datetime import datetime, timedelta
from django.core.management.base import BaseCommand
from kitsune.questions.models import Question, QuestionVote
from kitsune.questions.tasks import update_question_vote_chunk
from kitsune.sumo.utils import chunked
class Command(BaseCommand):
    """Management command that keeps ``num_votes_past_week`` accurate."""

    help = "Keep the num_votes_past_week value accurate."

    def handle(self, **options):
        """Queue async tasks to recount weekly votes for affected questions."""
        one_week_ago = datetime.now() - timedelta(days=7)

        # Question ids that received at least one vote during the last week.
        recently_voted = list(
            QuestionVote.objects.filter(created__gte=one_week_ago)
            .values_list('question_id', flat=True)
            .order_by('question')
            .distinct())

        # Question ids whose cached weekly count is currently non-zero (these
        # may need to be decayed back down even without new votes).
        nonzero_counts = list(
            Question.objects.filter(num_votes_past_week__gt=0)
            .values_list('id', flat=True))

        # Recount every question in either set, 50 ids per async task.
        for chunk in chunked(list(set(recently_voted + nonzero_counts)), 50):
            update_question_vote_chunk.apply_async(args=[chunk])
| bsd-3-clause |
digistam/recon-ng | libs/mechanize/_msiecookiejar.py | 134 | 14694 | """Microsoft Internet Explorer cookie loading on Windows.
Copyright 2002-2003 Johnny Lee <typo_pl@hotmail.com> (MSIE Perl code)
Copyright 2002-2006 John J Lee <jjl@pobox.com> (The Python port)
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD or ZPL 2.1 licenses (see the file
COPYING.txt included with the distribution).
"""
# XXX names and comments are not great here
import os, re, time, struct, logging
if os.name == "nt":
import _winreg
from _clientcookie import FileCookieJar, CookieJar, Cookie, \
MISSING_FILENAME_TEXT, LoadError
debug = logging.getLogger("mechanize").debug
def regload(path, leaf):
    """Return registry value *leaf* under HKEY_CURRENT_USER\\*path*.

    Returns None when the value is missing.
    """
    key = _winreg.OpenKey(_winreg.HKEY_CURRENT_USER, path, 0,
                          _winreg.KEY_ALL_ACCESS)
    try:
        return _winreg.QueryValueEx(key, leaf)[0]
    except WindowsError:
        return None
# Win32 FILETIME of the Unix epoch: 100-nanosecond ticks from 1601-01-01
# to 1970-01-01 (Python 2 long literal).
WIN32_EPOCH = 0x019db1ded53e8000L  # 1970 Jan 01 00:00:00 in Win32 FILETIME
def epoch_time_offset_from_win32_filetime(filetime):
    """Convert from win32 filetime to seconds-since-epoch value.

    MSIE stores create and expire times as Win32 FILETIME, which is 64
    bits of 100 nanosecond intervals since Jan 01 1601.

    mechanize expects time in 32-bit value expressed in seconds since the
    epoch (Jan 01 1970).

    Raises ValueError for times earlier than the Unix epoch (not
    representable as a non-negative epoch offset).
    """
    if filetime < WIN32_EPOCH:
        raise ValueError("filetime (%d) is before epoch (%d)" %
                         (filetime, WIN32_EPOCH))

    # 10,000,000 hundred-nanosecond ticks per second; floor-divide to whole
    # seconds (Python 2 long literal).
    return divmod((filetime - WIN32_EPOCH), 10000000L)[0]
def binary_to_char(c):
    """Return the two-digit uppercase hex representation of byte *c*."""
    return "%02X" % ord(c)
def binary_to_str(d):
    """Hex-dump byte string *d* as concatenated two-digit hex codes."""
    return "".join(binary_to_char(ch) for ch in d)
class MSIEBase:
    """Shared parsing logic for MSIE cookies: reads the index.dat cookie
    index and the per-domain cookie text files it references."""
    magic_re = re.compile(r"Client UrlCache MMF Ver \d\.\d.*")
    padding = "\x0d\xf0\xad\x0b"

    msie_domain_re = re.compile(r"^([^/]+)(/.*)$")
    cookie_re = re.compile("Cookie\:.+\@([\x21-\xFF]+).*?"
                           "(.+\@[\x21-\xFF]+\.txt)")

    # path under HKEY_CURRENT_USER from which to get location of index.dat
    reg_path = r"software\microsoft\windows" \
               r"\currentversion\explorer\shell folders"
    reg_key = "Cookies"

    def __init__(self):
        # maps domain -> (cookie_file, ignore_discard, ignore_expires) for
        # cookie files whose parsing has been deferred (delayload mode)
        self._delayload_domains = {}

    def _delayload_domain(self, domain):
        """Load the deferred cookie file for *domain*, if one is pending."""
        # if necessary, lazily load cookies for this domain
        delayload_info = self._delayload_domains.get(domain)
        if delayload_info is not None:
            cookie_file, ignore_discard, ignore_expires = delayload_info
            try:
                self.load_cookie_data(cookie_file,
                                      ignore_discard, ignore_expires)
            except (LoadError, IOError):
                debug("error reading cookie file, skipping: %s", cookie_file)
            else:
                del self._delayload_domains[domain]

    def _load_cookies_from_file(self, filename):
        """Parse one MSIE cookie text file into a list of raw cookie dicts.

        Each record in the file is a fixed sequence of lines terminated by a
        "*" separator line; malformed records stop the parse.
        """
        debug("Loading MSIE cookies file: %s", filename)
        cookies = []

        cookies_fh = open(filename)

        try:
            while 1:
                key = cookies_fh.readline()
                if key == "": break

                rl = cookies_fh.readline
                # helper readers bound to this file handle (Python 2 ``long``)
                def getlong(rl=rl): return long(rl().rstrip())
                def getstr(rl=rl): return rl().rstrip()

                key = key.rstrip()
                value = getstr()
                domain_path = getstr()
                flags = getlong()  # 0x2000 bit is for secure I think
                lo_expire = getlong()
                hi_expire = getlong()
                lo_create = getlong()
                hi_create = getlong()
                sep = getstr()

                if "" in (key, value, domain_path, flags, hi_expire, lo_expire,
                          hi_create, lo_create, sep) or (sep != "*"):
                    break

                m = self.msie_domain_re.search(domain_path)
                if m:
                    domain = m.group(1)
                    path = m.group(2)

                    cookies.append({"KEY": key, "VALUE": value,
                                    "DOMAIN": domain, "PATH": path,
                                    "FLAGS": flags, "HIXP": hi_expire,
                                    "LOXP": lo_expire, "HICREATE": hi_create,
                                    "LOCREATE": lo_create})
        finally:
            cookies_fh.close()

        return cookies

    def load_cookie_data(self, filename,
                         ignore_discard=False, ignore_expires=False):
        """Load cookies from file containing actual cookie data.

        Old cookies are kept unless overwritten by newly loaded ones.

        You should not call this method if the delayload attribute is set.

        I think each of these files contain all cookies for one user, domain,
        and path.

        filename: file containing cookies -- usually found in a file like
         C:\WINNT\Profiles\joe\Cookies\joe@blah[1].txt

        """
        now = int(time.time())

        cookie_data = self._load_cookies_from_file(filename)

        for cookie in cookie_data:
            flags = cookie["FLAGS"]
            secure = ((flags & 0x2000) != 0)
            # reassemble the 64-bit Win32 FILETIME from its two 32-bit halves
            filetime = (cookie["HIXP"] << 32) + cookie["LOXP"]
            expires = epoch_time_offset_from_win32_filetime(filetime)
            if expires < now:
                discard = True
            else:
                discard = False
            domain = cookie["DOMAIN"]
            initial_dot = domain.startswith(".")
            if initial_dot:
                domain_specified = True
            else:
                # MSIE 5 does not record whether the domain cookie-attribute
                # was specified.
                # Assuming it wasn't is conservative, because with strict
                # domain matching this will match less frequently; with regular
                # Netscape tail-matching, this will match at exactly the same
                # times that domain_specified = True would. It also means we
                # don't have to prepend a dot to achieve consistency with our
                # own & Mozilla's domain-munging scheme.
                domain_specified = False

            # assume path_specified is false
            # XXX is there other stuff in here? -- e.g. comment, commentURL?
            c = Cookie(0,
                       cookie["KEY"], cookie["VALUE"],
                       None, False,
                       domain, domain_specified, initial_dot,
                       cookie["PATH"], False,
                       secure,
                       expires,
                       discard,
                       None,
                       None,
                       {"flags": flags})
            if not ignore_discard and c.discard:
                continue
            if not ignore_expires and c.is_expired(now):
                continue
            CookieJar.set_cookie(self, c)

    def load_from_registry(self, ignore_discard=False, ignore_expires=False,
                           username=None):
        """Locate index.dat via the registry and load the cookies it indexes.

        username: only required on win9x
        """
        cookies_dir = regload(self.reg_path, self.reg_key)
        filename = os.path.normpath(os.path.join(cookies_dir, "INDEX.DAT"))
        self.load(filename, ignore_discard, ignore_expires, username)

    def _really_load(self, index, filename, ignore_discard, ignore_expires,
                     username):
        """Walk the records of an open index.dat file *index* and load the
        per-domain cookie files it references (or defer them if delayload)."""
        now = int(time.time())

        if username is None:
            username = os.environ['USERNAME'].lower()

        cookie_dir = os.path.dirname(filename)

        data = index.read(256)
        if len(data) != 256:
            raise LoadError("%s file is too short" % filename)

        # Cookies' index.dat file starts with 32 bytes of signature
        # followed by an offset to the first record, stored as a little-
        # endian DWORD.
        sig, size, data = data[:32], data[32:36], data[36:]
        size = struct.unpack("<L", size)[0]

        # check that sig is valid
        if not self.magic_re.match(sig) or size != 0x4000:
            raise LoadError("%s ['%s' %s] does not seem to contain cookies" %
                            (str(filename), sig, size))

        # skip to start of first record
        index.seek(size, 0)

        sector = 128  # size of sector in bytes
        while 1:
            data = ""

            # Cookies are usually in two contiguous sectors, so read in two
            # sectors and adjust if not a Cookie.
            to_read = 2 * sector
            d = index.read(to_read)
            if len(d) != to_read:
                break
            data = data + d

            # Each record starts with a 4-byte signature and a count
            # (little-endian DWORD) of sectors for the record.
            sig, size, data = data[:4], data[4:8], data[8:]
            size = struct.unpack("<L", size)[0]
            to_read = (size - 2) * sector

##             from urllib import quote
##             print "data", quote(data)
##             print "sig", quote(sig)
##             print "size in sectors", size
##             print "size in bytes", size*sector
##             print "size in units of 16 bytes", (size*sector) / 16
##             print "size to read in bytes", to_read
##             print

            if sig != "URL ":
                assert sig in ("HASH", "LEAK", \
                               self.padding, "\x00\x00\x00\x00"), \
                               "unrecognized MSIE index.dat record: %s" % \
                               binary_to_str(sig)
                if sig == "\x00\x00\x00\x00":
                    # assume we've got all the cookies, and stop
                    break
                if sig == self.padding:
                    continue
                # skip the rest of this record
                assert to_read >= 0
                if size != 2:
                    assert to_read != 0
                    index.seek(to_read, 1)
                continue

            # read in rest of record if necessary
            if size > 2:
                more_data = index.read(to_read)
                if len(more_data) != to_read: break
                data = data + more_data

            # per-user pattern locating the cookie file name for this record
            cookie_re = ("Cookie\:%s\@([\x21-\xFF]+).*?" % username +
                         "(%s\@[\x21-\xFF]+\.txt)" % username)
            m = re.search(cookie_re, data, re.I)
            if m:
                cookie_file = os.path.join(cookie_dir, m.group(2))
                if not self.delayload:
                    try:
                        self.load_cookie_data(cookie_file,
                                              ignore_discard, ignore_expires)
                    except (LoadError, IOError):
                        debug("error reading cookie file, skipping: %s",
                              cookie_file)
                else:
                    domain = m.group(1)
                    i = domain.find("/")
                    if i != -1:
                        domain = domain[:i]

                    self._delayload_domains[domain] = (
                        cookie_file, ignore_discard, ignore_expires)
class MSIECookieJar(MSIEBase, FileCookieJar):
    """FileCookieJar that reads from the Windows MSIE cookies database.

    MSIECookieJar can read the cookie files of Microsoft Internet Explorer
    (MSIE) for Windows version 5 on Windows NT and version 6 on Windows XP and
    Windows 98. Other configurations may also work, but are untested. Saving
    cookies in MSIE format is NOT supported. If you save cookies, they'll be
    in the usual Set-Cookie3 format, which you can read back in using an
    instance of the plain old CookieJar class. Don't save using the same
    filename that you loaded cookies from, because you may succeed in
    clobbering your MSIE cookies index file!

    You should be able to have LWP share Internet Explorer's cookies like
    this (note you need to supply a username to load_from_registry if you're on
    Windows 9x or Windows ME):

    cj = MSIECookieJar(delayload=1)
    # find cookies index file in registry and load cookies from it
    cj.load_from_registry()
    opener = mechanize.build_opener(mechanize.HTTPCookieProcessor(cj))
    response = opener.open("http://example.com/")

    Iterating over a delayloaded MSIECookieJar instance will not cause any
    cookies to be read from disk. To force reading of all cookies from disk,
    call read_all_cookies. Note that the following methods iterate over self:
    clear_temporary_cookies, clear_expired_cookies, __len__, __repr__, __str__
    and as_string.

    Additional methods:

    load_from_registry(ignore_discard=False, ignore_expires=False,
                       username=None)
    load_cookie_data(filename, ignore_discard=False, ignore_expires=False)
    read_all_cookies()

    """
    def __init__(self, filename=None, delayload=False, policy=None):
        MSIEBase.__init__(self)
        FileCookieJar.__init__(self, filename, delayload, policy)

    def set_cookie(self, cookie):
        """Set a cookie, first loading any deferred file for its domain."""
        if self.delayload:
            self._delayload_domain(cookie.domain)
        CookieJar.set_cookie(self, cookie)

    def _cookies_for_request(self, request):
        """Return a list of cookies to be returned to server."""
        # merge eagerly-loaded domains with those still pending delayload, so
        # deferred domains are also considered for this request
        domains = self._cookies.copy()
        domains.update(self._delayload_domains)
        domains = domains.keys()

        cookies = []
        for domain in domains:
            cookies.extend(self._cookies_for_domain(domain, request))
        return cookies

    def _cookies_for_domain(self, domain, request):
        """Return the cookies for *domain* applicable to *request*, loading
        the domain's deferred cookie file first when delayload is set."""
        if not self._policy.domain_return_ok(domain, request):
            return []
        debug("Checking %s for cookies to return", domain)
        if self.delayload:
            self._delayload_domain(domain)
        return CookieJar._cookies_for_domain(self, domain, request)

    def read_all_cookies(self):
        """Eagerly read in all cookies."""
        if self.delayload:
            for domain in self._delayload_domains.keys():
                self._delayload_domain(domain)

    def load(self, filename, ignore_discard=False, ignore_expires=False,
             username=None):
        """Load cookies from an MSIE 'index.dat' cookies index file.

        filename: full path to cookie index file
        username: only required on win9x

        """
        if filename is None:
            if self.filename is not None: filename = self.filename
            else: raise ValueError(MISSING_FILENAME_TEXT)

        index = open(filename, "rb")

        try:
            self._really_load(index, filename, ignore_discard, ignore_expires,
                              username)
        finally:
            index.close()
| gpl-3.0 |
TheCapleGuy/GoogleTest | test/gtest_xml_outfiles_test.py | 2526 | 5340 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Unit test for the gtest_xml_output module."""
__author__ = "keith.ray@gmail.com (Keith Ray)"
import os
from xml.dom import minidom, Node
import gtest_test_utils
import gtest_xml_test_utils
GTEST_OUTPUT_SUBDIR = "xml_outfiles"
GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_"
GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_"
EXPECTED_XML_1 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyOne" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyOne" SetUpProp="1" TestSomeProperty="1" TearDownProp="1" />
</testsuite>
</testsuites>
"""
EXPECTED_XML_2 = """<?xml version="1.0" encoding="UTF-8"?>
<testsuites tests="1" failures="0" disabled="0" errors="0" time="*" timestamp="*" name="AllTests">
<testsuite name="PropertyTwo" tests="1" failures="0" disabled="0" errors="0" time="*">
<testcase name="TestSomeProperties" status="run" time="*" classname="PropertyTwo" SetUpProp="2" TestSomeProperty="2" TearDownProp="2" />
</testsuite>
</testsuites>
"""
class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase):
  """Unit test for Google Test's XML output functionality."""

  def setUp(self):
    # We want the trailing '/' that the last "" provides in os.path.join, for
    # telling Google Test to create an output directory instead of a single file
    # for xml output.
    self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(),
                                    GTEST_OUTPUT_SUBDIR, "")
    self.DeleteFilesAndDir()

  def tearDown(self):
    self.DeleteFilesAndDir()

  def DeleteFilesAndDir(self):
    # Best-effort removal of the test's XML files and output directory;
    # missing files are ignored.
    try:
      os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + ".xml"))
    except os.error:
      pass
    try:
      os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + ".xml"))
    except os.error:
      pass
    try:
      os.rmdir(self.output_dir_)
    except os.error:
      pass

  def testOutfile1(self):
    self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1)

  def testOutfile2(self):
    self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2)

  def _TestOutFile(self, test_name, expected_xml):
    # Run the named test binary with --gtest_output pointed at the output
    # directory, then compare the XML it produces against expected_xml.
    gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name)
    command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_]
    p = gtest_test_utils.Subprocess(command,
                                    working_dir=gtest_test_utils.GetTempDir())
    self.assert_(p.exited)
    self.assertEquals(0, p.exit_code)

    # TODO(wan@google.com): libtool causes the built test binary to be
    #   named lt-gtest_xml_outfiles_test_ instead of
    #   gtest_xml_outfiles_test_.  To account for this possibillity, we
    #   allow both names in the following code.  We should remove this
    #   hack when Chandler Carruth's libtool replacement tool is ready.
    output_file_name1 = test_name + ".xml"
    output_file1 = os.path.join(self.output_dir_, output_file_name1)
    output_file_name2 = 'lt-' + output_file_name1
    output_file2 = os.path.join(self.output_dir_, output_file_name2)
    self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2),
                 output_file1)

    expected = minidom.parseString(expected_xml)
    if os.path.isfile(output_file1):
      actual = minidom.parse(output_file1)
    else:
      actual = minidom.parse(output_file2)
    self.NormalizeXml(actual.documentElement)
    self.AssertEquivalentNodes(expected.documentElement,
                               actual.documentElement)
    expected.unlink()
    actual.unlink()
if __name__ == "__main__":
  # NOTE(review): presumably set so stack traces don't perturb the XML being
  # compared -- confirm against gtest docs.
  os.environ["GTEST_STACK_TRACE_DEPTH"] = "0"
  gtest_test_utils.Main()
| bsd-3-clause |
Bysmyyr/chromium-crosswalk | tools/telemetry/telemetry/internal/platform/tracing_agent/chrome_tracing_agent_unittest.py | 2 | 10817 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import platform
import stat
import unittest
from telemetry import decorators
from telemetry.internal.platform.tracing_agent import chrome_tracing_agent
from telemetry.internal.platform.tracing_agent import (
chrome_tracing_devtools_manager)
from telemetry.timeline import tracing_category_filter
from telemetry.timeline import tracing_config
from telemetry.timeline import tracing_options
from devil.android import device_utils
class FakeTracingControllerBackend(object):
  """Minimal stand-in for a tracing controller backend used by these tests."""

  def __init__(self):
    # Mirrors the real backend's flag; tests flip it directly.
    self.is_tracing_running = False
class FakePlatformBackend(object):
  """Fake platform backend wiring in a fake tracing controller backend."""

  def __init__(self):
    self.tracing_controller_backend = FakeTracingControllerBackend()

  def GetOSName(self):
    # The generic fake does not correspond to any concrete OS.
    return ''
class FakeAndroidPlatformBackend(FakePlatformBackend):
  """Fake backend bound to the first healthy attached Android device."""

  def __init__(self):
    super(FakeAndroidPlatformBackend, self).__init__()
    # Requires a real device to be attached; used by the Android-only tests.
    devices = device_utils.DeviceUtils.HealthyDevices(None)
    self.device = devices[0]

  def GetOSName(self):
    return 'android'
class FakeDesktopPlatformBackend(FakePlatformBackend):
  """Fake backend reporting the real host OS as linux/mac/win."""

  # platform.system() value -> telemetry OS name
  _OS_NAMES = {'Linux': 'linux', 'Darwin': 'mac', 'Windows': 'win'}

  def GetOSName(self):
    # Unknown systems yield None, matching the original fall-through.
    return self._OS_NAMES.get(platform.system())
class FakeDevtoolsClient(object):
  """In-memory devtools client double that records tracing state."""

  def __init__(self, remote_port):
    self.is_alive = True
    self.is_tracing_running = False
    self.remote_port = remote_port
    # When set, StopChromeTracing simulates a failing devtools client.
    self.will_raise_exception_in_stop_tracing = False

  def IsAlive(self):
    """Report whether the fake client is considered connected."""
    return self.is_alive

  def StartChromeTracing(self, _trace_options, _filter_string, _timeout=10):
    """Mark tracing as started; all arguments are accepted and ignored."""
    self.is_tracing_running = True

  def StopChromeTracing(self, _trace_data_builder):
    """Mark tracing as stopped, optionally simulating a failure afterwards."""
    self.is_tracing_running = False
    if self.will_raise_exception_in_stop_tracing:
      raise Exception

  def IsChromeTracingSupported(self):
    """The fake always claims Chrome tracing support."""
    return True
class ChromeTracingAgentTest(unittest.TestCase):
  """Tests for ChromeTracingAgent using fake platforms and devtools clients."""

  def setUp(self):
    # Three independent fake platforms; clients registered on one must not
    # affect tracing on another.
    self.platform1 = FakePlatformBackend()
    self.platform2 = FakePlatformBackend()
    self.platform3 = FakePlatformBackend()

  def StartTracing(self, platform_backend, enable_chrome_trace=True):
    # Helper: create an agent on platform_backend and start tracing with a
    # fixed 'foo' category filter and a 10 second timeout.
    assert chrome_tracing_agent.ChromeTracingAgent.IsSupported(platform_backend)
    agent = chrome_tracing_agent.ChromeTracingAgent(platform_backend)
    trace_options = tracing_options.TracingOptions()
    trace_options.enable_chrome_trace = enable_chrome_trace
    category_filter = tracing_category_filter.TracingCategoryFilter('foo')
    agent._platform_backend.tracing_controller_backend.is_tracing_running = True
    agent.Start(trace_options, category_filter, 10)
    return agent

  def StopTracing(self, agent):
    # Helper: clear the controller backend's running flag and stop the agent.
    agent._platform_backend.tracing_controller_backend.is_tracing_running = (
        False)
    agent.Stop(None)

  def testRegisterDevtoolsClient(self):
    chrome_tracing_devtools_manager.RegisterDevToolsClient(
        FakeDevtoolsClient(1), self.platform1)
    chrome_tracing_devtools_manager.RegisterDevToolsClient(
        FakeDevtoolsClient(2), self.platform1)
    chrome_tracing_devtools_manager.RegisterDevToolsClient(
        FakeDevtoolsClient(3), self.platform1)

    tracing_agent_of_platform1 = self.StartTracing(self.platform1)

    # Registration must keep working while tracing is underway.
    chrome_tracing_devtools_manager.RegisterDevToolsClient(
        FakeDevtoolsClient(4), self.platform1)
    chrome_tracing_devtools_manager.RegisterDevToolsClient(
        FakeDevtoolsClient(5), self.platform2)

    self.StopTracing(tracing_agent_of_platform1)
    chrome_tracing_devtools_manager.RegisterDevToolsClient(
        FakeDevtoolsClient(6), self.platform1)

  def testIsSupportWithoutStartupTracingSupport(self):
    self.assertFalse(
        chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform1))
    self.assertFalse(
        chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform2))
    self.assertFalse(
        chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform3))

    devtool1 = FakeDevtoolsClient(1)
    devtool2 = FakeDevtoolsClient(2)
    chrome_tracing_devtools_manager.RegisterDevToolsClient(
        devtool1, self.platform1)
    chrome_tracing_devtools_manager.RegisterDevToolsClient(
        devtool2, self.platform2)
    devtool2.is_alive = False

    # Chrome tracing is only supported on platform 1 since only platform 1 has
    # an alive devtool.
    self.assertTrue(
        chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform1))
    self.assertFalse(
        chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform2))
    self.assertFalse(
        chrome_tracing_agent.ChromeTracingAgent.IsSupported(self.platform3))

  @decorators.Enabled('linux', 'mac', 'win')
  def testIsSupportOnDesktopPlatform(self):
    # Chrome tracing is always supported on desktop platforms because of startup
    # tracing.
    desktop_platform = FakeDesktopPlatformBackend()
    self.assertTrue(
        chrome_tracing_agent.ChromeTracingAgent.IsSupported(desktop_platform))

    devtool = FakeDevtoolsClient(1)
    chrome_tracing_devtools_manager.RegisterDevToolsClient(
        devtool, desktop_platform)
    self.assertTrue(
        chrome_tracing_agent.ChromeTracingAgent.IsSupported(desktop_platform))

  def testStartAndStopTracing(self):
    devtool1 = FakeDevtoolsClient(1)
    devtool2 = FakeDevtoolsClient(2)
    devtool3 = FakeDevtoolsClient(3)
    devtool4 = FakeDevtoolsClient(2)
    # Register devtools 1, 2, 3 on platform1 and devtool 4 on platform 2
    chrome_tracing_devtools_manager.RegisterDevToolsClient(
        devtool1, self.platform1)
    chrome_tracing_devtools_manager.RegisterDevToolsClient(
        devtool2, self.platform1)
    chrome_tracing_devtools_manager.RegisterDevToolsClient(
        devtool3, self.platform1)
    chrome_tracing_devtools_manager.RegisterDevToolsClient(
        devtool4, self.platform2)
    devtool2.is_alive = False

    tracing_agent1 = self.StartTracing(self.platform1)
    # A second concurrent start on the same platform must be rejected.
    with self.assertRaises(chrome_tracing_agent.ChromeTracingStartedError):
      self.StartTracing(self.platform1)

    self.assertTrue(devtool1.is_tracing_running)
    self.assertFalse(devtool2.is_tracing_running)
    self.assertTrue(devtool3.is_tracing_running)
    # Devtool 4 shouldn't have tracing started although it has the same remote
    # port as devtool 2
    self.assertFalse(devtool4.is_tracing_running)

    self.StopTracing(tracing_agent1)
    self.assertFalse(devtool1.is_tracing_running)
    self.assertFalse(devtool2.is_tracing_running)
    self.assertFalse(devtool3.is_tracing_running)
    self.assertFalse(devtool4.is_tracing_running)

    # Test that it should be ok to start & stop tracing on platform1 again.
    tracing_agent1 = self.StartTracing(self.platform1)
    self.StopTracing(tracing_agent1)

    tracing_agent2 = self.StartTracing(self.platform2)
    self.assertTrue(devtool4.is_tracing_running)
    self.StopTracing(tracing_agent2)
    self.assertFalse(devtool4.is_tracing_running)

  def testExceptionRaisedInStopTracing(self):
    devtool1 = FakeDevtoolsClient(1)
    devtool2 = FakeDevtoolsClient(2)
    # Register devtools 1, 2 on platform 1
    chrome_tracing_devtools_manager.RegisterDevToolsClient(
        devtool1, self.platform1)
    chrome_tracing_devtools_manager.RegisterDevToolsClient(
        devtool2, self.platform1)
    tracing_agent1 = self.StartTracing(self.platform1)

    self.assertTrue(devtool1.is_tracing_running)
    self.assertTrue(devtool2.is_tracing_running)

    devtool1.will_raise_exception_in_stop_tracing = True
    with self.assertRaises(chrome_tracing_agent.ChromeTracingStoppedError):
      self.StopTracing(tracing_agent1)
    # Tracing is stopped on both devtools clients even if there is exception.
    self.assertIsNone(tracing_agent1.trace_config)
    self.assertFalse(devtool1.is_tracing_running)
    self.assertFalse(devtool2.is_tracing_running)

    devtool1.is_alive = False
    devtool2.is_alive = False
    # Register devtools 3 on platform 1 should not raise any exception.
    devtool3 = FakeDevtoolsClient(3)
    chrome_tracing_devtools_manager.RegisterDevToolsClient(
        devtool3, self.platform1)

    # Start & Stop tracing on platform 1 should work just fine.
    tracing_agent2 = self.StartTracing(self.platform1)
    self.StopTracing(tracing_agent2)

  @decorators.Enabled('android')
  def testCreateAndRemoveTraceConfigFileOnAndroid(self):
    platform_backend = FakeAndroidPlatformBackend()
    agent = chrome_tracing_agent.ChromeTracingAgent(platform_backend)
    self.assertIsNone(agent.trace_config_file)

    config = tracing_config.TracingConfig(
        tracing_options.TracingOptions(),
        tracing_category_filter.TracingCategoryFilter())
    agent._CreateTraceConfigFile(config)
    self.assertIsNotNone(agent.trace_config_file)
    self.assertTrue(platform_backend.device.PathExists(agent.trace_config_file))
    config_file_str = platform_backend.device.ReadFile(agent.trace_config_file,
                                                       as_root=True)
    self.assertEqual(agent._CreateTraceConfigFileString(config),
                     config_file_str.strip())

    config_file_path = agent.trace_config_file
    agent._RemoveTraceConfigFile()
    self.assertFalse(platform_backend.device.PathExists(config_file_path))
    self.assertIsNone(agent.trace_config_file)
    # robust to multiple file removal
    agent._RemoveTraceConfigFile()
    self.assertFalse(platform_backend.device.PathExists(config_file_path))
    self.assertIsNone(agent.trace_config_file)

  @decorators.Enabled('linux', 'mac', 'win')
  def testCreateAndRemoveTraceConfigFileOnDesktop(self):
    platform_backend = FakeDesktopPlatformBackend()
    agent = chrome_tracing_agent.ChromeTracingAgent(platform_backend)
    self.assertIsNone(agent.trace_config_file)

    config = tracing_config.TracingConfig(
        tracing_options.TracingOptions(),
        tracing_category_filter.TracingCategoryFilter())
    agent._CreateTraceConfigFile(config)
    self.assertIsNotNone(agent.trace_config_file)
    self.assertTrue(os.path.exists(agent.trace_config_file))
    # Config file must be world-readable so the browser process can read it.
    self.assertTrue(os.stat(agent.trace_config_file).st_mode & stat.S_IROTH)
    with open(agent.trace_config_file, 'r') as f:
      config_file_str = f.read()
    self.assertEqual(agent._CreateTraceConfigFileString(config),
                     config_file_str.strip())

    config_file_path = agent.trace_config_file
    agent._RemoveTraceConfigFile()
    self.assertFalse(os.path.exists(config_file_path))
    self.assertIsNone(agent.trace_config_file)
    # robust to multiple file removal
    agent._RemoveTraceConfigFile()
    self.assertFalse(os.path.exists(config_file_path))
    self.assertIsNone(agent.trace_config_file)
| bsd-3-clause |
pernici/sympy | sympy/functions/special/bessel.py | 2 | 4464 | from math import pi
from sympy.core import sympify
from sympy.functions.elementary.trigonometric import sin, cos
def fn(n, z):
    """
    Coefficients for the spherical Bessel functions.

    Those are only needed in the jn() function.

    The coefficients are calculated from:

    fn(0, z) = 1/z
    fn(1, z) = 1/z**2
    fn(n-1, z) + fn(n+1, z) == (2*n+1)/z * fn(n, z)

    Examples:

    >>> from sympy.functions.special.bessel import fn
    >>> from sympy import Symbol
    >>> z = Symbol("z")
    >>> fn(1, z)
    z**(-2)
    >>> fn(2, z)
    -1/z + 3/z**3
    >>> fn(3, z)
    15/z**4 - 6/z**2
    >>> fn(4, z)
    1/z + 105/z**5 - 45/z**3

    """
    n = sympify(n)
    if not n.is_Integer:
        raise TypeError("'n' must be an Integer")
    if n == 0:
        return 1/z
    elif n == 1:
        return 1/z**2
    elif n > 1:
        # upward recurrence derived from the three-term relation above.
        # NOTE(review): the two recursive calls make this exponential in n;
        # memoization would make it linear -- confirm before changing.
        return ((2*n-1)/z * fn(n-1, z)).expand() - fn(n-2, z)
    elif n < 0:
        # downward recurrence for negative orders
        return ((2*n+3)/z * fn(n+1, z)).expand() - fn(n+2, z)
def jn(n, z):
    """
    Spherical Bessel function of the first kind.

    Examples:

    >>> from sympy import Symbol, jn, sin, cos
    >>> z = Symbol("z")
    >>> print jn(0, z)
    sin(z)/z
    >>> jn(1, z) == sin(z)/z**2 - cos(z)/z
    True
    >>> jn(3, z) ==(1/z - 15/z**3)*cos(z) + (15/z**4 - 6/z**2)*sin(z)
    True

    The spherical Bessel functions are calculated using the formula:

    jn(n, z) == fn(n, z) * sin(z) + (-1)**(n+1) * fn(-n-1, z) * cos(z)

    where fn(n, z) are the coefficients, see fn()'s sourcecode for more
    information.
    """
    n = sympify(n)
    z = sympify(z)
    # combine the rational coefficient polynomials fn() with sin/cos per the
    # closed-form expression documented above
    return fn(n, z) * sin(z) + (-1)**(n+1) * fn(-n-1, z) * cos(z)
def yn(n, z):
    """
    Spherical Bessel function of the second kind.

    Examples:

    >>> from sympy import Symbol, yn, sin, cos
    >>> z = Symbol("z")
    >>> print yn(0, z)
    -cos(z)/z
    >>> yn(1, z) == -cos(z)/z**2-sin(z)/z
    True

    yn is calculated using the formula:

    yn(n, z) == (-1)**(n+1) * jn(-n-1, z)
    """
    n = sympify(n)
    z = sympify(z)
    # reflection formula expressing yn through jn of negative order
    return (-1)**(n+1) * jn(-n-1, z)
def jn_zeros(n, k, method="sympy"):
    """
    Zeros of the spherical Bessel function of the first kind.

    This returns an array of zeros of jn up to the k-th zero.

    method = "sympy": uses the SymPy's jn and findroot to find all roots
    method = "scipy": uses the SciPy's sph_jn and newton to find all roots,
        which if faster than method="sympy", but it requires SciPy and only
        works with low precision floating point numbers
    method = "mpmath": uses mpmath's besseljzero (needs a recent mpmath)

    Examples:

    >>> from sympy.mpmath import nprint
    >>> from sympy import jn_zeros
    >>> nprint(jn_zeros(2, 4))
    [5.76345919689, 9.09501133048, 12.3229409706, 15.5146030109]

    """
    if method == "sympy":
        from sympy.mpmath import findroot
        f = lambda x: jn(n, x).n()
    elif method == "scipy":
        from scipy.special import sph_jn
        from scipy.optimize import newton
        f = lambda x: sph_jn(n, x)[0][-1]
    elif method == 'mpmath':
        # this needs a recent version of mpmath, newer than in sympy
        # NOTE: xrange is Python 2; this module predates Python 3 support.
        from mpmath import besseljzero
        return [besseljzero(n + 0.5, k) for k in xrange(1, k + 1)]
    else:
        raise NotImplementedError("Unknown method.")

    def solver(f, x):
        # Find a single root of f near the starting point x using the
        # backend selected above.
        if method == "sympy":
            # findroot(solver="newton") or findroot(solver="secant") can't find
            # the root within the given tolerance. So we use solver="muller",
            # which converges towards complex roots (even for real starting
            # points), and so we need to chop all complex parts (that are small
            # anyway). Also we need to set the tolerance, as it sometimes fail
            # without it.
            def f_real(x):
                return f(complex(x).real)
            root = findroot(f_real, x, solver="muller", tol=1e-9)
            root = complex(root).real
        elif method == "scipy":
            root = newton(f, x)
        else:
            raise NotImplementedError("Unknown method.")
        return root

    # we need to approximate the position of the first root:
    root = n+pi
    # determine the first root exactly:
    root = solver(f, root)
    roots = [root]
    for i in range(k-1):
        # estimate the position of the next root using the last root + pi:
        root = solver(f, root+pi)
        roots.append(root)
    return roots
| bsd-3-clause |
ghowland/gomh | _backups/gomh_002.py | 1 | 1296 | #!/usr/bin/env python
import pygame
import sys
# Half-scale sprite frame size (the source frames are 85x112 px).
# NOTE: relies on Python 2 integer division.
sprite_size = [85/2, 112/2]

pygame.init()

size = (640, 480)
screen = pygame.display.set_mode(size)
pygame.display.set_caption('Get Off My Head')
#pygame.mouse.set_visible(0)

# Load the sprite sheet and scale it down to half size.
image = pygame.image.load('sf_sprites.png')
image = pygame.transform.scale(image, (image.get_width()/2, image.get_height()/2))
image = image.convert_alpha()
sf_sprites = image

# Static background scene.
scene = pygame.image.load('sf_back.png')

# Cut the first character frame out of the top-left of the sprite sheet.
guy0 = pygame.Surface(sprite_size)
guy0.convert_alpha()
guy0.blit(sf_sprites, (0,0), [0, 0, sprite_size[0], sprite_size[1]])

# Off-screen buffer everything is composed into before flipping.
background = pygame.Surface(screen.get_size())
background = background.convert()
background.fill((0, 0, 0))

# Player position in pixels; mutated in place by the key handlers below.
guy0_pos = [0, 0]

# Main loop: poll events, move the sprite 2 px per frame per held arrow
# key, recompose the frame and flip.
while True:
    # NOTE(review): this blit is overdrawn by the scene blit below on every
    # frame — presumably redundant; confirm before removing.
    background.blit(guy0, guy0_pos)

    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit(0)

    keys = pygame.key.get_pressed() #checking pressed keys
    if keys[pygame.K_LEFT]:
        guy0_pos[0] -= 2
    if keys[pygame.K_RIGHT]:
        guy0_pos[0] += 2
    if keys[pygame.K_UP]:
        guy0_pos[1] -= 2
    if keys[pygame.K_DOWN]:
        guy0_pos[1] += 2

    # Render background
    #background.fill((0, 0, 0))
    background.blit(scene, (0, 0))
    background.blit(guy0, guy0_pos)

    # Render to screen
    screen.blit(background, (0,0))
    pygame.display.flip()
| mit |
andre-senna/opencog | opencog/python/blending/src/decider/decide_best_sti.py | 22 | 3079 | from blending.src.decider.base_decider import BaseDecider
from blending.util.blending_config import BlendConfig
from blending.util.blending_error import blending_status
__author__ = 'DongMin Kim'
class DecideBestSTI(BaseDecider):
    """Blending decider that decides to blend or not by checking the existence
    of atoms within the proper STI range.

    This decider estimates that the chosen atoms are worthwhile when they have
    an STI value higher than the given minimum.
    """

    def __init__(self, a):
        # Name the class explicitly: super(self.__class__, ...) recurses
        # infinitely if this class is ever subclassed.
        super(DecideBestSTI, self).__init__(a)

    def make_default_config(self):
        """Initialize a default config for this class."""
        super(DecideBestSTI, self).make_default_config()
        BlendConfig().update(self.a, "decide-sti-min", "1")
        BlendConfig().update(self.a, "decide-sti-max", "None")

    def __decide_atoms_best_sti(
            self, chosen_atoms, result_atoms_count, sti_min, sti_max
    ):
        """Actual algorithm for deciding blend.

        Keeps the atoms whose STI lies in [sti_min, sti_max) — the upper
        bound is ignored when sti_max is None — and then retains the
        result_atoms_count atoms with the highest STI in self.ret.

        Args:
            chosen_atoms: The atoms to decide.
            result_atoms_count: Threshold value for minimum count of decided
                atoms.
            sti_min: Threshold value for minimum value of STI.
            sti_max: Threshold value for maximum value of STI, or None.
        :param chosen_atoms: list[Atom]
        :param result_atoms_count: int
        :param sti_min: int
        :param sti_max: int | None
        """
        # List comprehensions instead of filter(): filter() returns a lazy
        # iterator on Python 3, which would break the len()/sorted()/slice
        # operations below.  Behavior on Python 2 is unchanged.
        if sti_max is None:
            self.ret = [
                atom for atom in chosen_atoms
                if sti_min <= atom.av["sti"]
            ]
        else:
            self.ret = [
                atom for atom in chosen_atoms
                if sti_min <= atom.av["sti"] < sti_max
            ]

        if len(self.ret) < result_atoms_count:
            self.last_status = blending_status.NOT_ENOUGH_ATOMS
            return

        # Keep only the result_atoms_count atoms with the biggest STI values.
        self.ret = sorted(
            self.ret,
            key=lambda atom: atom.av['sti'],
            reverse=True
        )[0:result_atoms_count]

    def blending_decide_impl(self, chosen_atoms, config_base):
        """Implemented factory method to deciding atoms.

        Args:
            chosen_atoms: The atoms to decide.
            config_base: A Node to save custom config.
        :param chosen_atoms: list[Atom]
        :param config_base: Atom
        """
        result_atoms_count = BlendConfig().get_int(
            self.a, "decide-result-atoms-count", config_base
        )
        sti_min = BlendConfig().get_str(self.a, "decide-sti-min", config_base)
        sti_max = BlendConfig().get_str(self.a, "decide-sti-max", config_base)

        # Fall back to defaults when the configured values are not plain
        # non-negative integers (e.g. the "None" default for sti_max).
        sti_min = int(sti_min) if sti_min.isdigit() else 1
        sti_max = int(sti_max) if sti_max.isdigit() else None

        self.__decide_atoms_best_sti(
            chosen_atoms, result_atoms_count, sti_min, sti_max
        )
| agpl-3.0 |
GustavoHennig/ansible | test/units/modules/cloud/amazon/test_lambda.py | 47 | 11588 | #
# (c) 2017 Michael De La Rue
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
import pytest
boto3 = pytest.importorskip("boto3")
import json
import copy
from ansible.module_utils._text import to_bytes
from ansible.module_utils import basic
from ansible.compat.tests.mock import MagicMock, Mock, patch
# lambda is a keyword so we have to hack this.
# __import__ returns the top-level "ansible" package; walk down to the
# "lambda" module via getattr, since "lambda" is a reserved word and cannot
# appear in a normal import statement.
_temp = __import__("ansible.modules.cloud.amazon.lambda")
lda = getattr(_temp.modules.cloud.amazon,"lambda")
def set_module_args(args):
    """Inject *args* as the argument blob AnsibleModule will parse on startup."""
    serialized = json.dumps({'ANSIBLE_MODULE_ARGS': args})
    basic._ANSIBLE_ARGS = to_bytes(serialized)
# Canonical AWS-side configuration of the test Lambda function.
base_lambda_config = {
    'FunctionName': 'lambda_name',
    'Role': 'arn:aws:iam::987654321012:role/lambda_basic_execution',
    'Handler': 'lambda_python.my_handler',
    'Description': 'this that the other',
    'Timeout': 3,
    'MemorySize': 128,
    'Runtime': 'python2.7',
    'CodeSha256': 'AqMZ+xptM7aC9VXu+5jyp1sqO+Nj4WFMNzQxtPMP2n8=',
}

# Shallow variants of the base configuration with selected fields changed.
one_change_lambda_config = dict(base_lambda_config, Timeout=4)

two_change_lambda_config = dict(
    one_change_lambda_config,
    Role='arn:aws:iam::987654321012:role/lambda_advanced_execution')

code_change_lambda_config = dict(
    base_lambda_config,
    CodeSha256='P+Zy8U4T4RiiHWElhL10VBKj9jw4rSJ5bm/TiW+4Rts=')

# Arguments the Ansible module is invoked with in most tests below.
base_module_args = {
    "region": "us-west-1",
    "name": "lambda_name",
    "state": "present",
    "zip_file": "test/units/modules/cloud/amazon/fixtures/thezip.zip",
    "runtime": 'python2.7',
    "role": 'arn:aws:iam::987654321012:role/lambda_basic_execution',
    "memory_size": 128,
    "timeout": 3,
    "handler": 'lambda_python.my_handler'
}

# Same invocation, plus an explicit environment-variable mapping.
module_args_with_environment = dict(base_module_args, environment_variables={
    "variable_name": "variable_value"
})
def make_mock_no_connection_connection(config):
    """return a mock of ansible's boto3_conn ready to return a mock AWS API client"""
    # The client reports no existing function and a fixed update result;
    # *config* is accepted for signature parity but unused in this case.
    client_double = MagicMock()
    client_double.get_function.return_value = False
    client_double.update_function_configuration.return_value = {'Version': 1}
    boto3_conn_double = Mock(return_value=client_double)
    return (boto3_conn_double, client_double)
def make_mock_connection(config):
    """return a mock of ansible's boto3_conn ready to return a mock AWS API client"""
    # The client reports an existing function whose configuration is *config*.
    client_double = MagicMock()
    client_double.get_function.return_value = {'Configuration': config}
    client_double.update_function_configuration.return_value = {'Version': 1}
    boto3_conn_double = Mock(return_value=client_double)
    return (boto3_conn_double, client_double)
class AnsibleFailJson(Exception):
    """Raised in place of AnsibleModule.fail_json's exit, carrying the module result in args[0]."""
    pass
def fail_json_double(*args, **kwargs):
    """works like fail_json but returns module results inside exception instead of stdout"""
    raise AnsibleFailJson(dict(kwargs, failed=True))
#TODO: def test_handle_different_types_in_config_params():
def test_create_lambda_if_not_exist():
    """When AWS reports no existing function, main() must create it and call no update APIs."""
    set_module_args(base_module_args)
    (boto3_conn_double, lambda_client_double)=make_mock_no_connection_connection(code_change_lambda_config)
    with patch.object(lda, 'boto3_conn', boto3_conn_double):
        try:
            lda.main()
        except SystemExit:
            pass

    # guard against calling other than for a lambda connection (e.g. IAM)
    assert(len(boto3_conn_double.mock_calls) == 1), "multiple boto connections used unexpectedly"
    assert(len(lambda_client_double.update_function_configuration.mock_calls) == 0), \
        "unexpectedly updated lambda configuration when should have only created"
    assert(len(lambda_client_double.update_function_code.mock_calls) == 0), \
        "update lambda function code when function should have been created only"
    assert(len(lambda_client_double.create_function.mock_calls) > 0), \
        "failed to call create_function "
    (create_args, create_kwargs)=lambda_client_double.create_function.call_args
    assert (len(create_kwargs) > 0), "expected create called with keyword args, none found"

    try:
        # For now I assume that we should NOT send an empty environment. It might
        # be okay / better to explicitly send an empty environment. However `None'
        # is not acceptable - mikedlr
        create_kwargs["Environment"]
        raise(Exception("Environment sent to boto when none expected"))
    except KeyError:
        pass #We are happy, no environment is fine
def test_update_lambda_if_code_changed():
    """When only CodeSha256 differs, main() must update the code, not the configuration."""
    set_module_args(base_module_args)
    (boto3_conn_double, lambda_client_double)=make_mock_connection(code_change_lambda_config)
    with patch.object(lda, 'boto3_conn', boto3_conn_double):
        try:
            lda.main()
        except SystemExit:
            pass

    # guard against calling other than for a lambda connection (e.g. IAM)
    assert(len(boto3_conn_double.mock_calls) == 1), "multiple boto connections used unexpectedly"
    assert(len(lambda_client_double.update_function_configuration.mock_calls) == 0), \
        "unexpectedly updatede lambda configuration when only code changed"
    # NOTE(review): the following assertion is redundant after the == 0 check
    # above — presumably a copy-paste leftover; confirm intent before removing.
    assert(len(lambda_client_double.update_function_configuration.mock_calls) < 2), \
        "lambda function update called multiple times when only one time should be needed"
    assert(len(lambda_client_double.update_function_code.mock_calls) > 1), \
        "failed to update lambda function when code changed"
    # 3 because after uploading we call into the return from mock to try to find what function version
    # was returned so the MagicMock actually sees two calls for one update.
    assert(len(lambda_client_double.update_function_code.mock_calls) < 3), \
        "lambda function code update called multiple times when only one time should be needed"
def test_update_lambda_if_config_changed():
    """When two configuration fields differ, main() must update the configuration exactly once."""
    set_module_args(base_module_args)
    (boto3_conn_double,lambda_client_double)=make_mock_connection(two_change_lambda_config)
    with patch.object(lda, 'boto3_conn', boto3_conn_double):
        try:
            lda.main()
        except SystemExit:
            pass

    # guard against calling other than for a lambda connection (e.g. IAM)
    assert(len(boto3_conn_double.mock_calls) == 1), "multiple boto connections used unexpectedly"
    assert(len(lambda_client_double.update_function_configuration.mock_calls) > 0), \
        "failed to update lambda function when configuration changed"
    assert(len(lambda_client_double.update_function_configuration.mock_calls) < 2), \
        "lambda function update called multiple times when only one time should be needed"
    assert(len(lambda_client_double.update_function_code.mock_calls) == 0), \
        "updated lambda code when no change should have happened"
def test_update_lambda_if_only_one_config_item_changed():
    """A single changed configuration field must still trigger exactly one configuration update."""
    set_module_args(base_module_args)
    (boto3_conn_double,lambda_client_double)=make_mock_connection(one_change_lambda_config)
    with patch.object(lda, 'boto3_conn', boto3_conn_double):
        try:
            lda.main()
        except SystemExit:
            pass

    # guard against calling other than for a lambda connection (e.g. IAM)
    assert(len(boto3_conn_double.mock_calls) == 1), "multiple boto connections used unexpectedly"
    assert(len(lambda_client_double.update_function_configuration.mock_calls) > 0), \
        "failed to update lambda function when configuration changed"
    assert(len(lambda_client_double.update_function_configuration.mock_calls) < 2), \
        "lambda function update called multiple times when only one time should be needed"
    assert(len(lambda_client_double.update_function_code.mock_calls) == 0), \
        "updated lambda code when no change should have happened"
def test_update_lambda_if_added_environment_variable():
    """Adding environment variables must trigger one configuration update carrying them."""
    set_module_args(module_args_with_environment)
    (boto3_conn_double,lambda_client_double)=make_mock_connection(base_lambda_config)
    with patch.object(lda, 'boto3_conn', boto3_conn_double):
        try:
            lda.main()
        except SystemExit:
            pass

    # guard against calling other than for a lambda connection (e.g. IAM)
    assert(len(boto3_conn_double.mock_calls) == 1), "multiple boto connections used unexpectedly"
    assert(len(lambda_client_double.update_function_configuration.mock_calls) > 0), \
        "failed to update lambda function when configuration changed"
    assert(len(lambda_client_double.update_function_configuration.mock_calls) < 2), \
        "lambda function update called multiple times when only one time should be needed"
    assert(len(lambda_client_double.update_function_code.mock_calls) == 0), \
        "updated lambda code when no change should have happened"

    # the environment variables must be passed through verbatim
    (update_args, update_kwargs)=lambda_client_double.update_function_configuration.call_args
    assert (len(update_kwargs) > 0), "expected update configuration called with keyword args, none found"
    assert update_kwargs['Environment']['Variables'] == module_args_with_environment['environment_variables']
def test_dont_update_lambda_if_nothing_changed():
    """When the live configuration matches the module arguments, no update API may be called."""
    set_module_args(base_module_args)
    (boto3_conn_double,lambda_client_double)=make_mock_connection(base_lambda_config)
    with patch.object(lda, 'boto3_conn', boto3_conn_double):
        try:
            lda.main()
        except SystemExit:
            pass

    # guard against calling other than for a lambda connection (e.g. IAM)
    assert(len(boto3_conn_double.mock_calls) == 1), "multiple boto connections used unexpectedly"
    assert(len(lambda_client_double.update_function_configuration.mock_calls) == 0), \
        "updated lambda function when no configuration changed"
    assert(len(lambda_client_double.update_function_code.mock_calls) == 0 ), \
        "updated lambda code when no change should have happened"
def test_warn_region_not_specified():
    """Omitting the region must make the module fail with a 'region must be specified' message."""
    set_module_args({
        "name": "lambda_name",
        "state": "present",
        # Module is called without a region causing error
        # "region": "us-east-1",
        "zip_file": "test/units/modules/cloud/amazon/fixtures/thezip.zip",
        "runtime": 'python2.7',
        "role": 'arn:aws:iam::987654321012:role/lambda_basic_execution',
        "handler": 'lambda_python.my_handler'})

    # simulate connection info with no region resolved from the environment
    get_aws_connection_info_double=Mock(return_value=(None,None,None))

    with patch.object(lda, 'get_aws_connection_info', get_aws_connection_info_double):
        with patch.object(basic.AnsibleModule, 'fail_json', fail_json_double):
            try:
                lda.main()
            except AnsibleFailJson as e:
                result = e.args[0]
                assert("region must be specified" in result['msg'])
| gpl-3.0 |
SamYaple/ansible-modules-extras | cloud/cloudstack/cs_iso.py | 33 | 10741 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# (c) 2015, René Moser <mail@renemoser.net>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: cs_iso
short_description: Manages ISO images on Apache CloudStack based clouds.
description:
- Register and remove ISO images.
version_added: '2.0'
author: "René Moser (@resmo)"
options:
name:
description:
- Name of the ISO.
required: true
url:
description:
- URL where the ISO can be downloaded from. Required if C(state) is present.
required: false
default: null
os_type:
description:
- Name of the OS that best represents the OS of this ISO. If the iso is bootable this parameter needs to be passed. Required if C(state) is present.
required: false
default: null
is_ready:
description:
- This flag is used for searching existing ISOs. If set to C(true), it will only list ISO ready for deployment e.g. successfully downloaded and installed. Recommended to set it to C(false).
required: false
default: false
aliases: []
is_public:
description:
- Register the ISO to be publicly available to all users. Only used if C(state) is present.
required: false
default: false
is_featured:
description:
- Register the ISO to be featured. Only used if C(state) is present.
required: false
default: false
is_dynamically_scalable:
description:
- Register the ISO having XS/VMWare tools installed in order to support dynamic scaling of VM cpu/memory. Only used if C(state) is present.
required: false
default: false
aliases: []
checksum:
description:
- The MD5 checksum value of this ISO. If set, we search by checksum instead of name.
required: false
default: false
bootable:
description:
- Register the ISO to be bootable. Only used if C(state) is present.
required: false
default: true
domain:
description:
- Domain the ISO is related to.
required: false
default: null
account:
description:
- Account the ISO is related to.
required: false
default: null
project:
description:
- Name of the project the ISO to be registered in.
required: false
default: null
zone:
description:
- Name of the zone you wish the ISO to be registered or deleted from. If not specified, first zone found will be used.
required: false
default: null
iso_filter:
description:
- Name of the filter used to search for the ISO.
required: false
default: 'self'
choices: [ 'featured', 'self', 'selfexecutable','sharedexecutable','executable', 'community' ]
state:
description:
- State of the ISO.
required: false
default: 'present'
choices: [ 'present', 'absent' ]
extends_documentation_fragment: cloudstack
'''
EXAMPLES = '''
# Register an ISO if ISO name does not already exist.
- local_action:
module: cs_iso
name: Debian 7 64-bit
url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso
os_type: Debian GNU/Linux 7(64-bit)
# Register an ISO with given name if ISO md5 checksum does not already exist.
- local_action:
module: cs_iso
name: Debian 7 64-bit
url: http://mirror.switch.ch/ftp/mirror/debian-cd/current/amd64/iso-cd/debian-7.7.0-amd64-netinst.iso
os_type: Debian GNU/Linux 7(64-bit)
checksum: 0b31bccccb048d20b551f70830bb7ad0
# Remove an ISO by name
- local_action:
module: cs_iso
name: Debian 7 64-bit
state: absent
# Remove an ISO by checksum
- local_action:
module: cs_iso
name: Debian 7 64-bit
checksum: 0b31bccccb048d20b551f70830bb7ad0
state: absent
'''
RETURN = '''
---
id:
description: UUID of the ISO.
returned: success
type: string
sample: a6f7a5fc-43f8-11e5-a151-feff819cdc9f
name:
description: Name of the ISO.
returned: success
type: string
sample: Debian 7 64-bit
display_text:
description: Text to be displayed of the ISO.
returned: success
type: string
sample: Debian 7.7 64-bit minimal 2015-03-19
zone:
description: Name of zone the ISO is registered in.
returned: success
type: string
sample: zuerich
status:
description: Status of the ISO.
returned: success
type: string
sample: Successfully Installed
is_ready:
description: True if the ISO is ready to be deployed from.
returned: success
type: boolean
sample: true
checksum:
description: MD5 checksum of the ISO.
returned: success
type: string
sample: 0b31bccccb048d20b551f70830bb7ad0
created:
description: Date of registering.
returned: success
type: string
sample: 2015-03-29T14:57:06+0200
domain:
description: Domain the ISO is related to.
returned: success
type: string
sample: example domain
account:
description: Account the ISO is related to.
returned: success
type: string
sample: example account
project:
description: Project the ISO is related to.
returned: success
type: string
sample: example project
'''
try:
from cs import CloudStack, CloudStackException, read_config
has_lib_cs = True
except ImportError:
has_lib_cs = False
# import cloudstack common
from ansible.module_utils.cloudstack import *
class AnsibleCloudStackIso(AnsibleCloudStack):
    """Handles registration, lookup and removal of CloudStack ISO images."""

    def __init__(self, module):
        super(AnsibleCloudStackIso, self).__init__(module)
        # Extra API result keys copied verbatim into the module result.
        self.returns = {
            'checksum': 'checksum',
            'status': 'status',
            'isready': 'is_ready',
        }
        # Cache for the looked-up ISO; filled lazily by get_iso().
        self.iso = None

    def register_iso(self):
        """Register the ISO if it does not exist yet.

        Returns the existing or newly registered ISO dict; in check mode,
        when the ISO did not exist, returns None after flagging a change.
        """
        iso = self.get_iso()
        if not iso:
            args = {}
            args['zoneid'] = self.get_zone('id')
            args['domainid'] = self.get_domain('id')
            args['account'] = self.get_account('name')
            args['projectid'] = self.get_project('id')
            args['bootable'] = self.module.params.get('bootable')
            args['ostypeid'] = self.get_os_type('id')
            args['name'] = self.module.params.get('name')
            args['displaytext'] = self.module.params.get('name')
            args['checksum'] = self.module.params.get('checksum')
            args['isdynamicallyscalable'] = self.module.params.get('is_dynamically_scalable')
            args['isfeatured'] = self.module.params.get('is_featured')
            args['ispublic'] = self.module.params.get('is_public')

            # A bootable ISO cannot be registered without knowing its OS type.
            if args['bootable'] and not args['ostypeid']:
                self.module.fail_json(msg="OS type 'os_type' is required if 'bootable=true'.")

            args['url'] = self.module.params.get('url')
            if not args['url']:
                self.module.fail_json(msg="URL is required.")

            self.result['changed'] = True
            if not self.module.check_mode:
                res = self.cs.registerIso(**args)
                iso = res['iso'][0]
        return iso

    def get_iso(self):
        """Look up the ISO by checksum (preferred) or by name; cache the result."""
        if not self.iso:
            args = {}
            args['isready'] = self.module.params.get('is_ready')
            args['isofilter'] = self.module.params.get('iso_filter')
            args['domainid'] = self.get_domain('id')
            args['account'] = self.get_account('name')
            args['projectid'] = self.get_project('id')
            args['zoneid'] = self.get_zone('id')

            # if checksum is set, we only look on that.
            checksum = self.module.params.get('checksum')
            if not checksum:
                args['name'] = self.module.params.get('name')

            isos = self.cs.listIsos(**args)
            if isos:
                if not checksum:
                    self.iso = isos['iso'][0]
                else:
                    for i in isos['iso']:
                        if i['checksum'] == checksum:
                            self.iso = i
                            break
        return self.iso

    def remove_iso(self):
        """Delete the ISO if it exists; returns the removed ISO or None."""
        iso = self.get_iso()
        if iso:
            self.result['changed'] = True
            args = {}
            args['id'] = iso['id']
            args['projectid'] = self.get_project('id')
            args['zoneid'] = self.get_zone('id')
            if not self.module.check_mode:
                res = self.cs.deleteIso(**args)
        return iso
def main():
    """Module entry point: parse arguments and register or remove the ISO."""
    argument_spec = cs_argument_spec()
    argument_spec.update(
        name=dict(required=True),
        url=dict(default=None),
        os_type=dict(default=None),
        zone=dict(default=None),
        iso_filter=dict(default='self', choices=['featured', 'self', 'selfexecutable', 'sharedexecutable', 'executable', 'community']),
        domain=dict(default=None),
        account=dict(default=None),
        project=dict(default=None),
        checksum=dict(default=None),
        is_ready=dict(type='bool', default=False),
        bootable=dict(type='bool', default=True),
        is_featured=dict(type='bool', default=False),
        is_dynamically_scalable=dict(type='bool', default=False),
        state=dict(choices=['present', 'absent'], default='present'),
    )

    module = AnsibleModule(
        argument_spec=argument_spec,
        required_together=cs_required_together(),
        supports_check_mode=True
    )

    if not has_lib_cs:
        module.fail_json(msg="python library cs required: pip install cs")

    try:
        acs_iso = AnsibleCloudStackIso(module)

        if module.params.get('state') == 'absent':
            iso = acs_iso.remove_iso()
        else:
            iso = acs_iso.register_iso()

        result = acs_iso.get_result(iso)
    except CloudStackException as e:
        module.fail_json(msg='CloudStackException: %s' % str(e))

    module.exit_json(**result)
# import module snippets
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| gpl-3.0 |
chaen/DIRAC | ResourceStatusSystem/Command/VOBOXAvailabilityCommand.py | 6 | 1936 | ''' VOBOXAvailabilityCommand module
'''
# FIXME: NOT Usable ATM
# missing doNew, doCache, doMaster
import urlparse
from DIRAC import S_OK, S_ERROR
from DIRAC.Core.DISET.RPCClient import RPCClient
from DIRAC.ResourceStatusSystem.Command.Command import Command
class VOBOXAvailabilityCommand(Command):
  '''
  Given an url pointing to a service on a vobox, use DIRAC ping against it.
  '''

  def doCommand(self):
    '''
    The Command pings a service on a vobox, it needs a service URL to ping it.

    :returns: a dict with the following:

      .. code-block:: python

        {
          'serviceUpTime' : <serviceUpTime>,
          'machineUpTime' : <machineUpTime>,
          'site' : <site>,
          'system' : <system>,
          'service' : <service>
        }

    '''

    # INPUT PARAMETERS
    if 'serviceURL' not in self.args:
      return self.returnERROR(S_ERROR('"serviceURL" not found in self.args'))
    serviceURL = self.args['serviceURL']

    # The host part (sans port) is the site; the path must be exactly
    # <system>/<service>.
    parsed = urlparse.urlparse(serviceURL)
    site = parsed.netloc.split(':')[0]
    try:
      system, service = parsed.path.strip('/').split('/')
    except ValueError:
      return self.returnERROR(S_ERROR('"%s" seems to be a malformed url' % serviceURL))

    pingRes = RPCClient(serviceURL).ping()
    if not pingRes['OK']:
      return self.returnERROR(pingRes)

    pingValue = pingRes['Value']
    return S_OK({
        'site': site,
        'system': system,
        'service': service,
        'serviceUpTime': pingValue.get('service uptime', 0),
        'machineUpTime': pingValue.get('host uptime', 0),
    })
# FIXME: how do we get the values !!
################################################################################
# EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF#EOF
| gpl-3.0 |
Deepakpatle/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/port/xvfbdriver_unittest.py | 118 | 7503 | # Copyright (C) 2012 Zan Dobersek <zandobersek@gmail.com>
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import unittest2 as unittest
from webkitpy.common.system.filesystem_mock import MockFileSystem
from webkitpy.common.system.executive_mock import MockExecutive2
from webkitpy.common.system.outputcapture import OutputCapture
from webkitpy.common.system.systemhost_mock import MockSystemHost
from webkitpy.port import Port
from webkitpy.port.server_process_mock import MockServerProcess
from webkitpy.port.xvfbdriver import XvfbDriver
from webkitpy.tool.mocktool import MockOptions
_log = logging.getLogger(__name__)
class XvfbDriverTest(unittest.TestCase):
    """Unit tests for XvfbDriver: X display allocation, Xvfb startup and teardown."""

    def make_driver(self, worker_number=0, xorg_running=False, executive=None):
        # Build a Port with a mocked host/executive and server process so no
        # real Xvfb is ever spawned.
        port = Port(MockSystemHost(log_executive=True, executive=executive), 'xvfbdrivertestport', options=MockOptions(configuration='Release'))
        port._config.build_directory = lambda configuration: "/mock-build"
        port._server_process_constructor = MockServerProcess
        if xorg_running:
            port._executive._running_pids['Xorg'] = 108
        driver = XvfbDriver(port, worker_number=worker_number, pixel_tests=True)
        driver._startup_delay_secs = 0
        return driver

    def cleanup_driver(self, driver):
        # Setting _xvfb_process member to None is necessary as the Driver object is stopped on deletion,
        # killing the Xvfb process if present. Thus, this method should only be called from tests that do not
        # intend to test the behavior of XvfbDriver.stop.
        driver._xvfb_process = None

    def assertDriverStartSuccessful(self, driver, expected_logs, expected_display, pixel_tests=False):
        # Start the driver, verifying both the logged mock commands and the
        # DISPLAY value handed to the server process environment.
        OutputCapture().assert_outputs(self, driver.start, [pixel_tests, []], expected_logs=expected_logs)
        self.assertTrue(driver._server_process.started)
        self.assertEqual(driver._server_process.env["DISPLAY"], expected_display)

    def test_start_no_pixel_tests(self):
        driver = self.make_driver()
        expected_logs = "MOCK run_command: ['ps', '-eo', 'comm,command'], cwd=None\nMOCK popen: ['Xvfb', ':0', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n"
        self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":0")
        self.cleanup_driver(driver)

    def test_start_pixel_tests(self):
        driver = self.make_driver()
        expected_logs = "MOCK run_command: ['ps', '-eo', 'comm,command'], cwd=None\nMOCK popen: ['Xvfb', ':0', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n"
        self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":0", pixel_tests=True)
        self.cleanup_driver(driver)

    def test_start_arbitrary_worker_number(self):
        # The display number comes from free-display scanning, not from the
        # worker number, so display :0 is still expected here.
        driver = self.make_driver(worker_number=17)
        expected_logs = "MOCK run_command: ['ps', '-eo', 'comm,command'], cwd=None\nMOCK popen: ['Xvfb', ':0', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n"
        self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":0", pixel_tests=True)
        self.cleanup_driver(driver)

    def test_next_free_display(self):
        # _next_free_display() must return the lowest display number not
        # claimed by a running X/Xorg/Xvfb process in the ps output.
        output = "Xorg /usr/bin/X :0 -auth /var/run/lightdm/root/:0 -nolisten tcp vt7 -novtswitch -background none\nXvfb Xvfb :1 -screen 0 800x600x24 -nolisten tcp"
        executive = MockExecutive2(output)
        driver = self.make_driver(executive=executive)
        self.assertEqual(driver._next_free_display(), 2)
        self.cleanup_driver(driver)
        output = "X /usr/bin/X :0 vt7 -nolisten tcp -auth /var/run/xauth/A:0-8p7Ybb"
        executive = MockExecutive2(output)
        driver = self.make_driver(executive=executive)
        self.assertEqual(driver._next_free_display(), 1)
        self.cleanup_driver(driver)
        output = "Xvfb Xvfb :0 -screen 0 800x600x24 -nolisten tcp"
        executive = MockExecutive2(output)
        driver = self.make_driver(executive=executive)
        self.assertEqual(driver._next_free_display(), 1)
        self.cleanup_driver(driver)
        # Gaps in the used display numbers are filled first (:2 here).
        output = "Xvfb Xvfb :1 -screen 0 800x600x24 -nolisten tcp\nXvfb Xvfb :0 -screen 0 800x600x24 -nolisten tcp\nXvfb Xvfb :3 -screen 0 800x600x24 -nolisten tcp"
        executive = MockExecutive2(output)
        driver = self.make_driver(executive=executive)
        self.assertEqual(driver._next_free_display(), 2)
        self.cleanup_driver(driver)

    def test_start_next_worker(self):
        # Whatever _next_free_display() reports must be the display Xvfb is
        # launched on and the DISPLAY exported to the server process.
        driver = self.make_driver()
        driver._next_free_display = lambda: 0
        expected_logs = "MOCK popen: ['Xvfb', ':0', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n"
        self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":0", pixel_tests=True)
        self.cleanup_driver(driver)
        driver = self.make_driver()
        driver._next_free_display = lambda: 3
        expected_logs = "MOCK popen: ['Xvfb', ':3', '-screen', '0', '800x600x24', '-nolisten', 'tcp']\n"
        self.assertDriverStartSuccessful(driver, expected_logs=expected_logs, expected_display=":3", pixel_tests=True)
        self.cleanup_driver(driver)

    def test_stop(self):
        # stop() must kill the Xvfb process, clear the handle and remove the
        # X lock file.
        filesystem = MockFileSystem(files={'/tmp/.X42-lock': '1234\n'})
        port = Port(MockSystemHost(log_executive=True, filesystem=filesystem), 'xvfbdrivertestport', options=MockOptions(configuration='Release'))
        port._executive.kill_process = lambda x: _log.info("MOCK kill_process pid: " + str(x))
        driver = XvfbDriver(port, worker_number=0, pixel_tests=True)

        class FakeXvfbProcess(object):
            pid = 1234

        driver._xvfb_process = FakeXvfbProcess()
        driver._lock_file = '/tmp/.X42-lock'

        expected_logs = "MOCK kill_process pid: 1234\n"
        OutputCapture().assert_outputs(self, driver.stop, [], expected_logs=expected_logs)

        self.assertIsNone(driver._xvfb_process)
        self.assertFalse(port._filesystem.exists(driver._lock_file))
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.