code stringlengths 3 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int64 3 1.05M |
|---|---|---|---|---|---|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fingerprinting code for the Python runtime."""
import os
import textwrap
from gae_ext_runtime import ext_runtime
from googlecloudsdk.api_lib.app.images import config
from googlecloudsdk.core import log
# Human-readable name of this runtime fingerprinter.
NAME = 'Python Compat'
# app.yaml "runtime" values that this module is allowed to handle.
ALLOWED_RUNTIME_NAMES = ('python27', 'python-compat')
PYTHON_RUNTIME_NAME = 'python27'
# TODO(user): this generated app.yaml doesn't work because the compat
# runtimes need a "handlers" section. Query the user for this information.
# Skeleton app.yaml written when the source tree has none; {runtime} is
# substituted by GenerateConfigs.
PYTHON_APP_YAML = textwrap.dedent("""\
runtime: {runtime}
vm: true
api_version: 1
threadsafe: false
# You must add a handlers section here. Example:
# handlers:
# - url: .*
# script: main.app
""")
# Warning shown after generating the (incomplete) app.yaml above.
APP_YAML_WARNING = ('app.yaml has been generated, but needs to be provided a '
'"handlers" section.')
# Contents of the generated .dockerignore: keep docker metadata and VCS
# directories out of the image build context.
DOCKERIGNORE = textwrap.dedent("""\
.dockerignore
Dockerfile
.git
.hg
.svn
""")
# Base-image FROM lines for the two supported runtimes.
COMPAT_DOCKERFILE_PREAMBLE = (
'FROM gcr.io/google_appengine/python-compat-multicore\n')
PYTHON27_DOCKERFILE_PREAMBLE = 'FROM gcr.io/google_appengine/python-compat\n'
# Dockerfile line that copies the application source into the image.
DOCKERFILE_INSTALL_APP = 'ADD . /app/\n'
# TODO(user): Do the check for requirements.txt in the source inspection
# and don't generate the pip install if it doesn't exist.
DOCKERFILE_INSTALL_REQUIREMENTS_TXT = (
'RUN if [ -s requirements.txt ]; then pip install -r requirements.txt; '
'fi\n')
class PythonConfigurator(ext_runtime.Configurator):
    """Builds deployment configuration files for a Python application."""

    def __init__(self, path, params, runtime):
        """Initialize the configurator.

        Args:
            path: (str) Root path of the source tree.
            params: (ext_runtime.Params) Parameters passed through to the
                fingerprinters.
            runtime: (str) The runtime name.
        """
        self.root = path
        self.params = params
        self.runtime = runtime

    def GenerateConfigs(self):
        """Generate all config files for the module.

        Returns:
            (ext_runtime.Cleaner) Records every file written so the caller
            can remove the generated files later.
        """
        # "Writing file" messages go to the log during a deploy, and straight
        # to the user otherwise.
        notify = log.info if self.params.deploy else log.status.Print

        if self.runtime == 'python-compat':
            dockerfile_preamble = COMPAT_DOCKERFILE_PREAMBLE
        else:
            dockerfile_preamble = PYTHON27_DOCKERFILE_PREAMBLE

        cleaner = ext_runtime.Cleaner()

        # Generate app.yaml. Note: this is not a recommended use-case,
        # python-compat users likely have an existing app.yaml. But users can
        # still get here with the --runtime flag.
        if not self.params.appinfo:
            app_yaml = os.path.join(self.root, 'app.yaml')
            if not os.path.exists(app_yaml):
                notify('Writing [app.yaml] to [%s].' % self.root)
                runtime = 'custom' if self.params.custom else self.runtime
                with open(app_yaml, 'w') as f:
                    f.write(PYTHON_APP_YAML.format(runtime=runtime))
                cleaner.Add(app_yaml)
                # The generated file still needs a "handlers" section.
                log.warn(APP_YAML_WARNING)

        if self.params.custom or self.params.deploy:
            dockerfile = os.path.join(self.root, config.DOCKERFILE)
            if not os.path.exists(dockerfile):
                notify('Writing [%s] to [%s].' % (config.DOCKERFILE, self.root))
                with open(dockerfile, 'w') as out:
                    out.write(dockerfile_preamble)
                    out.write(DOCKERFILE_INSTALL_APP)
                    # Only the compat runtime installs requirements.txt.
                    if self.runtime == 'python-compat':
                        out.write(DOCKERFILE_INSTALL_REQUIREMENTS_TXT)
                cleaner.Add(dockerfile)

            dockerignore = os.path.join(self.root, '.dockerignore')
            if not os.path.exists(dockerignore):
                notify('Writing [.dockerignore] to [%s].' % self.root)
                with open(dockerignore, 'w') as f:
                    f.write(DOCKERIGNORE)
                cleaner.Add(dockerignore)

        if not cleaner.HasFiles():
            notify('All config files already exist, not generating anything.')

        return cleaner
def Fingerprint(path, params):
    """Check for a Python app.

    Args:
        path: (str) Application path.
        params: (ext_runtime.Params) Parameters passed through to the
            fingerprinters.

    Returns:
        (PythonConfigurator or None) Returns a module if the path contains a
        python app.
    """
    log.info('Checking for Python Compat.')

    # These runtimes are only selected when explicitly requested: either the
    # user passed --runtime or app.yaml names a matching runtime.
    appinfo = params.appinfo
    declared = appinfo.GetEffectiveRuntime() if appinfo else None
    if not params.runtime and declared not in ALLOWED_RUNTIME_NAMES:
        return None

    # app.yaml's effective runtime wins over the command-line value.
    runtime = declared if appinfo else params.runtime
    log.info('Python Compat matches ([{0}] specified in "runtime" field)'.format(
        runtime))
    return PythonConfigurator(path, params, runtime)
| flgiordano/netcash | +/google-cloud-sdk/lib/googlecloudsdk/api_lib/app/runtimes/python_compat.py | Python | bsd-3-clause | 5,347 |
"""A multi-producer, multi-consumer queue."""
from time import time as _time
#try:
import threading as _threading
#except ImportError:
# import dummy_threading as _threading
from collections import deque
import heapq
__all__ = ['Empty', 'Full', 'Queue', 'PriorityQueue', 'LifoQueue']
class Empty(Exception):
    """Raised by Queue.get(block=0)/get_nowait() when no item is available."""
class Full(Exception):
    """Raised by Queue.put(block=0)/put_nowait() when no free slot is available."""
class Queue(object):
    """Create a queue object with a given maximum size.

    If maxsize is <= 0, the queue size is infinite.
    """

    def __init__(self, maxsize=0):
        self.maxsize = maxsize
        self._init(maxsize)
        # mutex must be held whenever the queue is mutating.  All methods
        # that acquire mutex must release it before returning.  mutex
        # is shared between the three conditions, so acquiring and
        # releasing the conditions also acquires and releases mutex.
        self.mutex = _threading.Lock()
        # Notify not_empty whenever an item is added to the queue; a
        # thread waiting to get is notified then.
        self.not_empty = _threading.Condition(self.mutex)
        # Notify not_full whenever an item is removed from the queue;
        # a thread waiting to put is notified then.
        self.not_full = _threading.Condition(self.mutex)
        # Notify all_tasks_done whenever the number of unfinished tasks
        # drops to zero; thread waiting to join() is notified to resume
        self.all_tasks_done = _threading.Condition(self.mutex)
        self.unfinished_tasks = 0

    def task_done(self):
        """Indicate that a formerly enqueued task is complete.

        Used by Queue consumer threads.  For each get() used to fetch a task,
        a subsequent call to task_done() tells the queue that the processing
        on the task is complete.

        If a join() is currently blocking, it will resume when all items
        have been processed (meaning that a task_done() call was received
        for every item that had been put() into the queue).

        Raises a ValueError if called more times than there were items
        placed in the queue.
        """
        # `with` acquires/releases the condition's lock even on exceptions.
        with self.all_tasks_done:
            unfinished = self.unfinished_tasks - 1
            if unfinished <= 0:
                if unfinished < 0:
                    raise ValueError('task_done() called too many times')
                self.all_tasks_done.notify_all()
            self.unfinished_tasks = unfinished

    def join(self):
        """Blocks until all items in the Queue have been gotten and processed.

        The count of unfinished tasks goes up whenever an item is added to the
        queue. The count goes down whenever a consumer thread calls task_done()
        to indicate the item was retrieved and all work on it is complete.

        When the count of unfinished tasks drops to zero, join() unblocks.
        """
        with self.all_tasks_done:
            while self.unfinished_tasks:
                self.all_tasks_done.wait()

    def qsize(self):
        """Return the approximate size of the queue (not reliable!)."""
        # Fix: the original used bare acquire()/release() with no try/finally
        # here (and in empty()/full()), so an exception raised by a subclass
        # _qsize() would leak the mutex and deadlock the queue.  The `with`
        # statement always releases the lock.
        with self.mutex:
            return self._qsize()

    def empty(self):
        """Return True if the queue is empty, False otherwise (not reliable!)."""
        with self.mutex:
            return not self._qsize()

    def full(self):
        """Return True if the queue is full, False otherwise (not reliable!)."""
        with self.mutex:
            # Chained comparison: maxsize must be positive AND equal the size.
            return 0 < self.maxsize == self._qsize()

    def put(self, item, block=True, timeout=None):
        """Put an item into the queue.

        If optional args 'block' is true and 'timeout' is None (the default),
        block if necessary until a free slot is available. If 'timeout' is
        a non-negative number, it blocks at most 'timeout' seconds and raises
        the Full exception if no free slot was available within that time.
        Otherwise ('block' is false), put an item on the queue if a free slot
        is immediately available, else raise the Full exception ('timeout'
        is ignored in that case).
        """
        with self.not_full:
            if self.maxsize > 0:
                if not block:
                    if self._qsize() == self.maxsize:
                        raise Full
                elif timeout is None:
                    while self._qsize() == self.maxsize:
                        self.not_full.wait()
                elif timeout < 0:
                    raise ValueError("'timeout' must be a non-negative number")
                else:
                    endtime = _time() + timeout
                    while self._qsize() == self.maxsize:
                        remaining = endtime - _time()
                        if remaining <= 0.0:
                            raise Full
                        self.not_full.wait(remaining)
            self._put(item)
            self.unfinished_tasks += 1
            self.not_empty.notify()

    def put_nowait(self, item):
        """Put an item into the queue without blocking.

        Only enqueue the item if a free slot is immediately available.
        Otherwise raise the Full exception.
        """
        return self.put(item, False)

    def get(self, block=True, timeout=None):
        """Remove and return an item from the queue.

        If optional args 'block' is true and 'timeout' is None (the default),
        block if necessary until an item is available. If 'timeout' is
        a non-negative number, it blocks at most 'timeout' seconds and raises
        the Empty exception if no item was available within that time.
        Otherwise ('block' is false), return an item if one is immediately
        available, else raise the Empty exception ('timeout' is ignored
        in that case).
        """
        with self.not_empty:
            if not block:
                if not self._qsize():
                    raise Empty
            elif timeout is None:
                while not self._qsize():
                    self.not_empty.wait()
            elif timeout < 0:
                raise ValueError("'timeout' must be a non-negative number")
            else:
                endtime = _time() + timeout
                while not self._qsize():
                    remaining = endtime - _time()
                    if remaining <= 0.0:
                        raise Empty
                    self.not_empty.wait(remaining)
            item = self._get()
            self.not_full.notify()
            return item

    def get_nowait(self):
        """Remove and return an item from the queue without blocking.

        Only get an item if one is immediately available. Otherwise
        raise the Empty exception.
        """
        return self.get(False)

    # Override these methods to implement other queue organizations
    # (e.g. stack or priority queue).
    # These will only be called with appropriate locks held

    # Initialize the queue representation
    def _init(self, maxsize):
        self.queue = deque()

    def _qsize(self, len=len):
        return len(self.queue)

    # Put a new item in the queue
    def _put(self, item):
        self.queue.append(item)

    # Get an item from the queue
    def _get(self):
        return self.queue.popleft()
class PriorityQueue(Queue):
    """Queue variant that retrieves open entries in priority order (lowest first).

    Entries are typically tuples of the form: (priority number, data).
    """

    def _init(self, maxsize):
        # Heap-ordered list; maxsize enforcement lives in the base class.
        self.queue = list()

    def _qsize(self, len=len):
        return len(self.queue)

    def _put(self, item, heappush=heapq.heappush):
        # Insert while preserving the heap invariant.
        heappush(self.queue, item)

    def _get(self, heappop=heapq.heappop):
        # Remove and return the smallest entry.
        return heappop(self.queue)
class LifoQueue(Queue):
    """Queue variant with stack (last-in, first-out) retrieval order."""

    def _init(self, maxsize):
        # Plain list used as a stack; maxsize enforcement is in the base class.
        self.queue = list()

    def _qsize(self, len=len):
        return len(self.queue)

    def _put(self, item):
        # Push onto the top of the stack.
        self.queue.append(item)

    def _get(self):
        # Pop from the top of the stack.
        return self.queue.pop()
| google/grumpy | third_party/stdlib/Queue.py | Python | apache-2.0 | 8,584 |
#
# @BEGIN LICENSE
#
# Psi4: an open-source quantum chemistry software package
#
# Copyright (c) 2007-2018 The Psi4 Developers.
#
# The copyrights for code used from other parties are included in
# the corresponding files.
#
# This file is part of Psi4.
#
# Psi4 is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, version 3.
#
# Psi4 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License along
# with Psi4; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# @END LICENSE
#
import re
import sys
import itertools
import numpy as np
from ..exceptions import *
# Python 2/3 compatibility: make sure the name ``long`` exists.  On
# Python 3 the separate long type was removed, so it aliases ``int``.
try:
    long(1)
except NameError:
    long = int
# ``itertools.ifilterfalse`` was renamed ``filterfalse`` in Python 3.
try:
    from itertools import ifilterfalse as filterfalse # py2
except ImportError:
    from itertools import filterfalse # py3
def _unique_everseen(iterable, key=None):
"List unique elements, preserving order. Remember all elements ever seen."
# unique_everseen('AAAABBBCCDAABBB') --> A B C D
# unique_everseen('ABBCcAD', str.lower) --> A B C D
# straight from the docs, https://docs.python.org/3/library/itertools.html#itertools-recipes
seen = set()
seen_add = seen.add
if key is None:
for element in filterfalse(seen.__contains__, iterable):
seen_add(element)
yield element
else:
for element in iterable:
k = key(element)
if k not in seen:
seen_add(k)
yield element
def _apply_default(llist, default):
return [default if (c is None) else c for c in llist]
def _high_spin_sum(mult_list):
mm = 1
for m in mult_list:
mm += m - 1
return mm
def _mult_ok(m):
return isinstance(m, (int, np.int64, long)) and m >= 1
def _sufficient_electrons_for_mult(z, c, m):
"""Require sufficient electrons in total: total mult ({}) - 1 > raw electrons ({}) - total chg ({})"""
return m - 1 <= z - c
def _parity_ok(z, c, m):
"""Check total electrons (neutral protons `z` and charge `c`) is (un)paired-compatible with multiplicity `m`"""
return (m % 2) != ((z - c) % 2)
def _alpha_beta_allocator(z, c, m):
nbeta = (z - c - m + 1) // 2
nalpha = nbeta + m - 1
return nalpha, nbeta
def validate_and_fill_chgmult(zeff,
fragment_separators,
molecular_charge,
fragment_charges,
molecular_multiplicity,
fragment_multiplicities,
zero_ghost_fragments=False,
verbose=1):
"""
Applies physical constraints and sensible defaults to reconciling and
completing the molecular and fragment charge and multiplicity
specification.
Parameters
----------
zeff : ndarray of float
(nat,) electron counts for neutral atoms, generally Z nuclear charge.
0 indicates ghosts such that a full fragment of 0s will be constained
to `0 1` charge & multiplicity.
fragment_separators : ndarray of int
(nfr - 1, ) indices splitting `zeff` into nfr fragments.
molecular_charge : float or None
Total charge for molecular system.
fragment_charges : list of float or None
(nfr,) known fragment charges with `None` as placeholder for
unknown. Expected pre-defaulted so even if nothing known if
`fragment_separators` breaks `zeff` into `nfr=2` fragments, input
value should be `fragment_charges=[None, None]`.
molecular_multiplicity : int or None
Total multiplicity for molecular system.
fragment_multiplicity : list of int or None
(nfr,) known fragment charges with `None` as placeholder for
unknown. Expected pre-defaulted so even if nothing known if
`fragment_separators` breaks `zeff` into `nfr=2` fragments, input
value should be `fragment_multiplicities=[None, None]`.
zero_ghost_fragments : bool, optional
Fragments composed entirely of ghost atoms (Zeff=0) are required to have
chgmult `0 1`. When `False`, violations of this will cause a
ValidationError. When `True`, treat ghost fragments indicated by `zeff` to
contain superior information over chgmult arguments that might still
correspond to full-real molecule. Clears information from
`molecular_charge` and `molecular_multiplicity` and sets ghost fragments
to `0 1`, leaving other positions free to readjust. Unused (prefer to set
up such manipulations outside function call) but works.
verbose : int, optional
Amount of printing.
Notes
-----
Returns combination of total & fragment charge & multiplicity among
values of S1-7 that fulfill rules R1-9. A few derived implications in I1-3.
* Constraints
R1 * require all chg & mult exist
R2 * require total charge to be the sum of frag chg
R3 * require mult is positive int
R4 * require sufficient tot electrons for mult: mult - 1 <= neut_el - chg
R5 * require total parity consistent among tot electrons and mult: (mult % 2) != ((neut_el - chg) % 2)
R6 * require chg match input argument values
R7 * require mult match input argument values
R8 * require that tot = sum(frag) mult follow high spin addition unless tot & frag mult fully specified
R9 * require that ghost fragments (zeff all 0) be neutral singlet
* Allowed values
S1 * suggest input argument values for tot chg, frag chg, tot mult or frag mult
S2 * suggest sum frag chg for tot chg, allowing for indiv frag chg defaulting to 0
S3 * suggest distributing unallocated chg onto frag chg
S4 * suggest 0 default for frag chg
S5 * suggest range of high-spin sum frag mult for tot mult, allowing for indiv frag mult defaulting to 1 or 2
S6 * suggest range of unallocated mult = tot - high_spin_sum(frag - 1), allowing for all indiv but self defaulting to 1 or 2.
S7 * suggest 1 or 2 default for frag mult
* Implications
I1 * won't form an ion just to be closed shell (would require choosing +1 vs. -1)
I2 * unallocated chg or mult lands on the first unspecified fragment able to
bear it (enforced by returning first match encountered; subsequent
matches distribute charge to later frags)
I3 * missing chg or mult from tot - frags will always be allocated as a block, not distributed
Examples
--------
>>> validate_and_fill_chgmult(*sys('He'), 0, [0], 1, [1])
0, [0], 1, [1]
>>> validate_and_fill_chgmult(*sys('He'), None, [None], None, [None])
0, [0], 1, [1]
>>> validate_and_fill_chgmult(*sys('He/He'), None, [None, None], None, [None, None])
0, [0, 0], 1, [1, 1])
>>> validate_and_fill_chgmult(*sys('He/He'), 2, [None, None], None, [None, None])
2, [2, 0], 1, [1, 1])
>>> validate_and_fill_chgmult(*sys('He/He'), None, [2, None], None, [None, None])
2, [2, 0], 1, [1, 1])
>>> validate_and_fill_chgmult(*sys('He/He'), 0, [2, None], None, [None, None])
0, [2, -2], 1, [1, 1])
>>> validate_and_fill_chgmult(*sys('Ne/He/He'), -2, [None, 2, None], None, [None, None, None])
-2, [-4, 2, 0], 1, [1, 1, 1]
>>> validate_and_fill_chgmult(*sys('Ne/He/He'), 2, [None, -2, None], None, [None, None, None])
2, [4, -2, 0], 1, [1, 1, 1]
# 9 - residual +4 distributes to first fragment able to wholly accept it (He+4 is no-go)
>>> validate_and_fill_chgmult(*sys('He/He/Ne'), 2, [None, -2, None], None, [None, None, None])
2, [0, -2, 4], 1, [1, 1, 1]
# 10 - residual +4 unsuited for only open fragment, He, so irreconcilable
>>> validate_and_fill_chgmult(*sys('He/He/Ne'), 2, [None, -2, 0], None, [None, None, None])
ValidationError
# 11 - non-positive multiplicity
>>> validate_and_fill_chgmult(*sys('He/He/Ne'), 2, [2, -2, None], None, [None, None, None])
2, [2, -2, 2], 1, [1, 1, 1])
>>> validate_and_fill_chgmult(*sys('He/He'), None, [-2, 2], None, [None, None])
0, [-2, 2], 1, [1, 1]
>>> validate_and_fill_chgmult(*sys('He/He'), None, [None, -2], None, [None, None])
-2, [0, -2], 1, [1, 1]
>>> validate_and_fill_chgmult(*sys('Ne/Ne'), 0, [None, 4], None, [None, None])
0, [-4, 4], 1, [1, 1]
>>> validate_and_fill_chgmult(*sys('He/He/He'), 4, [2, None, None], None, [None, None, None])
4, [2, 2, 0], 1, [1, 1, 1]
>>> validate_and_fill_chgmult(*sys('He/He'), 0, [-2, 2], None, [None, None])
0, [-2, 2], 1, [1, 1]
>>> validate_and_fill_chgmult(*sys('He/He'), 0, [-2, -2], None, [None, None])
ValidationError
>>> validate_and_fill_chgmult(*sys('He'), None, [None], 0, [None])
ValidationError
>>> validate_and_fill_chgmult(*sys('He'), None, [None], None, [1])
0, [0], 1, [1]
# 20 - doublet non consistent with closed-shell, neutral default charge
>>> validate_and_fill_chgmult(*sys('He'), None, [None], None, [2])
ValidationError
>>> validate_and_fill_chgmult(*sys('He'), None, [None], None, [3])
0, [0], 3, [3]
# 22 - insufficient electrons for pentuplet
>>> validate_and_fill_chgmult(*sys('He'), None, [None], None, [5])
ValidationError
>>> validate_and_fill_chgmult(*sys('He'), None, [-1], None, [2])
-1, [-1], 2, [2]
# 24 - doublet not consistent with even charge
>>> validate_and_fill_chgmult(*sys('He'), None, [-2], None, [2])
ValidationError
>>> validate_and_fill_chgmult(*sys('He/He'), None, [None, None], None, [1, 1])
0, [0, 0], 1, [1, 1]
>>> validate_and_fill_chgmult(*sys('He/He'), None, [None, None], None, [3, 1])
0, [0, 0], 3, [3, 1]
>>> validate_and_fill_chgmult(*sys('He/He'), None, [None, None], None, [1, 3])
0, [0, 0], 3, [1, 3]
>>> validate_and_fill_chgmult(*sys('He/He'), None, [None, None], None, [3, 3])
0, [0, 0], 5, [3, 3]
>>> validate_and_fill_chgmult(*sys('He/He'), None, [None, None], 3, [3, 3])
0, [0, 0], 3, [3, 3]
# 30 - bad parity btwn mult and total # electrons
>>> validate_and_fill_chgmult(*sys('He/He'), None, [None, None], 2, [3, 3])
ValidationError
>>> validate_and_fill_chgmult(*sys('H'), None, [None], None, [None])
0, [0], 2, [2]
>>> validate_and_fill_chgmult(*sys('H'), 1, [None], None, [None])
1, [1], 1, [1]
>>> validate_and_fill_chgmult(*sys('H'), None, [-1], None, [None])
-1, [-1], 1, [1]
>>> validate_and_fill_chgmult(*sys('funnyH'), None, [None], None, [None])
0, [0], 1, [1]
# 35 - insufficient electrons
>>> validate_and_fill_chgmult(*sys('funnierH'), None, [None], None, [None])
ValidationError
>>> validate_and_fill_chgmult(*sys('H/H'), None, [None, None], None, [None, None])
0, [0, 0], 3, [2, 2]
>>> validate_and_fill_chgmult(*sys('H/He'), None, [None, None], None, [None, None])
0, [0, 0], 2, [2, 1]
>>> validate_and_fill_chgmult(*sys('H/He'), None, [1, 1], None, [None, None])
2, [1, 1], 2, [1, 2]
>>> validate_and_fill_chgmult(*sys('H/He'), -2, [-1, None], None, [None, None])
-2, [-1, -1], 2, [1, 2]
>>> validate_and_fill_chgmult(*sys('H/He/Na/Ne'), None, [1, None, 1, None], None, [None, None, None, None])
2, [1, 0, 1, 0], 1, [1, 1, 1, 1]
>>> validate_and_fill_chgmult(*sys('H/He/Na/Ne'), None, [-1, None, 1, None], None, [None, None, None, None])
0, [-1, 0, 1, 0], 1, [1, 1, 1, 1]
>>> validate_and_fill_chgmult(*sys('H/He/Na/Ne'), 2, [None, None, 1, None], None, [None, None, None, None])
2, [1, 0, 1, 0], 1, [1, 1, 1, 1]
>>> validate_and_fill_chgmult(*sys('H/He/Na/Ne'), 3, [None, None, 1, None], None, [None, None, None, None])
3, [0, 2, 1, 0], 2, [2, 1, 1, 1]
>>> validate_and_fill_chgmult(*sys('H/He'), None, [1, None], None, [2, None])
ValidationError
>>> validate_and_fill_chgmult(*sys('H/He'), None, [None, 0], None, [None, 2])
ValidationError
>>> validate_and_fill_chgmult(*sys('H/He'), None, [None, -1], None, [None, 3])
ValidationError
>>> validate_and_fill_chgmult(*sys('H/He/Na/Ne'), None, [None, 1, 0, 1], None, [None, None, None, None])
2, [0, 1, 0, 1], 5, [2, 2, 2, 2]
>>> validate_and_fill_chgmult(*sys('H/He/Na/Ne'), None, [None, 1, 0, None], None, [None, None, None, None])
1, [0, 1, 0, 0], 4, [2, 2, 2, 1]
>>> validate_and_fill_chgmult(*sys('H/He/Na/Ne'), None, [None, 1, 0, None], None, [None, None, 4, None])
1, [0, 1, 0, 0], 6, [2, 2, 4, 1]
>>> validate_and_fill_chgmult(*sys('He/He/He'), 0, [None, None, 1], None, [1, None, 2])
0, [0, -1, 1], 3, [1, 2, 2]
>>> validate_and_fill_chgmult(*sys('N/N/N'), None, [1, 1, 1], 3, [None, 3, None])
3, [1, 1, 1], 3, [1, 3, 1]
>>> validate_and_fill_chgmult(*sys('N/N/N'), None, [1, 1, 1], 3, [None, None, None])
3, [1, 1, 1], 3, [3, 1, 1]
>>> validate_and_fill_chgmult(*sys('N/N/N'), None, [None, None, None], 3, [None, None, 2])
ValidationError
>>> validate_and_fill_chgmult(*sys('N/N/N'), 1, [None, -1, None], 3, [None, None, 2])
1, [2, -1, 0], 3, [2, 1, 2]
# 55 - both (1, (1, 0.0, 0.0), 4, (1, 3, 2)) and (1, (0.0, 0.0, 1), 4, (2, 3, 1)) plausible
>>> validate_and_fill_chgmult(*sys('N/Ne/N'), 1, [None, None, None], 4, [None, 3, None])
1, [1, 0, 0], 4, [1, 3, 2]
>>> validate_and_fill_chgmult(*sys('N/Ne/N'), None, [None, None, 1], 4, [None, 3, None])
1, [0, 0, 1], 4, [2, 3, 1]
>>> validate_and_fill_chgmult(*sys('He/He'), None, [-1, 1], None, [None, None])
0, [-1, 1], 3, [2, 2]
>>> validate_and_fill_chgmult(*sys('Gh'), 1, [None], None, [None])
ValidationError
>>> validate_and_fill_chgmult(*sys('Gh'), -1, [None], None, [None])
ValidationError
>>> validate_and_fill_chgmult(*sys('Gh'), None, [None], 3, [None])
ValidationError
>>> validate_and_fill_chgmult(*sys('He/Gh'), None, [2, None], None, [None, None])
2, [2, 0], 1, [1, 1]
>>> validate_and_fill_chgmult(*sys('Gh/He'), None, [2, None], None, [None, None])
ValidationError
>>> validate_and_fill_chgmult(*sys('Gh/He/Ne'), 2, [None, -2, None], None, [None, None, None])
2, [0, -2, 4], 1, [1, 1, 1]
>>> validate_and_fill_chgmult(*sys('Gh/He/Gh'), 1, [None, None, None], None, [None, None, None])
1, [0, 1, 0], 2, [1, 2, 1]
>>> sys = {
'He': (np.array([2]), np.array([])),
'He/He': (np.array([2, 2]), np.array([1])),
'Ne/He/He': (np.array([10, 2, 2]), np.array([1, 2])),
'He/He/Ne': (np.array([2, 2, 10]), np.array([1, 2])),
'Ne/Ne': (np.array([10, 10]), np.array([1])),
'He/He/He': (np.array([2, 2, 2]), np.array([1, 2])),
'H': (np.array([1]), np.array([])),
'funnyH': (np.array([0]), np.array([])), # has no electrons
'funnierH': (np.array([-1]), np.array([])), # has positron
'H/H': (np.array([1, 1]), np.array([1])),
'H/He': (np.array([1, 2]), np.array([1])),
'H/He/Na/Ne': (np.array([1, 2, 11, 10]), np.array([1, 2, 3])),
'N/N/N': (np.array([7, 7, 7]), np.array([1, 2])),
'N/Ne/N': (np.array([7, 10, 7]), np.array([1, 2])),
'He/Gh': (np.array([2, 0]), np.array([1])),
'Gh/He': (np.array([0, 2]), np.array([1])),
'Gh': (np.array([0, 0]), np.array([])),
'Gh/He/Ne': (np.array([0, 0, 2, 10]), np.array([2, 3])),
'Gh/He/Gh': (np.array([0, 2, 0]), np.array([1, 2]))}
"""
text = []
felez = np.split(zeff, fragment_separators)
nfr = len(felez)
text.append('felez: {}'.format(felez))
cgmp_exact_c = [] # exact_* are candidates for the final value
cgmp_exact_fc = [[] for f in range(nfr)]
cgmp_exact_m = []
cgmp_exact_fm = [[] for f in range(nfr)]
cgmp_range = [] # tests that the final value must pass to be valid
cgmp_rules = [] # key to what rules in cgmp_range are T/F
real_fragments = np.array([not all(f == 0 for f in felez[ifr]) for ifr in range(nfr)])
all_fc_known = all(f is not None for f in fragment_charges)
all_fm_known = all(f is not None for f in fragment_multiplicities)
if zero_ghost_fragments and not all(real_fragments):
print('possibly adjusting charges')
molecular_charge = None
fragment_charges = [(fr if real_fragments[ifr] else 0.0) for ifr, fr in enumerate(fragment_charges)]
molecular_multiplicity = None
fragment_multiplicities = [(fr if real_fragments[ifr] else 1) for ifr, fr in enumerate(fragment_multiplicities)]
# <<< assert broad physical requirements
# * (R1) require all chg & mult exist
cgmp_range.append(lambda c, fc, m, fm: c is not None and
all(f is not None for f in fc) and
m is not None and
all(f is not None for f in fm))
cgmp_rules.append('1')
# * (R2) require total charge to be the sum of fragment charges
cgmp_range.append(lambda c, fc, m, fm: c == sum(fc))
cgmp_rules.append('2')
# * (R3) require mult is positive int
cgmp_range.append(lambda c, fc, m, fm: _mult_ok(m) and all(_mult_ok(f) for f in fm))
cgmp_rules.append('3')
# <<< assert electron count requirements
zel = np.sum(zeff) # note: number electrons in neutral species, not number total electrons
fzel = [np.sum(f) for f in felez]
text.append('zel: {}'.format(zel))
text.append('fzel: {}'.format(fzel))
# * (R4) require sufficient electrons for mult: mult - 1 <= neutral_electrons - chg
cgmp_range.append(lambda c, fc, m, fm: _sufficient_electrons_for_mult(zel, c, m))
cgmp_rules.append('4')
for ifr in range(nfr):
cgmp_range.append(lambda c, fc, m, fm, ifr=ifr: _sufficient_electrons_for_mult(fzel[ifr], fc[ifr], fm[ifr]))
cgmp_rules.append('4-' + str(ifr))
# * (R5) require total parity consistent among neutral_electrons, chg, and mult
cgmp_range.append(lambda c, fc, m, fm: _parity_ok(zel, c, m))
cgmp_rules.append('5')
for ifr in range(nfr):
cgmp_range.append(lambda c, fc, m, fm, ifr=ifr: _parity_ok(fzel[ifr], fc[ifr], fm[ifr]))
cgmp_rules.append('5-' + str(ifr))
# <<< (R6, R7, S1) assert & suggest input values
if molecular_charge is not None:
cgmp_exact_c.append(molecular_charge)
cgmp_range.append(lambda c, fc, m, fm: c == molecular_charge)
cgmp_rules.append('6')
for ifr, chg in enumerate(fragment_charges):
if chg is not None:
cgmp_exact_fc[ifr].append(chg)
cgmp_range.append(lambda c, fc, m, fm, ifr=ifr, chg=chg: fc[ifr] == chg)
cgmp_rules.append('6-' + str(ifr))
if molecular_multiplicity is not None:
cgmp_exact_m.append(molecular_multiplicity)
cgmp_range.append(lambda c, fc, m, fm: m == molecular_multiplicity)
cgmp_rules.append('7')
for ifr, mult in enumerate(fragment_multiplicities):
if mult is not None:
cgmp_exact_fm[ifr].append(mult)
cgmp_range.append(lambda c, fc, m, fm, ifr=ifr, mult=mult: fm[ifr] == mult)
cgmp_rules.append('7-' + str(ifr))
# <<< assert high-spin-rule and suggest "missing quantity" and default values
# * (S2) suggest net frag charge for total charge, allowing for indiv frag defaulting to 0
cgmp_exact_c.append(sum(filter(None, fragment_charges)))
missing_frag_chg = 0. if molecular_charge is None else molecular_charge
missing_frag_chg -= sum(filter(None, fragment_charges))
# * (S3) suggest distributing unallocated charge onto fragment
# * (S4) suggest 0 default charge for fragment
for ifr in range(nfr):
if fragment_charges[ifr] is None: # unneeded, but shortens the exact lists
cgmp_exact_fc[ifr].append(missing_frag_chg)
cgmp_exact_fc[ifr].append(0.)
# * (R8) require that frag mult follow high spin addition unless fully specified
if molecular_multiplicity is None or any(f is None for f in fragment_multiplicities):
cgmp_range.append(lambda c, fc, m, fm: m == _high_spin_sum(fm))
cgmp_rules.append('8')
# * (S5) suggest range of net frag mult for total mult, allowing for indiv frag defaulting to 1 or 2.
# many in range may be unphysical, but those will be caught by physical rules.
if molecular_multiplicity is None: # unneeded, but shortens the exact lists
frag_mult_hi = _high_spin_sum(_apply_default(fragment_multiplicities, 2))
frag_mult_lo = _high_spin_sum(_apply_default(fragment_multiplicities, 1))
for m in range(frag_mult_lo, frag_mult_hi + 1):
cgmp_exact_m.append(m)
# * (S6) suggest range of missing mult = tot - high_spin_sum(frag - 1),
# allowing for all indiv but self defaulting to 1 or 2. Many in range
# may be unphysical, but those will be caught by physical rules.
# * (S7) suggest 1 or 2 default multiplicity for fragment
if molecular_multiplicity is not None and any(f is None for f in fragment_multiplicities):
frag_mult_less_one_none = fragment_multiplicities[:]
frag_mult_less_one_none.remove(None) # "missing" slot to solve for
frag_mult_hi = _high_spin_sum(_apply_default(frag_mult_less_one_none, 2))
frag_mult_lo = _high_spin_sum(_apply_default(frag_mult_less_one_none, 1))
missing_mult_hi = molecular_multiplicity - frag_mult_lo + 1
missing_mult_lo = molecular_multiplicity - frag_mult_hi + 1
else:
missing_mult_hi = 0
missing_mult_lo = 0
for ifr in range(nfr):
if fragment_multiplicities[ifr] is None: # unneeded, but shortens the exact lists
for m in reversed(range(max(missing_mult_lo, 1), missing_mult_hi + 1)):
cgmp_exact_fm[ifr].append(m)
cgmp_exact_fm[ifr].append(1)
cgmp_exact_fm[ifr].append(2)
# * (R9) require that ghost fragments be neutral singlet
for ifr in range(nfr):
if all(f == 0 for f in felez[ifr]):
cgmp_range.append(lambda c, fc, m, fm, ifr=ifr: fc[ifr] == 0 and fm[ifr] == 1)
cgmp_rules.append('9-' + str(ifr))
# <<< reconcile and report
def reconcile(exact_c, exact_fc, exact_m, exact_fm):
"""Returns a member from all combinations of `exact` that passes all tests in cgmp_range, else raises error."""
# remove duplicates
uniq_c = _unique_everseen(exact_c)
uniq_fc = [_unique_everseen(f) for f in exact_fc]
uniq_m = _unique_everseen(exact_m)
uniq_fm = [_unique_everseen(f) for f in exact_fm]
text.append('c: {}'.format(list(exact_c)))
for f in exact_fc:
text.append('fc:'.format(list(f)))
text.append('m: {}'.format(list(exact_m)))
for f in exact_fm:
text.append('fm:'.format(list(f)))
header = True
for candidate in itertools.product(*[uniq_c, itertools.product(*uniq_fc),
uniq_m, itertools.product(*uniq_fm)]): # yapf: disable
cc, cfc, cm, cfm = candidate
if header:
text.append(
"""Assess candidate {}: {}""".format(candidate, ' '.join(('{:3}'.format(r) for r in cgmp_rules))))
header = False
assessment = [fn(cc, cfc, cm, cfm) for fn in cgmp_range]
sass = ['{:3}'.format('T' if b else '') for b in assessment]
text.append("""Assess candidate {:}: {} --> {}""".format(candidate, ' '.join(sass), all(assessment)))
if all(assessment):
return candidate
else:
err = """Inconsistent or unspecified chg/mult: sys chg: {}, frag chg: {}, sys mult: {}, frag mult: {}""".format(
molecular_charge, fragment_charges, molecular_multiplicity, fragment_multiplicities)
if verbose > -1:
print('\n\n' + '\n'.join(text))
raise ValidationError(err)
def stringify(start, final):
fcgmp = '{:^4}'
return fcgmp.format(final) if final == start else fcgmp.format('(' + str(int(final)) + ')')
# TODO could winnow down the exact_* lists a bit by ruling out
# independent values. do this if many-frag molecular systems take too
# long in the itertools.product
c_final, fc_final, m_final, fm_final = reconcile(cgmp_exact_c, cgmp_exact_fc, cgmp_exact_m, cgmp_exact_fm)
c_text = stringify(molecular_charge, c_final)
fc_text = ', '.join((stringify(fs, ff) for fs, ff in zip(fragment_charges, fc_final)))
m_text = stringify(molecular_multiplicity, m_final)
fm_text = ', '.join((stringify(fs, ff) for fs, ff in zip(fragment_multiplicities, fm_final)))
brief = []
brief.append(' {:26} {}'.format(' charge = ' + c_text, 'fragments = ' + fc_text))
brief.append(' {:26} {}'.format('multiplicity = ' + m_text, 'fragments = ' + fm_text))
been_defaulted = []
if c_text.count('(') + fc_text.count('(') > 1:
been_defaulted.append('charge')
if '(' in m_text or '(' in fm_text:
been_defaulted.append('multiplicity')
if been_defaulted:
brief.append(' Note: Default values have been applied for {}. Specify intentions in molecule input block'.
format(' and '.join(been_defaulted)))
if m_final != _high_spin_sum(fm_final):
brief.append(
' Warning: Total multiplicity is not high-spin sum of fragments; may be clobbered by psi4.core.Molecule.update_geometry().'
)
if verbose >= 2:
print('\n'.join(text))
if verbose >= 1:
# TODO add back when printing worked out
#print('\n'.join(brief))
pass
return {'molecular_charge': float(c_final),
'fragment_charges': [float(f) for f in fc_final],
'molecular_multiplicity': m_final,
'fragment_multiplicities': list(fm_final)}
| amjames/psi4 | psi4/driver/qcdb/molparse/chgmult.py | Python | lgpl-3.0 | 26,251 |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
#
# Copyright © 2013 Julien Danjou <julien@danjou.info>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
from oslo_config import cfg
from oslo_log import log as logging
import oslo_messaging as messaging
from oslo_service import service
from blazar import context
LOG = logging.getLogger(__name__)
class RPCClient(object):
    """Thin wrapper around oslo.messaging's RPCClient that injects the
    current Blazar request context into every outgoing message."""

    def __init__(self, target):
        super(RPCClient, self).__init__()
        transport = messaging.get_rpc_transport(cfg.CONF)
        self._client = messaging.RPCClient(target=target, transport=transport)

    def cast(self, name, **kwargs):
        """Fire-and-forget invocation of remote method `name`."""
        self._client.cast(context.current().to_dict(), name, **kwargs)

    def call(self, name, **kwargs):
        """Synchronous invocation of remote method `name`; returns its result."""
        return self._client.call(context.current().to_dict(), name, **kwargs)
class RPCServer(service.Service):
    """oslo.service wrapper running an oslo.messaging RPC server whose
    endpoint methods are dispatched through ContextEndpointHandler."""

    def __init__(self, target):
        super(RPCServer, self).__init__()
        handler = ContextEndpointHandler(self, target)
        self._server = messaging.get_rpc_server(
            target=target,
            transport=messaging.get_rpc_transport(cfg.CONF),
            endpoints=[handler],
            executor='eventlet',
        )

    def start(self):
        super(RPCServer, self).start()
        # Run the messaging server inside this service's thread group.
        self.tg.add_thread(self._server.start)

    def wait(self):
        super(RPCServer, self).wait()
        self._server.wait()

    def stop(self):
        super(RPCServer, self).stop()
        self._server.stop()
class ContextEndpointHandler(object):
    """Proxy endpoint that wraps every method of the real endpoint so it
    executes inside the BlazarContext shipped with the RPC message."""

    def __init__(self, endpoint, target):
        self.__endpoint = endpoint
        self.target = target

    def __getattr__(self, name):
        """Resolve `name` on the wrapped endpoint and return a dispatcher
        that rebuilds the caller's context before invoking it.

        Raises:
            AttributeError: if the wrapped endpoint has no such method.
        """
        try:
            method = getattr(self.__endpoint, name)
        except AttributeError:
            LOG.error("No %(method)s method found implemented in "
                      "%(class)s class",
                      {'method': name, 'class': self.__endpoint})
            # BUG FIX: previously this path fell through and implicitly
            # returned None, so oslo.messaging would try to call None.
            # Re-raise so the missing method is reported properly.
            raise

        def run_method(__ctx, **kwargs):
            # Recreate the caller's request context before dispatching.
            with context.BlazarContext(**__ctx):
                return method(**kwargs)

        return run_method
def with_empty_context(func):
    """Decorate *func* so its body executes inside a fresh, empty
    BlazarContext."""
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        with context.BlazarContext():
            return func(*args, **kwargs)
    return wrapper
def prepare_service(argv=None):
    """Initialise service-wide facilities (currently just oslo.log).

    Args:
        argv: retained for interface compatibility; currently unused.
    """
    # BUG FIX: the old signature used a mutable default (argv=[]), the
    # classic shared-default-argument pitfall; None is the safe sentinel.
    logging.setup(cfg.CONF, 'blazar')
| stackforge/blazar | blazar/utils/service.py | Python | apache-2.0 | 2,893 |
#!/usr/bin/env python3
import logging
from app import app, logger
# Debug logging configuration: everything at DEBUG except SQLAlchemy,
# whose engine logger is too chatty below INFO.
root = logging.getLogger()
root.setLevel(logging.DEBUG)
logging.getLogger("sqlalchemy.engine").setLevel(logging.INFO)

if __name__ == '__main__':
    # BUG FIX: Logger.warn() is a deprecated alias; use warning().
    logger.warning("This is a debugging configuration. Run with the gunicorn script to run in production.")
    app.run(host='0.0.0.0', debug=True)
| KanColleTool/kcsrv | kcsrv.py | Python | mit | 361 |
import clr
clr.AddReference('RevitAPI')
from Autodesk.Revit.DB import *
clr.AddReference("RevitNodes")
import Revit
clr.ImportExtensions(Revit.Elements)
clr.AddReference("RevitServices")
import RevitServices
from RevitServices.Persistence import DocumentManager
from RevitServices.Transactions import TransactionManager
doc = DocumentManager.Instance.CurrentDBDocument
# Inputs: IN[0] = curtain system type, IN[1] = list of face references.
cstype = UnwrapElement(IN[0])
facerefs = IN[1]
elementlist = list()

refarr = ReferenceArray()
for ref in facerefs:
    refarr.Append(ref)

TransactionManager.Instance.EnsureInTransaction(doc)
doccreation = doc.Create
# BUG FIX: NewCurtainSystem2 was previously called once *outside* the try
# block (creating a duplicate curtain system and bypassing the error
# handling) and then again inside it. Create the system only once, inside.
try:
    newcs = doccreation.NewCurtainSystem2(refarr, cstype)
    for item in newcs:
        elementlist.append(doc.GetElement(item))
except:
    # Best-effort, matching the node's convention: swallow creation
    # failures and output whatever list was built (possibly empty).
    pass
TransactionManager.Instance.TransactionTaskDone()

OUT = elementlist
"""This example using a PasswordInput is purely for demonstration.
Putting a password plaintext in a CustomJS is not advised since it would expose the password.
"""
from bokeh.models.widgets import PasswordInput, TextInput, PreText, Button
from bokeh.layouts import column, row
from bokeh.plotting import show, output_file
from bokeh.models.callbacks import CustomJS
USER = "Mau"
PASSWD = "Bok3h"

text = PreText(text="LOGIN TO KNOW\nTHE SECRET:")
user = TextInput(placeholder="username", title="(UserName: "+USER+")")
pwd = PasswordInput(placeholder="password", title="(Password: "+PASSWD+")")
btn = Button(label="GO!", width=150)
secret = PreText()  # Secret information displayed if correct password entered

## Verify if the password typed is bokeh using a JS script
# BUG FIX: the JS condition used `=` (assignment, always truthy) instead of
# `==` for the username check, and bitwise `&` instead of logical `&&`, so
# any username (and in practice any input) unlocked the secret.
verify_pwd = CustomJS(args=dict(user=user, pwd=pwd, secret=secret),
                      code=""" if ((user.value == '{}') && (pwd.value == '{}')) {{
                                    secret.text = 'Right Password. The Secret is 42.';
                                    return;
                                }}
                                secret.text = 'Wrong Password.';
                           """.format(USER, PASSWD))

#user.callback = verify_pwd # Check password pressing enter.
pwd.callback = verify_pwd  # Check password pressing enter.
btn.callback = verify_pwd  # Check password clicking on the Button.

output_file("using_password_input.html", title="Password Field")

page = row(column(text, user, pwd, btn), secret)

show(page)
| Ziqi-Li/bknqgis | bokeh/examples/howto/using_password_input.py | Python | gpl-2.0 | 1,515 |
#!/usr/bin/env python
""" This module tries to retrieve as much platform-identifying data as
possible. It makes this information available via function APIs.
If called from the command line, it prints the platform
information concatenated as single string to stdout. The output
format is useable as part of a filename.
"""
# This module is maintained by Marc-Andre Lemburg <mal@egenix.com>.
# If you find problems, please submit bug reports/patches via the
# Python bug tracker (http://bugs.python.org) and assign them to "lemburg".
#
# Note: Please keep this module compatible to Python 1.5.2.
#
# Still needed:
# * more support for WinCE
# * support for MS-DOS (PythonDX ?)
# * support for Amiga and other still unsupported platforms running Python
# * support for additional Linux distributions
#
# Many thanks to all those who helped adding platform-specific
# checks (in no particular order):
#
# Charles G Waldman, David Arnold, Gordon McMillan, Ben Darnell,
# Jeff Bauer, Cliff Crawford, Ivan Van Laningham, Josef
# Betancourt, Randall Hopper, Karl Putland, John Farrell, Greg
# Andruk, Just van Rossum, Thomas Heller, Mark R. Levinson, Mark
# Hammond, Bill Tutt, Hans Nowak, Uwe Zessin (OpenVMS support),
# Colin Kong, Trent Mick, Guido van Rossum, Anthony Baxter, Steve
# Dower
#
# History:
#
# <see CVS and SVN checkin messages for history>
#
# 1.0.8 - changed Windows support to read version from kernel32.dll
# 1.0.7 - added DEV_NULL
# 1.0.6 - added linux_distribution()
# 1.0.5 - fixed Java support to allow running the module on Jython
# 1.0.4 - added IronPython support
# 1.0.3 - added normalization of Windows system name
# 1.0.2 - added more Windows support
# 1.0.1 - reformatted to make doc.py happy
# 1.0.0 - reformatted a bit and checked into Python CVS
# 0.8.0 - added sys.version parser and various new access
# APIs (python_version(), python_compiler(), etc.)
# 0.7.2 - fixed architecture() to use sizeof(pointer) where available
# 0.7.1 - added support for Caldera OpenLinux
# 0.7.0 - some fixes for WinCE; untabified the source file
# 0.6.2 - support for OpenVMS - requires version 1.5.2-V006 or higher and
# vms_lib.getsyi() configured
# 0.6.1 - added code to prevent 'uname -p' on platforms which are
# known not to support it
# 0.6.0 - fixed win32_ver() to hopefully work on Win95,98,NT and Win2k;
# did some cleanup of the interfaces - some APIs have changed
# 0.5.5 - fixed another type in the MacOS code... should have
# used more coffee today ;-)
# 0.5.4 - fixed a few typos in the MacOS code
# 0.5.3 - added experimental MacOS support; added better popen()
# workarounds in _syscmd_ver() -- still not 100% elegant
# though
# 0.5.2 - fixed uname() to return '' instead of 'unknown' in all
# return values (the system uname command tends to return
# 'unknown' instead of just leaving the field empty)
# 0.5.1 - included code for slackware dist; added exception handlers
# to cover up situations where platforms don't have os.popen
# (e.g. Mac) or fail on socket.gethostname(); fixed libc
# detection RE
# 0.5.0 - changed the API names referring to system commands to *syscmd*;
# added java_ver(); made syscmd_ver() a private
# API (was system_ver() in previous versions) -- use uname()
# instead; extended the win32_ver() to also return processor
# type information
# 0.4.0 - added win32_ver() and modified the platform() output for WinXX
# 0.3.4 - fixed a bug in _follow_symlinks()
# 0.3.3 - fixed popen() and "file" command invocation bugs
# 0.3.2 - added architecture() API and support for it in platform()
# 0.3.1 - fixed syscmd_ver() RE to support Windows NT
# 0.3.0 - added system alias support
# 0.2.3 - removed 'wince' again... oh well.
# 0.2.2 - added 'wince' to syscmd_ver() supported platforms
# 0.2.1 - added cache logic and changed the platform string format
# 0.2.0 - changed the API to use functions instead of module globals
# since some action take too long to be run on module import
# 0.1.0 - first release
#
# You can always get the latest version of this module at:
#
# http://www.egenix.com/files/python/platform.py
#
# If that URL should fail, try contacting the author.
__copyright__ = """
Copyright (c) 1999-2000, Marc-Andre Lemburg; mailto:mal@lemburg.com
Copyright (c) 2000-2010, eGenix.com Software GmbH; mailto:info@egenix.com
Permission to use, copy, modify, and distribute this software and its
documentation for any purpose and without fee or royalty is hereby granted,
provided that the above copyright notice appear in all copies and that
both that copyright notice and this permission notice appear in
supporting documentation or portions thereof, including modifications,
that you make.
EGENIX.COM SOFTWARE GMBH DISCLAIMS ALL WARRANTIES WITH REGARD TO
THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
FITNESS, IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL,
INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING
FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT,
NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION
WITH THE USE OR PERFORMANCE OF THIS SOFTWARE !
"""
__version__ = '1.0.7'
import sys,string,os,re
### Globals & Constants
# Determine the platform's /dev/null device
try:
DEV_NULL = os.devnull
except AttributeError:
# os.devnull was added in Python 2.4, so emulate it for earlier
# Python versions
if sys.platform in ('dos','win32','win16','os2'):
# Use the old CP/M NUL as device name
DEV_NULL = 'NUL'
else:
# Standard Unix uses /dev/null
DEV_NULL = '/dev/null'
### Platform specific APIs
_libc_search = re.compile(r'(__libc_init)'
'|'
'(GLIBC_([0-9.]+))'
'|'
'(libc(_\w+)?\.so(?:\.(\d[0-9.]*))?)')
def libc_ver(executable=sys.executable,lib='',version='',
             chunksize=2048):
    """ Tries to determine the libc version that the file executable
        (which defaults to the Python interpreter) is linked against.

        Returns a tuple of strings (lib,version) which default to the
        given parameters in case the lookup fails.

        Note that the function has intimate knowledge of how different
        libc versions add symbols to the executable and thus is probably
        only useable for executables compiled using gcc.

        The file is read and scanned in chunks of chunksize bytes.
    """
    if hasattr(os.path, 'realpath'):
        # Python 2.2 introduced os.path.realpath(); it is used
        # here to work around problems with Cygwin not being
        # able to open symlinks for reading
        executable = os.path.realpath(executable)
    f = open(executable,'rb')
    binary = f.read(chunksize)
    pos = 0
    # Scan the binary chunk by chunk for libc marker symbols.
    # NOTE(review): a marker that straddles a chunk boundary is missed;
    # this is a long-standing limitation of the chunked scan.
    while 1:
        m = _libc_search.search(binary,pos)
        if not m:
            binary = f.read(chunksize)
            if not binary:
                break
            pos = 0
            continue
        libcinit,glibc,glibcversion,so,threads,soversion = m.groups()
        if libcinit and not lib:
            lib = 'libc'
        elif glibc:
            # Prefer glibc markers over plain libc; keep the highest
            # version string seen (string comparison, as in the original).
            if lib != 'glibc':
                lib = 'glibc'
                version = glibcversion
            elif glibcversion > version:
                version = glibcversion
        elif so:
            if lib != 'glibc':
                lib = 'libc'
                if soversion and soversion > version:
                    version = soversion
                if threads and version[-len(threads):] != threads:
                    version = version + threads
        pos = m.end()
    f.close()
    return lib,version
def _dist_try_harder(distname,version,id):
    """ Tries some special tricks to get the distribution
        information in case the default method fails.

        Currently supports older SuSE Linux, Caldera OpenLinux and
        Slackware Linux distributions.

        Returns a (distname, version, id) tuple, defaulting to the
        given arguments when nothing can be determined.
    """
    if os.path.exists('/var/adm/inst-log/info'):
        # SuSE Linux stores distribution information in that file
        info = open('/var/adm/inst-log/info').readlines()
        distname = 'SuSE'
        for line in info:
            # Each line is a "TAG value" pair; skip malformed lines.
            tv = string.split(line)
            if len(tv) == 2:
                tag,value = tv
            else:
                continue
            if tag == 'MIN_DIST_VERSION':
                version = string.strip(value)
            elif tag == 'DIST_IDENT':
                values = string.split(value,'-')
                id = values[2]
        return distname,version,id

    if os.path.exists('/etc/.installed'):
        # Caldera OpenLinux has some infos in that file (thanks to Colin Kong)
        info = open('/etc/.installed').readlines()
        for line in info:
            pkg = string.split(line,'-')
            if len(pkg) >= 2 and pkg[0] == 'OpenLinux':
                # XXX does Caldera support non Intel platforms ? If yes,
                #     where can we find the needed id ?
                return 'OpenLinux',pkg[1],id

    if os.path.isdir('/usr/lib/setup'):
        # Check for slackware version tag file (thanks to Greg Andruk)
        verfiles = os.listdir('/usr/lib/setup')
        # Drop entries that are not slack-version-* tag files
        # (iterating backwards so deletion does not skip entries).
        for n in range(len(verfiles)-1, -1, -1):
            if verfiles[n][:14] != 'slack-version-':
                del verfiles[n]
        if verfiles:
            verfiles.sort()
            distname = 'slackware'
            # Highest-sorting tag file wins; its suffix is the version.
            version = verfiles[-1][14:]
            return distname,version,id

    return distname,version,id
_release_filename = re.compile(r'(\w+)[-_](release|version)')
_lsb_release_version = re.compile(r'(.+)'
' release '
'([\d.]+)'
'[^(]*(?:\((.+)\))?')
_release_version = re.compile(r'([^0-9]+)'
'(?: release )?'
'([\d.]+)'
'[^(]*(?:\((.+)\))?')
# See also http://www.novell.com/coolsolutions/feature/11251.html
# and http://linuxmafia.com/faq/Admin/release-files.html
# and http://data.linux-ntfs.org/rpm/whichrpm
# and http://www.die.net/doc/linux/man/man1/lsb_release.1.html
_supported_dists = (
'SuSE', 'debian', 'fedora', 'redhat', 'centos',
'mandrake', 'mandriva', 'rocks', 'slackware', 'yellowdog', 'gentoo',
'UnitedLinux', 'turbolinux')
def _parse_release_file(firstline):
    """Parse the first line of a /etc/*-release file.

    Returns a (distname, version, id) tuple; distname is '' when the
    line does not match either known release-file format.
    """
    # Default to empty 'version' and 'id' strings.  Both defaults are used
    # when 'firstline' is empty.  'id' defaults to empty when an id can not
    # be deduced.
    version = ''
    id = ''

    # Parse the first line
    m = _lsb_release_version.match(firstline)
    if m is not None:
        # LSB format: "distro release x.x (codename)"
        return tuple(m.groups())

    # Pre-LSB format: "distro x.x (codename)"
    m = _release_version.match(firstline)
    if m is not None:
        return tuple(m.groups())

    # Unknown format... take the first two words
    l = string.split(string.strip(firstline))
    if l:
        version = l[0]
        if len(l) > 1:
            id = l[1]
    return '', version, id
def linux_distribution(distname='', version='', id='',
                       supported_dists=_supported_dists,
                       full_distribution_name=1):
    """ Tries to determine the name of the Linux OS distribution name.

        The function first looks for a distribution release file in
        /etc and then reverts to _dist_try_harder() in case no
        suitable files are found.

        supported_dists may be given to define the set of Linux
        distributions to look for. It defaults to a list of currently
        supported Linux distributions identified by their release file
        name.

        If full_distribution_name is true (default), the full
        distribution read from the OS is returned. Otherwise the short
        name taken from supported_dists is used.

        Returns a tuple (distname,version,id) which default to the
        args given as parameters.
    """
    try:
        etc = os.listdir('/etc')
    except os.error:
        # Probably not a Unix system
        return distname,version,id
    # Sort so the file picked is deterministic across runs.
    etc.sort()
    for file in etc:
        m = _release_filename.match(file)
        if m is not None:
            _distname,dummy = m.groups()
            if _distname in supported_dists:
                distname = _distname
                break
    else:
        # for-else: no recognised release file was found.
        return _dist_try_harder(distname,version,id)

    # Read the first line
    f = open('/etc/'+file, 'r')
    firstline = f.readline()
    f.close()
    _distname, _version, _id = _parse_release_file(firstline)

    # Only override the caller-supplied defaults with non-empty results.
    if _distname and full_distribution_name:
        distname = _distname
    if _version:
        version = _version
    if _id:
        id = _id
    return distname, version, id
# To maintain backwards compatibility:
def dist(distname='',version='',id='',
         supported_dists=_supported_dists):
    """ Backwards-compatibility wrapper around linux_distribution().

        Identical lookup, but always reports the short distribution
        name taken from supported_dists rather than the full name read
        from the OS. Returns a (distname, version, id) tuple defaulting
        to the given arguments.
    """
    result = linux_distribution(distname, version, id,
                                supported_dists=supported_dists,
                                full_distribution_name=0)
    return result
class _popen:
""" Fairly portable (alternative) popen implementation.
This is mostly needed in case os.popen() is not available, or
doesn't work as advertised, e.g. in Win9X GUI programs like
PythonWin or IDLE.
Writing to the pipe is currently not supported.
"""
tmpfile = ''
pipe = None
bufsize = None
mode = 'r'
def __init__(self,cmd,mode='r',bufsize=None):
if mode != 'r':
raise ValueError,'popen()-emulation only supports read mode'
import tempfile
self.tmpfile = tmpfile = tempfile.mktemp()
os.system(cmd + ' > %s' % tmpfile)
self.pipe = open(tmpfile,'rb')
self.bufsize = bufsize
self.mode = mode
def read(self):
return self.pipe.read()
def readlines(self):
if self.bufsize is not None:
return self.pipe.readlines()
def close(self,
remove=os.unlink,error=os.error):
if self.pipe:
rc = self.pipe.close()
else:
rc = 255
if self.tmpfile:
try:
remove(self.tmpfile)
except error:
pass
return rc
# Alias
__del__ = close
def popen(cmd, mode='r', bufsize=None):
    """ Portable popen() interface.

        Picks the best available implementation: win32pipe.popen on
        Windows NT, os.popen where it works, and the _popen emulation
        class as a last resort.
    """
    # Find a working popen implementation preferring win32pipe.popen
    # over os.popen over _popen
    popen = None
    if os.environ.get('OS','') == 'Windows_NT':
        # On NT win32pipe should work; on Win9x it hangs due to bugs
        # in the MS C lib (see MS KnowledgeBase article Q150956)
        try:
            import win32pipe
        except ImportError:
            pass
        else:
            popen = win32pipe.popen
    if popen is None:
        if hasattr(os,'popen'):
            popen = os.popen
            # Check whether it works... it doesn't in GUI programs
            # on Windows platforms
            if sys.platform == 'win32': # XXX Others too ?
                try:
                    popen('')
                except os.error:
                    popen = _popen
        else:
            popen = _popen
    if bufsize is None:
        return popen(cmd,mode)
    else:
        return popen(cmd,mode,bufsize)
def _norm_version(version, build=''):
    """ Normalize the version and build strings and return a single
        version string using the format major.minor.build (or patchlevel).
    """
    l = string.split(version,'.')
    if build:
        l.append(build)
    try:
        # NOTE: relies on Python 2 map() returning a list, so int()
        # failures surface here as ValueError.
        ints = map(int,l)
    except ValueError:
        # Non-numeric components: keep the strings as-is.
        strings = l
    else:
        # Numeric: round-trip through int to drop leading zeros.
        strings = map(str,ints)
    version = string.join(strings[:3],'.')
    return version
_ver_output = re.compile(r'(?:([\w ]+) ([\w.]+) '
'.*'
'\[.* ([\d.]+)\])')
# Examples of VER command output:
#
# Windows 2000: Microsoft Windows 2000 [Version 5.00.2195]
# Windows XP: Microsoft Windows XP [Version 5.1.2600]
# Windows Vista: Microsoft Windows [Version 6.0.6002]
#
# Note that the "Version" string gets localized on different
# Windows versions.
def _syscmd_ver(system='', release='', version='',
               supported_platforms=('win32','win16','dos','os2')):
    """ Tries to figure out the OS version used and returns
        a tuple (system,release,version).

        It uses the "ver" shell command for this which is known
        to exists on Windows, DOS and OS/2. XXX Others too ?

        In case this fails, the given parameters are used as
        defaults.
    """
    if sys.platform not in supported_platforms:
        return system,release,version

    # Try some common cmd strings
    for cmd in ('ver','command /c ver','cmd /c ver'):
        try:
            pipe = popen(cmd)
            info = pipe.read()
            if pipe.close():
                # Non-zero exit status from the shell command.
                raise os.error,'command failed'
            # XXX How can I suppress shell errors from being written
            #     to stderr ?
        except os.error,why:
            #print 'Command %s failed: %s' % (cmd,why)
            continue
        except IOError,why:
            #print 'Command %s failed: %s' % (cmd,why)
            continue
        else:
            break
    else:
        # for-else: every command variant failed.
        return system,release,version

    # Parse the output
    info = string.strip(info)
    m = _ver_output.match(info)
    if m is not None:
        system,release,version = m.groups()
        # Strip trailing dots from version and release
        if release[-1] == '.':
            release = release[:-1]
        if version[-1] == '.':
            version = version[:-1]
        # Normalize the version and build strings (eliminating additional
        # zeros)
        version = _norm_version(version)
    return system,release,version
_WIN32_CLIENT_RELEASES = {
(5, 0): "2000",
(5, 1): "XP",
# Strictly, 5.2 client is XP 64-bit, but platform.py historically
# has always called it 2003 Server
(5, 2): "2003Server",
(5, None): "post2003",
(6, 0): "Vista",
(6, 1): "7",
(6, 2): "8",
(6, 3): "8.1",
(6, None): "post8.1",
(10, 0): "10",
(10, None): "post10",
}
# Server release name lookup will default to client names if necessary
_WIN32_SERVER_RELEASES = {
(5, 2): "2003Server",
(6, 0): "2008Server",
(6, 1): "2008ServerR2",
(6, 2): "2012Server",
(6, 3): "2012ServerR2",
(6, None): "post2012ServerR2",
}
def _get_real_winver(maj, min, build):
    """Return the true Windows (major, minor, build) triple.

    From Windows 8.1 on, GetVersionEx() lies to apps without a matching
    manifest, so the version reported by sys.getwindowsversion() may be
    capped. Work around it by reading kernel32.dll's product version via
    the version-info API. Falls back to the given values on any failure.
    """
    # Versions before 6.2 (Windows 8) are reported truthfully.
    if maj < 6 or (maj == 6 and min < 2):
        return maj, min, build

    from ctypes import (c_buffer, POINTER, byref, create_unicode_buffer,
                        Structure, WinDLL)
    from ctypes.wintypes import DWORD, HANDLE

    # Layout of the fixed version-info block returned by VerQueryValue.
    class VS_FIXEDFILEINFO(Structure):
        _fields_ = [
            ("dwSignature", DWORD),
            ("dwStrucVersion", DWORD),
            ("dwFileVersionMS", DWORD),
            ("dwFileVersionLS", DWORD),
            ("dwProductVersionMS", DWORD),
            ("dwProductVersionLS", DWORD),
            ("dwFileFlagsMask", DWORD),
            ("dwFileFlags", DWORD),
            ("dwFileOS", DWORD),
            ("dwFileType", DWORD),
            ("dwFileSubtype", DWORD),
            ("dwFileDateMS", DWORD),
            ("dwFileDateLS", DWORD),
        ]

    kernel32 = WinDLL('kernel32')
    version = WinDLL('version')

    # We will immediately double the length up to MAX_PATH, but the
    # path may be longer, so we retry until the returned string is
    # shorter than our buffer.
    name_len = actual_len = 130
    while actual_len == name_len:
        name_len *= 2
        name = create_unicode_buffer(name_len)
        actual_len = kernel32.GetModuleFileNameW(HANDLE(kernel32._handle),
                                                 name, len(name))
        if not actual_len:
            return maj, min, build

    size = version.GetFileVersionInfoSizeW(name, None)
    if not size:
        return maj, min, build

    ver_block = c_buffer(size)
    if (not version.GetFileVersionInfoW(name, None, size, ver_block) or
        not ver_block):
        return maj, min, build

    pvi = POINTER(VS_FIXEDFILEINFO)()
    if not version.VerQueryValueW(ver_block, "", byref(pvi), byref(DWORD())):
        return maj, min, build

    # Product version is packed as two DWORDs: HIWORD/LOWORD pairs.
    maj = pvi.contents.dwProductVersionMS >> 16
    min = pvi.contents.dwProductVersionMS & 0xFFFF
    build = pvi.contents.dwProductVersionLS >> 16

    return maj, min, build
def win32_ver(release='', version='', csd='', ptype=''):
    """Return Windows version info as (release, version, csd, ptype).

    Falls back to the given defaults when the information is not
    available (e.g. on non-Windows platforms).
    """
    try:
        from sys import getwindowsversion
    except ImportError:
        # Not running on Windows.
        return release, version, csd, ptype
    try:
        from winreg import OpenKeyEx, QueryValueEx, CloseKey, HKEY_LOCAL_MACHINE
    except ImportError:
        # Python 2 name of the registry module.
        from _winreg import OpenKeyEx, QueryValueEx, CloseKey, HKEY_LOCAL_MACHINE

    winver = getwindowsversion()
    # Bypass any compatibility-mode version lie (see _get_real_winver).
    maj, min, build = _get_real_winver(*winver[:3])
    version = '{0}.{1}.{2}'.format(maj, min, build)

    release = (_WIN32_CLIENT_RELEASES.get((maj, min)) or
               _WIN32_CLIENT_RELEASES.get((maj, None)) or
               release)

    # getwindowsversion() reflect the compatibility mode Python is
    # running under, and so the service pack value is only going to be
    # valid if the versions match.
    if winver[:2] == (maj, min):
        try:
            csd = 'SP{}'.format(winver.service_pack_major)
        except AttributeError:
            if csd[:13] == 'Service Pack ':
                csd = 'SP' + csd[13:]

    # VER_NT_SERVER = 3
    if getattr(winver, 'product', None) == 3:
        release = (_WIN32_SERVER_RELEASES.get((maj, min)) or
                   _WIN32_SERVER_RELEASES.get((maj, None)) or
                   release)

    # Read the product type (e.g. "Multiprocessor Free") from the registry;
    # best-effort only.
    key = None
    try:
        key = OpenKeyEx(HKEY_LOCAL_MACHINE,
                        r'SOFTWARE\Microsoft\Windows NT\CurrentVersion')
        ptype = QueryValueEx(key, 'CurrentType')[0]
    except:
        pass
    finally:
        if key:
            CloseKey(key)

    return release, version, csd, ptype
def _mac_ver_lookup(selectors,default=None):
    """Query a sequence of gestalt selector codes, substituting `default`
    for any selector that cannot be resolved."""
    from gestalt import gestalt
    import MacOS
    l = []
    append = l.append
    for selector in selectors:
        try:
            append(gestalt(selector))
        except (RuntimeError, MacOS.Error):
            # Unknown selector or gestalt failure -- use the default.
            append(default)
    return l
def _bcd2str(bcd):
return hex(bcd)[2:]
def _mac_ver_gestalt():
    """
        Thanks to Mark R. Levinson for mailing documentation links and
        code examples for this function. Documentation for the
        gestalt() API is available online at:

           http://www.rgaros.nl/gestalt/
    """
    # Check whether the version info module is available
    try:
        import gestalt
        import MacOS
    except ImportError:
        return None
    # Get the infos
    sysv,sysa = _mac_ver_lookup(('sysv','sysa'))
    # Decode the infos
    # NOTE(review): if both lookups fail (sysv and sysa falsy), `release`
    # and `machine` are never bound and the final return raises NameError;
    # in practice at least 'sysv' resolves on MacOS.
    if sysv:
        # 'sysv' is BCD-encoded: 0xMMmp -> major/minor/patch nibbles.
        major = (sysv & 0xFF00) >> 8
        minor = (sysv & 0x00F0) >> 4
        patch = (sysv & 0x000F)

        if (major, minor) >= (10, 4):
            # the 'sysv' gestalt cannot return patchlevels
            # higher than 9. Apple introduced 3 new
            # gestalt codes in 10.4 to deal with this
            # issue (needed because patch levels can
            # run higher than 9, such as 10.4.11)
            major,minor,patch = _mac_ver_lookup(('sys1','sys2','sys3'))
            release = '%i.%i.%i' %(major, minor, patch)
        else:
            release = '%s.%i.%i' % (_bcd2str(major),minor,patch)

    if sysa:
        # Map the architecture gestalt code to a machine name.
        machine = {0x1: '68k',
                   0x2: 'PowerPC',
                   0xa: 'i386'}.get(sysa,'')

    versioninfo=('', '', '')
    return release,versioninfo,machine
def _mac_ver_xml():
    """Read the Mac OS release from SystemVersion.plist.

    Returns (release, versioninfo, machine) or None when the plist (or
    plistlib) is unavailable.
    """
    fn = '/System/Library/CoreServices/SystemVersion.plist'
    if not os.path.exists(fn):
        return None

    try:
        import plistlib
    except ImportError:
        return None

    pl = plistlib.readPlist(fn)
    release = pl['ProductVersion']
    versioninfo=('', '', '')
    machine = os.uname()[4]
    if machine in ('ppc', 'Power Macintosh'):
        # for compatibility with the gestalt based code
        machine = 'PowerPC'

    return release,versioninfo,machine
def mac_ver(release='',versioninfo=('','',''),machine=''):
    """ Get MacOS version information and return it as tuple (release,
        versioninfo, machine) with versioninfo being a tuple (version,
        dev_stage, non_release_version).

        Entries which cannot be determined are set to the parameter values
        which default to ''. All tuple entries are strings.
    """
    # Try each detection strategy in order of reliability: the XML plist
    # first, then the legacy gestalt calls. Each probe returns None on
    # failure, in which case the next one is attempted.
    for probe in (_mac_ver_xml, _mac_ver_gestalt):
        info = probe()
        if info is not None:
            return info
    # Nothing worked -- hand back the caller-supplied defaults.
    return release,versioninfo,machine
def _java_getprop(name,default):
    """Look up a Java system property, returning `default` when it is
    unset or the System API is unavailable (non-Jython interpreters)."""
    from java.lang import System
    try:
        value = System.getProperty(name)
    except AttributeError:
        return default
    if value is None:
        return default
    return value
def java_ver(release='',vendor='',vminfo=('','',''),osinfo=('','','')):
    """ Version interface for Jython.

        Returns a tuple (release,vendor,vminfo,osinfo) with vminfo being
        a tuple (vm_name,vm_release,vm_vendor) and osinfo being a
        tuple (os_name,os_version,os_arch).

        Values which cannot be determined are set to the defaults
        given as parameters (which all default to '').
    """
    # Import the needed APIs; a plain ImportError means we are not
    # running on Jython, so hand back the defaults unchanged.
    try:
        import java.lang
    except ImportError:
        return release,vendor,vminfo,osinfo

    vendor = _java_getprop('java.vendor', vendor)
    release = _java_getprop('java.version', release)

    vm_name, vm_release, vm_vendor = vminfo
    vminfo = (_java_getprop('java.vm.name', vm_name),
              _java_getprop('java.vm.version', vm_release),
              _java_getprop('java.vm.vendor', vm_vendor))

    os_name, os_version, os_arch = osinfo
    osinfo = (_java_getprop('java.os.name', os_name),
              _java_getprop('java.os.version', os_version),
              _java_getprop('java.os.arch', os_arch))

    return release, vendor, vminfo, osinfo
### System name aliasing
def system_alias(system,release,version):
    """ Returns (system,release,version) aliased to common
        marketing names used for some systems.

        It also does some reordering of the information in some cases
        where it would otherwise cause confusion.
    """
    if system == 'Rhapsody':
        # Apple's BSD derivative
        # XXX How can we determine the marketing release number ?
        return 'MacOS X Server',system+release,version
    elif system == 'SunOS':
        # Sun's OS
        if release < '5':
            # These releases use the old name SunOS
            return system,release,version
        # Modify release (marketing release = SunOS release - 3)
        l = string.split(release,'.')
        if l:
            try:
                major = int(l[0])
            except ValueError:
                # Non-numeric major component: leave release untouched.
                pass
            else:
                major = major - 3
                l[0] = str(major)
                release = string.join(l,'.')
        if release < '6':
            system = 'Solaris'
        else:
            # XXX Whatever the new SunOS marketing name is...
            system = 'Solaris'
    elif system == 'IRIX64':
        # IRIX reports IRIX64 on platforms with 64-bit support; yet it
        # is really a version and not a different platform, since 32-bit
        # apps are also supported..
        system = 'IRIX'
        if version:
            version = version + ' (64bit)'
        else:
            version = '64bit'
    elif system in ('win32','win16'):
        # In case one of the other tricks
        system = 'Windows'
    return system,release,version
### Various internal helpers
def _platform(*args):
""" Helper to format the platform string in a filename
compatible format e.g. "system-version-machine".
"""
# Format the platform string
platform = string.join(
map(string.strip,
filter(len, args)),
'-')
# Cleanup some possible filename obstacles...
replace = string.replace
platform = replace(platform,' ','_')
platform = replace(platform,'/','-')
platform = replace(platform,'\\','-')
platform = replace(platform,':','-')
platform = replace(platform,';','-')
platform = replace(platform,'"','-')
platform = replace(platform,'(','-')
platform = replace(platform,')','-')
# No need to report 'unknown' information...
platform = replace(platform,'unknown','')
# Fold '--'s and remove trailing '-'
while 1:
cleaned = replace(platform,'--','-')
if cleaned == platform:
break
platform = cleaned
while platform[-1] == '-':
platform = platform[:-1]
return platform
def _node(default=''):
""" Helper to determine the node name of this machine.
"""
try:
import socket
except ImportError:
# No sockets...
return default
try:
return socket.gethostname()
except socket.error:
# Still not working...
return default
# os.path.abspath is new in Python 1.5.2:
if not hasattr(os.path,'abspath'):
def _abspath(path,
isabs=os.path.isabs,join=os.path.join,getcwd=os.getcwd,
normpath=os.path.normpath):
if not isabs(path):
path = join(getcwd(), path)
return normpath(path)
else:
_abspath = os.path.abspath
def _follow_symlinks(filepath):

    """ In case filepath is a symlink, follow it until a
        real file is reached.
    """
    resolved = _abspath(filepath)
    # Chase links one hop at a time; os.readlink may return a relative
    # target, so re-anchor each hop against the link's own directory.
    while os.path.islink(resolved):
        target = os.readlink(resolved)
        resolved = os.path.normpath(
            os.path.join(os.path.dirname(resolved), target))
    return resolved
def _syscmd_uname(option,default=''):

    """ Interface to the system's uname command.

        Returns the stripped output of "uname <option>", or *default*
        when the command is unavailable or fails.
    """
    # These platforms have no uname command.
    if sys.platform in ('dos','win32','win16','os2'):
        # XXX Others too ?
        return default
    try:
        f = os.popen('uname %s 2> %s' % (option, DEV_NULL))
    except (AttributeError,os.error):
        return default
    output = f.read().strip()
    rc = f.close()
    # Non-zero exit status or empty output both signal failure.
    if rc or not output:
        return default
    return output
def _syscmd_file(target,default=''):

    """ Interface to the system's file command.

        Symlinks in *target* are resolved first; *default* is returned
        in case the command is unavailable or fails.  Note that the raw
        output (including the filename prefix) is returned as-is.
    """
    # We do the import here to avoid a bootstrap issue: importing
    # subprocess at module level drags in select, which may not be
    # built yet while building Python itself.  See c73b90b6dadd
    # changeset.
    #
    # [..]
    # ranlib libpython2.7.a
    # gcc -o python \
    #         Modules/python.o \
    #         libpython2.7.a -lsocket -lnsl -ldl  -lm
    # Traceback (most recent call last):
    #   File "./setup.py", line 8, in <module>
    #     from platform import machine as platform_machine
    #   File "[..]/build/Lib/platform.py", line 116, in <module>
    #     import sys,string,os,re,subprocess
    #   File "[..]/build/Lib/subprocess.py", line 429, in <module>
    #     import select
    # ImportError: No module named select
    import subprocess

    # No "file" command on these platforms.
    if sys.platform in ('dos','win32','win16','os2'):
        # XXX Others too ?
        return default

    target = _follow_symlinks(target)
    try:
        proc = subprocess.Popen(['file', target],
                stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    except (AttributeError,os.error):
        return default

    output = proc.communicate()[0]
    rc = proc.wait()
    if rc or not output:
        return default
    return output
### Information about the used architecture

# Default values for architecture; non-empty strings override the
# defaults given as parameters.  Keyed by sys.platform; values are
# (bits, linkage) pairs used by architecture() when the "file"
# command is unavailable.
_default_architecture = {
    'win32': ('','WindowsPE'),
    'win16': ('','Windows'),
    'dos': ('','MSDOS'),
}

# Pre-bound splitter for tokenizing "file" command output on
# whitespace or commas.
_architecture_split = re.compile(r'[\s,]').split
def architecture(executable=sys.executable,bits='',linkage=''):

    """ Queries the given executable (defaults to the Python interpreter
        binary) for various architecture information.

        Returns a tuple (bits,linkage) which contains information about
        the bit architecture and the linkage format used for the
        executable. Both values are returned as strings.

        Values that cannot be determined are returned as given by the
        parameter presets. If bits is given as '', the sizeof(pointer)
        (or sizeof(long) on Python version < 1.5.2) is used as
        indicator for the supported pointer size.

        The function relies on the system's "file" command to do the
        actual work. This is available on most if not all Unix
        platforms. On some non-Unix platforms where the "file" command
        does not exist and the executable is set to the Python interpreter
        binary defaults from _default_architecture are used.
    """
    # Use the sizeof(pointer) as default number of bits if nothing
    # else is given as default.
    if not bits:
        import struct
        try:
            size = struct.calcsize('P')
        except struct.error:
            # Older installations can only query longs
            size = struct.calcsize('l')
        bits = str(size*8) + 'bit'

    # Get data from the 'file' system command
    if executable:
        output = _syscmd_file(executable, '')
    else:
        output = ''

    if not output and \
       executable == sys.executable:
        # "file" command did not return anything; we'll try to provide
        # some sensible defaults then...
        if sys.platform in _default_architecture:
            b, l = _default_architecture[sys.platform]
            if b:
                bits = b
            if l:
                linkage = l
        return bits, linkage

    # Split the output into a list of strings omitting the filename.
    # The remaining tokens describe the binary format, e.g.
    # ['ELF', '64-bit', 'LSB', 'executable', ...].
    fileout = _architecture_split(output)[1:]

    if 'executable' not in fileout:
        # Format not supported
        return bits,linkage

    # Bits
    if '32-bit' in fileout:
        bits = '32bit'
    elif 'N32' in fileout:
        # On Irix only
        bits = 'n32bit'
    elif '64-bit' in fileout:
        bits = '64bit'

    # Linkage
    if 'ELF' in fileout:
        linkage = 'ELF'
    elif 'PE' in fileout:
        # E.g. Windows uses this format
        if 'Windows' in fileout:
            linkage = 'WindowsPE'
        else:
            linkage = 'PE'
    elif 'COFF' in fileout:
        linkage = 'COFF'
    elif 'MS-DOS' in fileout:
        linkage = 'MSDOS'
    else:
        # XXX the A.OUT format also falls under this class...
        pass

    return bits,linkage
### Portable uname() interface
# Cached result of uname(); computed once per process.
_uname_cache = None

def uname():

    """ Fairly portable uname interface. Returns a tuple
        of strings (system,node,release,version,machine,processor)
        identifying the underlying platform.

        Note that unlike the os.uname function this also returns
        possible processor information as an additional tuple entry.

        Entries which cannot be determined are set to ''.
    """
    global _uname_cache
    no_os_uname = 0

    if _uname_cache is not None:
        return _uname_cache

    processor = ''

    # Get some infos from the builtin os.uname API...
    try:
        system,node,release,version,machine = os.uname()
    except AttributeError:
        # os.uname is not available (e.g. on Windows); note that the
        # short-circuit below keeps the unbound names from being read.
        no_os_uname = 1

    if no_os_uname or not filter(None, (system, node, release, version, machine)):
        # Hmm, no there is either no uname or uname has returned
        #'unknowns'... we'll have to poke around the system then.
        if no_os_uname:
            system = sys.platform
            release = ''
            version = ''
            node = _node()
            machine = ''

        use_syscmd_ver = 1

        # Try win32_ver() on win32 platforms
        if system == 'win32':
            release,version,csd,ptype = win32_ver()
            if release and version:
                use_syscmd_ver = 0
            # Try to use the PROCESSOR_* environment variables
            # available on Win XP and later; see
            # http://support.microsoft.com/kb/888731 and
            # http://www.geocities.com/rick_lively/MANUALS/ENV/MSWIN/PROCESSI.HTM
            if not machine:
                # WOW64 processes mask the native architecture
                if "PROCESSOR_ARCHITEW6432" in os.environ:
                    machine = os.environ.get("PROCESSOR_ARCHITEW6432", '')
                else:
                    machine = os.environ.get('PROCESSOR_ARCHITECTURE', '')
            if not processor:
                processor = os.environ.get('PROCESSOR_IDENTIFIER', machine)

        # Try the 'ver' system command available on some
        # platforms
        if use_syscmd_ver:
            system,release,version = _syscmd_ver(system)
            # Normalize system to what win32_ver() normally returns
            # (_syscmd_ver() tends to return the vendor name as well)
            if system == 'Microsoft Windows':
                system = 'Windows'
            elif system == 'Microsoft' and release == 'Windows':
                # Under Windows Vista and Windows Server 2008,
                # Microsoft changed the output of the ver command. The
                # release is no longer printed.  This causes the
                # system and release to be misidentified.
                system = 'Windows'
                if '6.0' == version[:3]:
                    release = 'Vista'
                else:
                    release = ''

        # In case we still don't know anything useful, we'll try to
        # help ourselves
        if system in ('win32','win16'):
            if not version:
                if system == 'win32':
                    version = '32bit'
                else:
                    version = '16bit'
            system = 'Windows'

        elif system[:4] == 'java':
            release,vendor,vminfo,osinfo = java_ver()
            system = 'Java'
            version = string.join(vminfo,', ')
            if not version:
                version = vendor

    # System specific extensions
    if system == 'OpenVMS':
        # OpenVMS seems to have release and version mixed up
        if not release or release == '0':
            release = version
            version = ''
        # Get processor information
        try:
            import vms_lib
        except ImportError:
            pass
        else:
            csid, cpu_number = vms_lib.getsyi('SYI$_CPU',0)
            if (cpu_number >= 128):
                processor = 'Alpha'
            else:
                processor = 'VAX'
    if not processor:
        # Get processor information from the uname system command
        processor = _syscmd_uname('-p','')

    #If any unknowns still exist, replace them with ''s, which are more portable
    if system == 'unknown':
        system = ''
    if node == 'unknown':
        node = ''
    if release == 'unknown':
        release = ''
    if version == 'unknown':
        version = ''
    if machine == 'unknown':
        machine = ''
    if processor == 'unknown':
        processor = ''

    # normalize name
    # NOTE(review): this unconditionally maps the 'Microsoft'/'Windows'
    # pair to release 'Vista' regardless of the actual version -- this
    # differs from the version-checked normalization above; confirm it
    # is intentional for this distribution.
    if system == 'Microsoft' and release == 'Windows':
        system = 'Windows'
        release = 'Vista'

    _uname_cache = system,node,release,version,machine,processor
    return _uname_cache
### Direct interfaces to some of the uname() return values
def system():

    """ Returns the system/OS name, e.g. 'Linux', 'Windows' or 'Java'.

        An empty string is returned if the value cannot be determined.
    """
    # uname() tuple layout: (system, node, release, version, machine, processor)
    return uname()[0]
def node():

    """ Returns the computer's network name (which may not be fully
        qualified).

        An empty string is returned if the value cannot be determined.
    """
    info = uname()
    return info[1]
def release():

    """ Returns the system's release, e.g. '2.2.0' or 'NT'.

        An empty string is returned if the value cannot be determined.
    """
    info = uname()
    return info[2]
def version():

    """ Returns the system's release version, e.g. '#3 on degas'.

        An empty string is returned if the value cannot be determined.
    """
    info = uname()
    return info[3]
def machine():

    """ Returns the machine type, e.g. 'i386'.

        An empty string is returned if the value cannot be determined.
    """
    info = uname()
    return info[4]
def processor():

    """ Returns the (true) processor name, e.g. 'amdk6'.

        An empty string is returned if the value cannot be
        determined. Note that many platforms do not provide this
        information or simply return the same value as for machine(),
        e.g. NetBSD does this.
    """
    info = uname()
    return info[5]
### Various APIs for extracting information from sys.version
# Parses CPython-style sys.version strings, e.g.
# '2.7.10 (default, May 23 2015, 09:40:32) [MSC v.1500 32 bit (Intel)]'.
_sys_version_parser = re.compile(
    r'([\w.+]+)\s*'                   # "version<space>"
    r'\(#?([^,]+)'                    # "(#buildno"
    r'(?:,\s*([\w ]*)'                # ", builddate"
    r'(?:,\s*([\w :]*))?)?\)\s*'      # ", buildtime)<space>"
    r'\[([^\]]+)\]?')                 # "[compiler]"

# Parses IronPython 1.x/2.0 style version strings, e.g.
# 'IronPython 2.0 (2.0.0.0) on .NET 2.0.50727.3053'.
_ironpython_sys_version_parser = re.compile(
    r'IronPython\s*'
    '([\d\.]+)'
    '(?: \(([\d\.]+)\))?'
    ' on (.NET [\d\.]+)')

# IronPython covering 2.6 and 2.7: these builds put the
# CPython-compatible version number first.
_ironpython26_sys_version_parser = re.compile(
    r'([\d.]+)\s*'
    '\(IronPython\s*'
    '[\d.]+\s*'
    '\(([\d.]+)\) on ([\w.]+ [\d.]+(?: \(\d+-bit\))?)\)'
)

# Parses PyPy version strings, e.g.
# '2.7.3 (#1, Jul 15 2013, 00:00:00) [PyPy 2.1.0 with GCC 4.6.3]'.
_pypy_sys_version_parser = re.compile(
    r'([\w.+]+)\s*'
    '\(#?([^,]+),\s*([\w ]+),\s*([\w :]+)\)\s*'
    '\[PyPy [^\]]+\]?')

# Cache of _sys_version() results, keyed by the raw sys.version string.
_sys_version_cache = {}
def _sys_version(sys_version=None):

    """ Returns a parsed version of Python's sys.version as tuple
        (name, version, branch, revision, buildno, builddate, compiler)
        referring to the Python implementation name, version, branch,
        revision, build number, build date/time as string and the compiler
        identification string.

        Note that unlike the Python sys.version, the returned value
        for the Python version will always include the patchlevel (it
        defaults to '.0').

        The function returns empty strings for tuple entries that
        cannot be determined.

        sys_version may be given to parse an alternative version
        string, e.g. if the version was read from a different Python
        interpreter.
    """
    # Get the Python version
    if sys_version is None:
        sys_version = sys.version

    # Try the cache first
    result = _sys_version_cache.get(sys_version, None)
    if result is not None:
        return result

    # Parse it: dispatch to the implementation-specific regex.
    if 'IronPython' in sys_version:
        # IronPython
        name = 'IronPython'
        if sys_version.startswith('IronPython'):
            # 1.x/2.0 style string starts with the implementation name.
            match = _ironpython_sys_version_parser.match(sys_version)
        else:
            match = _ironpython26_sys_version_parser.match(sys_version)

        if match is None:
            raise ValueError(
                'failed to parse IronPython sys.version: %s' %
                repr(sys_version))

        version, alt_version, compiler = match.groups()
        # IronPython does not expose build number/date.
        buildno = ''
        builddate = ''

    elif sys.platform.startswith('java'):
        # Jython
        name = 'Jython'
        match = _sys_version_parser.match(sys_version)
        if match is None:
            raise ValueError(
                'failed to parse Jython sys.version: %s' %
                repr(sys_version))
        version, buildno, builddate, buildtime, _ = match.groups()
        if builddate is None:
            builddate = ''
        compiler = sys.platform

    elif "PyPy" in sys_version:
        # PyPy
        name = "PyPy"
        match = _pypy_sys_version_parser.match(sys_version)
        if match is None:
            raise ValueError("failed to parse PyPy sys.version: %s" %
                             repr(sys_version))
        version, buildno, builddate, buildtime = match.groups()
        compiler = ""

    else:
        # CPython
        match = _sys_version_parser.match(sys_version)
        if match is None:
            raise ValueError(
                'failed to parse CPython sys.version: %s' %
                repr(sys_version))
        version, buildno, builddate, buildtime, compiler = \
              match.groups()
        name = 'CPython'
        if builddate is None:
            builddate = ''
        elif buildtime:
            builddate = builddate + ' ' + buildtime

    if hasattr(sys, 'subversion'):
        # sys.subversion was added in Python 2.5
        _, branch, revision = sys.subversion
    else:
        branch = ''
        revision = ''

    # Add the patchlevel version if missing
    l = string.split(version, '.')
    if len(l) == 2:
        l.append('0')
        version = string.join(l, '.')

    # Build and cache the result
    result = (name, version, branch, revision, buildno, builddate, compiler)
    _sys_version_cache[sys_version] = result
    return result
def python_implementation():

    """ Returns a string identifying the Python implementation.

        Currently, the following implementations are identified:
        'CPython' (C implementation of Python),
        'IronPython' (.NET implementation of Python),
        'Jython' (Java implementation of Python),
        'PyPy' (Python implementation of Python).
    """
    name = _sys_version()[0]
    return name
def python_version():

    """ Returns the Python version as string 'major.minor.patchlevel'.

        Note that unlike the Python sys.version, the returned value
        will always include the patchlevel (it defaults to 0).
    """
    # _sys_version() tuple slot 1 holds the normalized version string.
    return _sys_version()[1]
def python_version_tuple():

    """ Returns the Python version as tuple (major, minor, patchlevel)
        of strings.

        Note that unlike the Python sys.version, the returned value
        will always include the patchlevel (it defaults to 0).
    """
    ver = _sys_version()[1]
    return tuple(string.split(ver, '.'))
def python_branch():

    """ Returns a string identifying the Python implementation
        branch.

        For CPython this is the Subversion branch from which the
        Python binary was built.

        If not available, an empty string is returned.
    """
    info = _sys_version()
    return info[2]
def python_revision():

    """ Returns a string identifying the Python implementation
        revision.

        For CPython this is the Subversion revision from which the
        Python binary was built.

        If not available, an empty string is returned.
    """
    info = _sys_version()
    return info[3]
def python_build():

    """ Returns a tuple (buildno, builddate) stating the Python
        build number and date as strings.
    """
    info = _sys_version()
    return info[4], info[5]
def python_compiler():

    """ Returns a string identifying the compiler used for compiling
        Python.
    """
    # Compiler identification lives in the last slot of the tuple.
    return _sys_version()[6]
### The Opus Magnum of platform strings :-)
# Cache of composed platform strings, keyed by the (aliased, terse)
# argument pair.
_platform_cache = {}

def platform(aliased=0, terse=0):

    """ Returns a single string identifying the underlying platform
        with as much useful information as possible (but no more :).

        The output is intended to be human readable rather than
        machine parseable. It may look different on different
        platforms and this is intended.

        If "aliased" is true, the function will use aliases for
        various platforms that report system names which differ from
        their common names, e.g. SunOS will be reported as
        Solaris. The system_alias() function is used to implement
        this.

        Setting terse to true causes the function to return only the
        absolute minimum information needed to identify the platform.
    """
    result = _platform_cache.get((aliased, terse), None)
    if result is not None:
        return result

    # Get uname information and then apply platform specific cosmetics
    # to it...
    system,node,release,version,machine,processor = uname()
    if machine == processor:
        # Redundant processor info adds no value to the string.
        processor = ''
    if aliased:
        system,release,version = system_alias(system,release,version)

    if system == 'Windows':
        # MS platforms
        rel,vers,csd,ptype = win32_ver(version)
        if terse:
            platform = _platform(system,release)
        else:
            platform = _platform(system,release,version,csd)

    elif system in ('Linux',):
        # Linux based systems
        distname,distversion,distid = dist('')
        if distname and not terse:
            platform = _platform(system,release,machine,processor,
                                 'with',
                                 distname,distversion,distid)
        else:
            # If the distribution name is unknown check for libc vs. glibc
            libcname,libcversion = libc_ver(sys.executable)
            platform = _platform(system,release,machine,processor,
                                 'with',
                                 libcname+libcversion)
    elif system == 'Java':
        # Java platforms
        r,v,vminfo,(os_name,os_version,os_arch) = java_ver()
        if terse or not os_name:
            platform = _platform(system,release,version)
        else:
            platform = _platform(system,release,version,
                                 'on',
                                 os_name,os_version,os_arch)

    elif system == 'MacOS':
        # MacOS platforms
        if terse:
            platform = _platform(system,release)
        else:
            platform = _platform(system,release,machine)

    else:
        # Generic handler
        if terse:
            platform = _platform(system,release)
        else:
            bits,linkage = architecture(sys.executable)
            platform = _platform(system,release,machine,processor,bits,linkage)

    _platform_cache[(aliased, terse)] = platform
    return platform
### Command line interface
# Command line interface: print the platform string and exit.
# Usage: python platform.py [terse|--terse] [nonaliased|--nonaliased]
if __name__ == '__main__':
    # Default is to print the aliased verbose platform string
    terse = ('terse' in sys.argv or '--terse' in sys.argv)
    aliased = (not 'nonaliased' in sys.argv and not '--nonaliased' in sys.argv)
    print platform(aliased,terse)
    sys.exit(0)
| JVenberg/PokemonGo-Bot-Desktop | pywin/Lib/platform.py | Python | mit | 51,537 |
"""
A module for converting numbers or color arguments to *RGB* or *RGBA*
*RGB* and *RGBA* are sequences of, respectively, 3 or 4 floats in the
range 0-1.
This module includes functions and classes for color specification
conversions, and for mapping numbers to colors in a 1-D array of
colors called a colormap. Colormapping typically involves two steps:
a data array is first mapped onto the range 0-1 using an instance
of :class:`Normalize` or of a subclass; then this number in the 0-1
range is mapped to a color using an instance of a subclass of
:class:`Colormap`. Two are provided here:
:class:`LinearSegmentedColormap`, which is used to generate all
the built-in colormap instances, but is also useful for making
custom colormaps, and :class:`ListedColormap`, which is used for
generating a custom colormap from a list of color specifications.
The module also provides a single instance, *colorConverter*, of the
:class:`ColorConverter` class providing methods for converting single
color specifications or sequences of them to *RGB* or *RGBA*.
Commands which take color arguments can use several formats to specify
the colors. For the basic builtin colors, you can use a single letter
- b : blue
- g : green
- r : red
- c : cyan
- m : magenta
- y : yellow
- k : black
- w : white
Gray shades can be given as a string encoding a float in the 0-1
range, e.g.::
color = '0.75'
For a greater range of colors, you have two options. You can specify
the color using an html hex string, as in::
color = '#eeefff'
or you can pass an *R* , *G* , *B* tuple, where each of *R* , *G* , *B*
are in the range [0,1].
Finally, legal html names for colors, like 'red', 'burlywood' and
'chartreuse' are supported.
"""
import re
import numpy as np
from numpy import ma
import matplotlib.cbook as cbook
# Feature detection for numpy: clip() grew the "out" keyword in 1.2.
# NOTE: assumes the first two dot-separated fields of np.__version__ are
# pure integers, which holds for release builds.
parts = np.__version__.split('.')
NP_MAJOR, NP_MINOR = map(int, parts[:2])
# true if clip supports the out kwarg.  Compare (major, minor) tuples so
# that e.g. numpy 2.0 qualifies ("NP_MAJOR>=1 and NP_MINOR>=2" wrongly
# evaluated False for any X.0/X.1 release with X > 1).
NP_CLIP_OUT = (NP_MAJOR, NP_MINOR) >= (1, 2)
cnames = {
'aliceblue' : '#F0F8FF',
'antiquewhite' : '#FAEBD7',
'aqua' : '#00FFFF',
'aquamarine' : '#7FFFD4',
'azure' : '#F0FFFF',
'beige' : '#F5F5DC',
'bisque' : '#FFE4C4',
'black' : '#000000',
'blanchedalmond' : '#FFEBCD',
'blue' : '#0000FF',
'blueviolet' : '#8A2BE2',
'brown' : '#A52A2A',
'burlywood' : '#DEB887',
'cadetblue' : '#5F9EA0',
'chartreuse' : '#7FFF00',
'chocolate' : '#D2691E',
'coral' : '#FF7F50',
'cornflowerblue' : '#6495ED',
'cornsilk' : '#FFF8DC',
'crimson' : '#DC143C',
'cyan' : '#00FFFF',
'darkblue' : '#00008B',
'darkcyan' : '#008B8B',
'darkgoldenrod' : '#B8860B',
'darkgray' : '#A9A9A9',
'darkgreen' : '#006400',
'darkkhaki' : '#BDB76B',
'darkmagenta' : '#8B008B',
'darkolivegreen' : '#556B2F',
'darkorange' : '#FF8C00',
'darkorchid' : '#9932CC',
'darkred' : '#8B0000',
'darksalmon' : '#E9967A',
'darkseagreen' : '#8FBC8F',
'darkslateblue' : '#483D8B',
'darkslategray' : '#2F4F4F',
'darkturquoise' : '#00CED1',
'darkviolet' : '#9400D3',
'deeppink' : '#FF1493',
'deepskyblue' : '#00BFFF',
'dimgray' : '#696969',
'dodgerblue' : '#1E90FF',
'firebrick' : '#B22222',
'floralwhite' : '#FFFAF0',
'forestgreen' : '#228B22',
'fuchsia' : '#FF00FF',
'gainsboro' : '#DCDCDC',
'ghostwhite' : '#F8F8FF',
'gold' : '#FFD700',
'goldenrod' : '#DAA520',
'gray' : '#808080',
'green' : '#008000',
'greenyellow' : '#ADFF2F',
'honeydew' : '#F0FFF0',
'hotpink' : '#FF69B4',
'indianred' : '#CD5C5C',
'indigo' : '#4B0082',
'ivory' : '#FFFFF0',
'khaki' : '#F0E68C',
'lavender' : '#E6E6FA',
'lavenderblush' : '#FFF0F5',
'lawngreen' : '#7CFC00',
'lemonchiffon' : '#FFFACD',
'lightblue' : '#ADD8E6',
'lightcoral' : '#F08080',
'lightcyan' : '#E0FFFF',
'lightgoldenrodyellow' : '#FAFAD2',
'lightgreen' : '#90EE90',
'lightgrey' : '#D3D3D3',
'lightpink' : '#FFB6C1',
'lightsalmon' : '#FFA07A',
'lightseagreen' : '#20B2AA',
'lightskyblue' : '#87CEFA',
'lightslategray' : '#778899',
'lightsteelblue' : '#B0C4DE',
'lightyellow' : '#FFFFE0',
'lime' : '#00FF00',
'limegreen' : '#32CD32',
'linen' : '#FAF0E6',
'magenta' : '#FF00FF',
'maroon' : '#800000',
'mediumaquamarine' : '#66CDAA',
'mediumblue' : '#0000CD',
'mediumorchid' : '#BA55D3',
'mediumpurple' : '#9370DB',
'mediumseagreen' : '#3CB371',
'mediumslateblue' : '#7B68EE',
'mediumspringgreen' : '#00FA9A',
'mediumturquoise' : '#48D1CC',
'mediumvioletred' : '#C71585',
'midnightblue' : '#191970',
'mintcream' : '#F5FFFA',
'mistyrose' : '#FFE4E1',
'moccasin' : '#FFE4B5',
'navajowhite' : '#FFDEAD',
'navy' : '#000080',
'oldlace' : '#FDF5E6',
'olive' : '#808000',
'olivedrab' : '#6B8E23',
'orange' : '#FFA500',
'orangered' : '#FF4500',
'orchid' : '#DA70D6',
'palegoldenrod' : '#EEE8AA',
'palegreen' : '#98FB98',
'palevioletred' : '#AFEEEE',
'papayawhip' : '#FFEFD5',
'peachpuff' : '#FFDAB9',
'peru' : '#CD853F',
'pink' : '#FFC0CB',
'plum' : '#DDA0DD',
'powderblue' : '#B0E0E6',
'purple' : '#800080',
'red' : '#FF0000',
'rosybrown' : '#BC8F8F',
'royalblue' : '#4169E1',
'saddlebrown' : '#8B4513',
'salmon' : '#FA8072',
'sandybrown' : '#FAA460',
'seagreen' : '#2E8B57',
'seashell' : '#FFF5EE',
'sienna' : '#A0522D',
'silver' : '#C0C0C0',
'skyblue' : '#87CEEB',
'slateblue' : '#6A5ACD',
'slategray' : '#708090',
'snow' : '#FFFAFA',
'springgreen' : '#00FF7F',
'steelblue' : '#4682B4',
'tan' : '#D2B48C',
'teal' : '#008080',
'thistle' : '#D8BFD8',
'tomato' : '#FF6347',
'turquoise' : '#40E0D0',
'violet' : '#EE82EE',
'wheat' : '#F5DEB3',
'white' : '#FFFFFF',
'whitesmoke' : '#F5F5F5',
'yellow' : '#FFFF00',
'yellowgreen' : '#9ACD32',
}
# add british equivs: every '*gray*' name also gets a '*grey*' alias.
# (Safe to insert into cnames during the loop only because Python 2's
# dict.items() returns a snapshot list -- not a live view.)
for k, v in cnames.items():
    if k.find('gray')>=0:
        k = k.replace('gray', 'grey')
        cnames[k] = v
def is_color_like(c):
    'Return *True* if *c* can be converted to *RGB*'
    # to_rgb signals an unconvertible argument with ValueError only.
    try:
        colorConverter.to_rgb(c)
    except ValueError:
        return False
    return True
def rgb2hex(rgb):
    'Given an rgb or rgba sequence of 0-1 floats, return the hex string'
    # Scale each channel to 0-255; a trailing alpha channel is ignored.
    channels = tuple([round(c * 255) for c in rgb[:3]])
    return '#%02x%02x%02x' % channels
# Exactly '#' followed by six hex digits, nothing more.
hexColorPattern = re.compile("\A#[a-fA-F0-9]{6}\Z")

def hex2color(s):
    """
    Take a hex string *s* and return the corresponding rgb 3-tuple
    Example: #efefef -> (0.93725, 0.93725, 0.93725)
    """
    if not isinstance(s, basestring):
        raise TypeError('hex2color requires a string argument')
    if hexColorPattern.match(s) is None:
        raise ValueError('invalid hex color string "%s"' % s)
    # Split into the three two-digit channel fields and scale to 0-1.
    fields = (s[1:3], s[3:5], s[5:7])
    return tuple([int(field, 16)/255.0 for field in fields])
class ColorConverter:
    """
    Provides methods for converting color specifications to *RGB* or *RGBA*

    Caching is used for more efficient conversion upon repeated calls
    with the same argument.

    Ordinarily only the single instance instantiated in this module,
    *colorConverter*, is needed.
    """

    # Builtin single-letter color shortcuts mapped to RGB tuples.
    colors = {
        'b' : (0.0, 0.0, 1.0),
        'g' : (0.0, 0.5, 0.0),
        'r' : (1.0, 0.0, 0.0),
        'c' : (0.0, 0.75, 0.75),
        'm' : (0.75, 0, 0.75),
        'y' : (0.75, 0.75, 0),
        'k' : (0.0, 0.0, 0.0),
        'w' : (1.0, 1.0, 1.0),
    }

    # Class-level cache shared by all instances: maps previously seen
    # (hashable) arguments to their RGB result.
    cache = {}

    def to_rgb(self, arg):
        """
        Returns an *RGB* tuple of three floats from 0-1.

        *arg* can be an *RGB* or *RGBA* sequence or a string in any of
        several forms:

            1) a letter from the set 'rgbcmykw'
            2) a hex color string, like '#00FFFF'
            3) a standard name, like 'aqua'
            4) a float, like '0.4', indicating gray on a 0-1 scale

        if *arg* is *RGBA*, the *A* will simply be discarded.
        """
        # Fast path: return the cached result when the argument is
        # hashable and has been seen before.
        try: return self.cache[arg]
        except KeyError: pass
        except TypeError: # could be unhashable rgb seq
            arg = tuple(arg)
            try: return self.cache[arg]
            except KeyError: pass
            except TypeError:
                raise ValueError(
                    'to_rgb: arg "%s" is unhashable even inside a tuple'
                    % (str(arg),))

        try:
            if cbook.is_string_like(arg):
                argl = arg.lower()
                color = self.colors.get(argl, None)
                if color is None:
                    # Not a single-letter shortcut: try named colors,
                    # then hex strings, then gray-level floats.
                    str1 = cnames.get(argl, argl)
                    if str1.startswith('#'):
                        color = hex2color(str1)
                    else:
                        fl = float(argl)
                        if fl < 0 or fl > 1:
                            raise ValueError(
                                'gray (string) must be in range 0-1')
                        color = tuple([fl]*3)
            elif cbook.iterable(arg):
                if len(arg) > 4 or len(arg) < 3:
                    raise ValueError(
                        'sequence length is %d; must be 3 or 4'%len(arg))
                color = tuple(arg[:3])
                if [x for x in color if (float(x) < 0) or (x > 1)]:
                    # This will raise TypeError if x is not a number.
                    raise ValueError('number in rbg sequence outside 0-1 range')
            else:
                raise ValueError('cannot convert argument to rgb sequence')

            self.cache[arg] = color

        except (KeyError, ValueError, TypeError), exc:
            raise ValueError('to_rgb: Invalid rgb arg "%s"\n%s' % (str(arg), exc))
            # Error messages could be improved by handling TypeError
            # separately; but this should be rare and not too hard
            # for the user to figure out as-is.

        return color

    def to_rgba(self, arg, alpha=None):
        """
        Returns an *RGBA* tuple of four floats from 0-1.

        For acceptable values of *arg*, see :meth:`to_rgb`.
        In addition, if *arg* is "none" (case-insensitive),
        then (0,0,0,0) will be returned.

        If *arg* is an *RGBA* sequence and *alpha* is not *None*,
        *alpha* will replace the original *A*.
        """
        # "none" means fully transparent; only strings have .lower().
        try:
            if arg.lower() == 'none':
                return (0.0, 0.0, 0.0, 0.0)
        except AttributeError:
            pass

        try:
            if not cbook.is_string_like(arg) and cbook.iterable(arg):
                if len(arg) == 4:
                    if [x for x in arg if (float(x) < 0) or (x > 1)]:
                        # This will raise TypeError if x is not a number.
                        raise ValueError('number in rbga sequence outside 0-1 range')
                    if alpha is None:
                        return tuple(arg)
                    if alpha < 0.0 or alpha > 1.0:
                        raise ValueError("alpha must be in range 0-1")
                    # Override the sequence's own alpha with the argument.
                    return arg[0], arg[1], arg[2], alpha
                r,g,b = arg[:3]
                if [x for x in (r,g,b) if (float(x) < 0) or (x > 1)]:
                    raise ValueError('number in rbg sequence outside 0-1 range')
            else:
                # Delegate strings and other specs to to_rgb().
                r,g,b = self.to_rgb(arg)
            if alpha is None:
                alpha = 1.0
            return r,g,b,alpha
        except (TypeError, ValueError), exc:
            raise ValueError('to_rgba: Invalid rgba arg "%s"\n%s' % (str(arg), exc))

    def to_rgba_array(self, c, alpha=None):
        """
        Returns a numpy array of *RGBA* tuples.

        Accepts a single mpl color spec or a sequence of specs.

        Special case to handle "no color": if *c* is "none" (case-insensitive),
        then an empty array will be returned.  Same for an empty list.
        """
        try:
            nc = len(c)
        except TypeError:
            raise ValueError(
                "Cannot convert argument type %s to rgba array" % type(c))
        try:
            # Empty sequence or the string "none" -> empty Nx4 array.
            if nc == 0 or c.lower() == 'none':
                return np.zeros((0,4), dtype=np.float)
        except AttributeError:
            pass
        try:
            # Single value? Put it in an array with a single row.
            return np.array([self.to_rgba(c, alpha)], dtype=np.float)
        except ValueError:
            if isinstance(c, np.ndarray):
                if c.ndim != 2 and c.dtype.kind not in 'SU':
                    raise ValueError("Color array must be two-dimensional")

                if (c.ndim == 2 and c.shape[1] == 4 and c.dtype.kind == 'f'):
                    # Already an Nx4 float array: validate and reuse it.
                    if (c.ravel() > 1).any() or (c.ravel() < 0).any():
                        raise ValueError(
                            "number in rgba sequence is outside 0-1 range")
                    result = np.asarray(c, np.float)
                    if alpha is not None:
                        if alpha > 1 or alpha < 0:
                            raise ValueError("alpha must be in 0-1 range")
                        result[:,3] = alpha
                    return result
                    # This alpha operation above is new, and depends
                    # on higher levels to refrain from setting alpha
                    # to values other than None unless there is
                    # intent to override any existing alpha values.

            # It must be some other sequence of color specs.
            result = np.zeros((nc, 4), dtype=np.float)
            for i, cc in enumerate(c):
                result[i] = self.to_rgba(cc, alpha)
            return result

# The module-level singleton used throughout matplotlib.
colorConverter = ColorConverter()
def makeMappingArray(N, data, gamma=1.0):
    """Create an *N* -element 1-d lookup table

    *data* represented by a list of x,y0,y1 mapping correspondences.
    Each element in this list represents how a value between 0 and 1
    (inclusive) represented by x is mapped to a corresponding value
    between 0 and 1 (inclusive). The two values of y are to allow
    for discontinuous mapping functions (say as might be found in a
    sawtooth) where y0 represents the value of y for values of x
    <= to that given, and y1 is the value to be used for x > than
    that given). The list must start with x=0, end with x=1, and
    all values of x must be in increasing order. Values between
    the given mapping points are determined by simple linear interpolation.

    Alternatively, data can be a function mapping values between 0 - 1
    to 0 - 1.

    The function returns an array "result" where ``result[x*(N-1)]``
    gives the closest value for values of x between 0 and 1.
    """
    if callable(data):
        # Sample the function on a gamma-warped grid and clip to [0, 1].
        xind = np.linspace(0, 1, N)**gamma
        lut = np.clip(np.array(data(xind), dtype=float), 0, 1)
        return lut

    try:
        adata = np.array(data)
    except Exception:
        raise TypeError("data must be convertable to an array")
    shape = adata.shape
    # Must be an (n, 3) table.  (The previous "len(shape) != 2 and
    # shape[1] != 3" test raised IndexError for 1-d input and let nx4
    # data through.)
    if len(shape) != 2 or shape[1] != 3:
        raise ValueError("data must be nx3 format")

    x = adata[:,0]
    y0 = adata[:,1]
    y1 = adata[:,2]

    if x[0] != 0. or x[-1] != 1.0:
        raise ValueError(
            "data mapping points must start with x=0. and end with x=1")
    if np.any(np.sort(x) - x):
        raise ValueError(
            "data mapping points must have x in increasing order")
    # begin generation of lookup table
    x = x * (N-1)
    lut = np.zeros((N,), float)
    xind = (N - 1) * np.linspace(0, 1, N)**gamma
    ind = np.searchsorted(x, xind)[1:-1]

    # Linear interpolation between y1 of the lower point and y0 of the
    # upper point; the endpoints are pinned to y1[0] / y0[-1].
    lut[1:-1] = ( ((xind[1:-1] - x[ind-1]) / (x[ind] - x[ind-1]))
                  * (y0[ind] - y1[ind-1]) + y1[ind-1])
    lut[0] = y1[0]
    lut[-1] = y0[-1]
    # Confine the lut to values between 0 and 1.  (np.clip returns the
    # clipped array; the previous code discarded the result, so the lut
    # was never actually clipped.)
    lut = np.clip(lut, 0.0, 1.0)
    return lut
class Colormap:
    """Base class for all scalar to rgba mappings.

    Subclasses must implement :meth:`_init`, which builds ``self._lut``,
    an (N+3, 4) rgba lookup table whose last three rows hold the
    under-range, over-range and bad (masked) colors, in that order.

    Important methods:

        * :meth:`set_bad`
        * :meth:`set_under`
        * :meth:`set_over`
    """
    def __init__(self, name, N=256):
        """
        Public class attributes:

            :attr:`N` : number of rgb quantization levels
            :attr:`name` : name of colormap
        """
        self.name = name
        self.N = N
        self._rgba_bad = (0.0, 0.0, 0.0, 0.0)  # If bad, don't paint anything.
        self._rgba_under = None
        self._rgba_over = None
        # Indices of the three special rows appended after the N colors.
        self._i_under = N
        self._i_over = N+1
        self._i_bad = N+2
        self._isinit = False

    def __call__(self, X, alpha=None, bytes=False):
        """
        *X* is either a scalar or an array (of any dimension).
        If scalar, a tuple of rgba values is returned, otherwise
        an array with the new shape = oldshape+(4,). If the X-values
        are integers, then they are used as indices into the array.
        If they are floating point, then they must be in the
        interval (0.0, 1.0).
        Alpha must be a scalar between 0 and 1, or None.
        If bytes is False, the rgba values will be floats on a
        0-1 scale; if True, they will be uint8, 0-255.
        """
        if not self._isinit: self._init()
        mask_bad = None
        if not cbook.iterable(X):
            vtype = 'scalar'
            xa = np.array([X])
        else:
            vtype = 'array'
            xma = ma.array(X, copy=False)
            mask_bad = xma.mask
            xa = xma.data.copy()  # Copy here to avoid side effects.
            del xma
        # masked values are substituted below; no need to fill them here
        if xa.dtype.char in np.typecodes['Float']:
            # Treat 1.0 as slightly less than 1 so it maps to the last
            # in-range lut entry rather than spilling into "over".
            cbook._putmask(xa, xa == 1.0, np.nextafter(xa.dtype.type(1),
                                                       xa.dtype.type(0)))
            # The following clip is fast, and prevents possible
            # conversion of large positive values to negative integers.
            xa *= self.N
            if NP_CLIP_OUT:
                np.clip(xa, -1, self.N, out=xa)
            else:
                xa = np.clip(xa, -1, self.N)
            # ensure that all 'under' values will still have negative
            # value after casting to int
            cbook._putmask(xa, xa < 0.0, -1)
            xa = xa.astype(int)
        # Set the over-range indices before the under-range;
        # otherwise the under-range values get converted to over-range.
        cbook._putmask(xa, xa > self.N-1, self._i_over)
        cbook._putmask(xa, xa < 0, self._i_under)
        if mask_bad is not None:
            if mask_bad.shape == xa.shape:
                cbook._putmask(xa, mask_bad, self._i_bad)
            elif mask_bad:
                # Scalar mask: the whole input is masked.
                xa.fill(self._i_bad)
        if bytes:
            lut = (self._lut * 255).astype(np.uint8)
        else:
            lut = self._lut.copy()  # Don't let alpha modify original _lut.
        if alpha is not None:
            alpha = min(alpha, 1.0)  # alpha must be between 0 and 1
            alpha = max(alpha, 0.0)
            if bytes:
                alpha = int(alpha * 255)
            if (lut[-1] == 0).all():
                # All zeros is taken as a flag for the default bad
                # color, which is no color--fully transparent.  We
                # don't want to override this.
                lut[:-1, -1] = alpha
            else:
                # If the bad value is set to have a color, then we
                # override its alpha just as for any other value.
                lut[:, -1] = alpha
        rgba = np.empty(shape=xa.shape+(4,), dtype=lut.dtype)
        lut.take(xa, axis=0, mode='clip', out=rgba)
        # twice as fast as lut[xa]; using the clip or wrap mode and
        # providing an output array speeds it up a little more.
        if vtype == 'scalar':
            rgba = tuple(rgba[0, :])
        return rgba

    def set_bad(self, color = 'k', alpha = None):
        '''Set color to be used for masked values.
        '''
        self._rgba_bad = colorConverter.to_rgba(color, alpha)
        if self._isinit: self._set_extremes()

    def set_under(self, color = 'k', alpha = None):
        '''Set color to be used for low out-of-range values.
        Requires norm.clip = False
        '''
        self._rgba_under = colorConverter.to_rgba(color, alpha)
        if self._isinit: self._set_extremes()

    def set_over(self, color = 'k', alpha = None):
        '''Set color to be used for high out-of-range values.
        Requires norm.clip = False
        '''
        self._rgba_over = colorConverter.to_rgba(color, alpha)
        if self._isinit: self._set_extremes()

    def _set_extremes(self):
        # Fill the three special lut rows; unset under/over default to the
        # first/last in-range colors.
        if self._rgba_under:
            self._lut[self._i_under] = self._rgba_under
        else:
            self._lut[self._i_under] = self._lut[0]
        if self._rgba_over:
            self._lut[self._i_over] = self._rgba_over
        else:
            self._lut[self._i_over] = self._lut[self.N-1]
        self._lut[self._i_bad] = self._rgba_bad

    def _init(self):
        '''Generate the lookup table, self._lut.

        BUGFIX: the original signature was ``def _init():`` (missing
        *self*), so calling ``self._init()`` on the base class raised
        TypeError instead of the intended NotImplementedError.
        '''
        raise NotImplementedError("Abstract class only")

    def is_gray(self):
        # A map is gray when all three color channels are identical.
        if not self._isinit: self._init()
        return (np.alltrue(self._lut[:, 0] == self._lut[:, 1])
                and np.alltrue(self._lut[:, 0] == self._lut[:, 2]))
class LinearSegmentedColormap(Colormap):
    """Colormap objects based on lookup tables using linear segments.

    The lookup table is generated using linear interpolation for each
    primary color, with the 0-1 domain divided into any number of
    segments.
    """

    def __init__(self, name, segmentdata, N=256, gamma=1.0):
        """Create color map from linear mapping segments.

        *segmentdata* is a dictionary with 'red', 'green' and 'blue'
        entries.  Each entry is a list of (x, y0, y1) rows forming a
        table; *x* must increase monotonically from 0 to 1.  For an
        input value *z* between x[i] and x[i+1], the output for that
        channel is linearly interpolated between y1[i] and y0[i+1]
        (so y0 of the first row and y1 of the last row are never used).

        Example -- red ramps up over the bottom half, green over the
        middle half, blue over the top half::

            cdict = {'red':   [(0.0, 0.0, 0.0), (0.5, 1.0, 1.0), (1.0, 1.0, 1.0)],
                     'green': [(0.0, 0.0, 0.0), (0.25, 0.0, 0.0),
                               (0.75, 1.0, 1.0), (1.0, 1.0, 1.0)],
                     'blue':  [(0.0, 0.0, 0.0), (0.5, 0.0, 0.0), (1.0, 1.0, 1.0)]}

        .. seealso::
            :meth:`LinearSegmentedColormap.from_list` and
            :func:`makeMappingArray`.
        """
        # True only if all colors in the map are identical; needed for
        # contouring.
        self.monochrome = False
        Colormap.__init__(self, name, N)
        self._segmentdata = segmentdata
        self._gamma = gamma

    def _init(self):
        self._lut = np.ones((self.N + 3, 4), np.float)
        # Fill the r, g, b columns from the per-channel segment tables;
        # the last three rows are reserved for under/over/bad.
        for column, channel in enumerate(('red', 'green', 'blue')):
            self._lut[:-3, column] = makeMappingArray(
                self.N, self._segmentdata[channel], self._gamma)
        self._isinit = True
        self._set_extremes()

    def set_gamma(self, gamma):
        """Set a new gamma value and regenerate the color map."""
        self._gamma = gamma
        self._init()

    @staticmethod
    def from_list(name, colors, N=256, gamma=1.0):
        """Build a linear segmented colormap named *name* from *colors*.

        *colors* is a sequence of color specs transitioning evenly from
        colors[0] at val=0 to colors[-1] at val=1, or a list of
        (value, color) tuples for an uneven division.  *N* is the number
        of rgb quantization levels.
        """
        if not cbook.iterable(colors):
            raise ValueError('colors must be iterable')

        if (cbook.iterable(colors[0]) and len(colors[0]) == 2
                and not cbook.is_string_like(colors[0])):
            # List of (value, color) pairs.
            vals, colors = zip(*colors)
        else:
            vals = np.linspace(0., 1., len(colors))

        cdict = dict(red=[], green=[], blue=[])
        for val, color in zip(vals, colors):
            red, green, blue = colorConverter.to_rgb(color)
            cdict['red'].append((val, red, red))
            cdict['green'].append((val, green, green))
            cdict['blue'].append((val, blue, blue))

        return LinearSegmentedColormap(name, cdict, N, gamma)
class ListedColormap(Colormap):
    """Colormap object generated from a list of colors.

    Most useful when indexing directly into a colormap, but also usable
    to generate special colormaps for ordinary mapping.
    """

    def __init__(self, colors, name = 'from_list', N = None):
        """Make a colormap from a list of colors.

        *colors*
            a list of matplotlib color specifications, or an equivalent
            Nx3 floating point array (*N* rgb values)
        *name*
            a string to identify the colormap
        *N*
            the number of entries in the map.  The default *None* gives
            one entry per element of *colors*.  Otherwise the list is
            truncated (N < len(colors)) or extended by repetition
            (N > len(colors)).
        """
        self.colors = colors
        self.monochrome = False  # True only if every color is identical;
                                 # needed for contouring.
        if N is None:
            N = len(self.colors)
        elif cbook.is_string_like(self.colors):
            # A single color name: repeat it N times.
            self.colors = [self.colors] * N
            self.monochrome = True
        elif cbook.iterable(self.colors):
            self.colors = list(self.colors)  # in case it was a tuple
            if len(self.colors) == 1:
                self.monochrome = True
            if len(self.colors) < N:
                # Extend by repetition, then trim to exactly N entries.
                self.colors = list(self.colors) * N
                del(self.colors[N:])
        else:
            # A single gray level given as a number.
            try:
                gray = float(self.colors)
            except TypeError:
                pass
            else:
                self.colors = [gray] * N
            self.monochrome = True
        Colormap.__init__(self, name, N)

    def _init(self):
        rgb = np.array([colorConverter.to_rgb(c)
                        for c in self.colors], np.float)
        self._lut = np.zeros((self.N + 3, 4), np.float)
        self._lut[:-3, :-1] = rgb
        self._lut[:-3, -1] = 1  # fully opaque
        self._isinit = True
        self._set_extremes()
class Normalize:
    """
    Normalize a given value to the 0-1 range.
    """
    def __init__(self, vmin=None, vmax=None, clip=False):
        """
        If *vmin* or *vmax* is not given, they are taken from the input's
        minimum and maximum value respectively. If *clip* is *True* and
        the given value falls outside the range, the returned value
        will be 0 or 1, whichever is closer. Returns 0 if::

            vmin==vmax

        Works with scalars or arrays, including masked arrays. If
        *clip* is *True*, masked values are set to 1; otherwise they
        remain masked. Clipping silently defeats the purpose of setting
        the over, under, and masked colors in the colormap, so it is
        likely to lead to surprises; therefore the default is
        *clip* = *False*.
        """
        self.vmin = vmin
        self.vmax = vmax
        self.clip = clip

    @staticmethod
    def process_value(value):
        """
        Homogenize the input *value* for easy and efficient normalization.

        *value* can be a scalar or sequence.

        Returns *result*, *is_scalar*, where *result* is a
        masked array matching *value*. Float dtypes are preserved;
        integer types with two bytes or smaller are converted to
        np.float32, and larger types are converted to np.float.
        Preserving float32 when possible, and using in-place operations,
        can greatly improve speed for large arrays.

        Experimental; we may want to add an option to force the
        use of float32.
        """
        if cbook.iterable(value):
            is_scalar = False
            result = ma.asarray(value)
            if result.dtype.kind == 'f':
                # ma.asarray on a float ndarray returns a view; copy so the
                # in-place arithmetic in __call__ cannot mutate caller data.
                if isinstance(value, np.ndarray):
                    result = result.copy()
            elif result.dtype.itemsize > 2:
                result = result.astype(np.float)
            else:
                # Small integer types fit losslessly in float32.
                result = result.astype(np.float32)
        else:
            is_scalar = True
            result = ma.array([value]).astype(np.float)
        return result, is_scalar

    def __call__(self, value, clip=None):
        # Normalize *value* into [0, 1] based on vmin/vmax, autoscaling
        # any unset limit from the data first.
        if clip is None:
            clip = self.clip
        result, is_scalar = self.process_value(value)
        self.autoscale_None(result)
        vmin, vmax = self.vmin, self.vmax
        if vmin > vmax:
            raise ValueError("minvalue must be less than or equal to maxvalue")
        elif vmin == vmax:
            result.fill(0)  # Or should it be all masked?  Or 0.5?
        else:
            vmin = float(vmin)
            vmax = float(vmax)
            if clip:
                mask = ma.getmask(result)
                result = ma.array(np.clip(result.filled(vmax), vmin, vmax),
                                  mask=mask)
            # ma division is very slow; we can take a shortcut
            # by operating in place on the raw data and re-attaching
            # the mask afterwards.
            resdat = result.data
            resdat -= vmin
            resdat /= (vmax - vmin)
            result = np.ma.array(resdat, mask=result.mask, copy=False)
        if is_scalar:
            # Unwrap the single element for scalar input.
            result = result[0]
        return result

    def inverse(self, value):
        # Map normalized values in [0, 1] back to the original data range.
        if not self.scaled():
            raise ValueError("Not invertible until scaled")
        vmin = float(self.vmin)
        vmax = float(self.vmax)
        if cbook.iterable(value):
            val = ma.asarray(value)
            return vmin + val * (vmax - vmin)
        else:
            return vmin + value * (vmax - vmin)

    def autoscale(self, A):
        '''
        Set *vmin*, *vmax* to min, max of *A*.
        '''
        self.vmin = ma.min(A)
        self.vmax = ma.max(A)

    def autoscale_None(self, A):
        '''Autoscale only the limits (vmin/vmax) that are still None.'''
        if self.vmin is None:
            self.vmin = ma.min(A)
        if self.vmax is None:
            self.vmax = ma.max(A)

    def scaled(self):
        '''Return True if both vmin and vmax are set.'''
        return (self.vmin is not None and self.vmax is not None)
class LogNorm(Normalize):
    """
    Normalize a given value to the 0-1 range on a log scale.
    """
    def __call__(self, value, clip=None):
        """Normalize *value* logarithmically into [0, 1].

        Non-positive entries are masked out; raises ValueError if the
        (possibly autoscaled) vmin is not positive.
        """
        if clip is None:
            clip = self.clip
        result, is_scalar = self.process_value(value)
        # Log scale is undefined for values <= 0; mask them up front.
        result = ma.masked_less_equal(result, 0, copy=False)
        self.autoscale_None(result)
        vmin, vmax = self.vmin, self.vmax
        if vmin > vmax:
            raise ValueError("minvalue must be less than or equal to maxvalue")
        elif vmin <= 0:
            raise ValueError("values must all be positive")
        elif vmin == vmax:
            result.fill(0)
        else:
            if clip:
                mask = ma.getmask(result)
                # BUGFIX: the clipped array was previously assigned to an
                # unused local ("val") and discarded, so clip=True had no
                # effect.  Assign it back to result, as Normalize does.
                result = ma.array(np.clip(result.filled(vmax), vmin, vmax),
                                  mask=mask)
            #result = (ma.log(result)-np.log(vmin))/(np.log(vmax)-np.log(vmin))
            # in-place equivalent of above can be much faster
            resdat = result.data
            mask = result.mask
            if mask is np.ma.nomask:
                mask = (resdat <= 0)
            else:
                mask |= resdat <= 0
            # Replace non-positive entries with a harmless 1 before taking
            # the log; they are masked out anyway.
            cbook._putmask(resdat, mask, 1)
            np.log(resdat, resdat)
            resdat -= np.log(vmin)
            resdat /= (np.log(vmax) - np.log(vmin))
            result = np.ma.array(resdat, mask=mask, copy=False)
        if is_scalar:
            result = result[0]
        return result

    def inverse(self, value):
        """Map normalized values in [0, 1] back to the data range."""
        if not self.scaled():
            raise ValueError("Not invertible until scaled")
        vmin, vmax = self.vmin, self.vmax
        if cbook.iterable(value):
            val = ma.asarray(value)
            return vmin * ma.power((vmax / vmin), val)
        else:
            return vmin * pow((vmax / vmin), value)

    def autoscale(self, A):
        '''
        Set *vmin*, *vmax* to min, max of *A*, ignoring values <= 0.
        '''
        A = ma.masked_less_equal(A, 0, copy=False)
        self.vmin = ma.min(A)
        self.vmax = ma.max(A)

    def autoscale_None(self, A):
        '''Autoscale only None-valued vmin or vmax, ignoring values <= 0.'''
        if self.vmin is not None and self.vmax is not None:
            return
        A = ma.masked_less_equal(A, 0, copy=False)
        if self.vmin is None:
            self.vmin = ma.min(A)
        if self.vmax is None:
            self.vmax = ma.max(A)
class BoundaryNorm(Normalize):
    '''
    Generate a colormap index based on discrete intervals.

    Unlike :class:`Normalize` or :class:`LogNorm`,
    :class:`BoundaryNorm` maps values to integers instead of to the
    interval 0-1.

    Mapping to the 0-1 interval could have been done via
    piece-wise linear interpolation, but using integers seems
    simpler, and reduces the number of conversions back and forth
    between integer and floating point.
    '''
    def __init__(self, boundaries, ncolors, clip=False):
        '''
        *boundaries*
            a monotonically increasing sequence
        *ncolors*
            number of colors in the colormap to be used

        If::

            b[i] <= v < b[i+1]

        then v is mapped to color j; as i varies from 0 to
        len(boundaries)-2, j goes from 0 to ncolors-1.

        Out-of-range values are mapped to -1 if low and ncolors
        if high; these are converted to valid indices by
        :meth:`Colormap.__call__` .
        '''
        self.clip = clip
        self.vmin = boundaries[0]
        self.vmax = boundaries[-1]
        self.boundaries = np.asarray(boundaries)
        self.N = len(self.boundaries)
        self.Ncmap = ncolors
        # Interpolate indices only when the number of intervals does not
        # match the number of colormap entries exactly.
        if self.N-1 == self.Ncmap:
            self._interp = False
        else:
            self._interp = True

    def __call__(self, x, clip=None):
        if clip is None:
            clip = self.clip
        x = ma.asarray(x)
        mask = ma.getmaskarray(x)
        # Fill masked entries with an out-of-range value; they keep their
        # mask in the returned array anyway.
        xx = x.filled(self.vmax+1)
        if clip:
            # BUGFIX: np.clip returns the clipped array; the original call
            # discarded the result, making clip=True a no-op.
            xx = np.clip(xx, self.vmin, self.vmax)
        iret = np.zeros(x.shape, dtype=np.int16)
        # Each value gets the index of the last boundary it reaches.
        for i, b in enumerate(self.boundaries):
            iret[xx >= b] = i
        if self._interp:
            iret = (iret * (float(self.Ncmap-1)/(self.N-2))).astype(np.int16)
        iret[xx < self.vmin] = -1
        iret[xx >= self.vmax] = self.Ncmap
        ret = ma.array(iret, mask=mask)
        if ret.shape == () and not mask:
            ret = int(ret)  # assume python scalar
        return ret

    def inverse(self, value):
        # BUGFIX: the original *returned* a ValueError instance instead of
        # raising it, silently handing callers an exception object.
        raise ValueError("BoundaryNorm is not invertible")
class NoNorm(Normalize):
    """Identity normalization.

    Passes values straight through, for the case where data should be
    used directly as indices in a
    :class:`~matplotlib.cm.ScalarMappable`.
    """

    def __call__(self, value, clip=None):
        # No scaling: hand the input back unchanged.
        return value

    def inverse(self, value):
        # The identity map is its own inverse.
        return value
# Compatibility aliases for earlier class names that violated the
# CapWords convention; prefer Normalize and NoNorm in new code.
normalize = Normalize
no_norm = NoNorm
def rgb_to_hsv(arr):
    """
    Convert rgb values in a numpy array to hsv values.

    Input and output arrays should have shape (M, N, 3), with all
    components in the 0-1 range.
    """
    out = np.zeros_like(arr)
    arr_max = arr.max(-1)
    ipos = arr_max > 0
    # Per-pixel channel spread.  BUGFIX: the original used arr.ptp(-1);
    # the ndarray.ptp method was removed in NumPy 2.0, so compute it as
    # max - min (identical result).
    delta = arr.max(-1) - arr.min(-1)
    # Saturation is spread/value wherever value is nonzero.
    s = np.zeros_like(delta)
    s[ipos] = delta[ipos] / arr_max[ipos]
    ipos = delta > 0
    # Hue depends on which channel holds the maximum (chromatic pixels only).
    # red is max
    idx = (arr[:, :, 0] == arr_max) & ipos
    out[idx, 0] = (arr[idx, 1] - arr[idx, 2]) / delta[idx]
    # green is max
    idx = (arr[:, :, 1] == arr_max) & ipos
    out[idx, 0] = 2. + (arr[idx, 2] - arr[idx, 0]) / delta[idx]
    # blue is max
    idx = (arr[:, :, 2] == arr_max) & ipos
    out[idx, 0] = 4. + (arr[idx, 0] - arr[idx, 1]) / delta[idx]
    # Fold hue into [0, 1); copy saturation and value into place.
    out[:, :, 0] = (out[:, :, 0] / 6.0) % 1.0
    out[:, :, 1] = s
    out[:, :, 2] = arr_max
    return out
def hsv_to_rgb(hsv):
    """
    Convert hsv values in a numpy array to rgb values.

    Both input and output arrays have shape (M, N, 3), with all
    components in the 0-1 range.
    """
    h = hsv[:, :, 0]
    s = hsv[:, :, 1]
    v = hsv[:, :, 2]
    # Hue sector (0-5) and fractional position within the sector.
    # BUGFIX: the original used np.int, which was removed in NumPy 1.24;
    # the builtin int denotes the same dtype.
    i = (h * 6.0).astype(int)
    f = (h * 6.0) - i
    p = v * (1.0 - s)
    q = v * (1.0 - s * f)
    t = v * (1.0 - s * (1.0 - f))
    r = np.empty_like(h)
    g = np.empty_like(h)
    b = np.empty_like(h)
    # One assignment per hue sector; h == 1.0 gives i == 6, hence the
    # modulo in the first case.
    idx = i % 6 == 0
    r[idx] = v[idx]; g[idx] = t[idx]; b[idx] = p[idx]
    idx = i == 1
    r[idx] = q[idx]; g[idx] = v[idx]; b[idx] = p[idx]
    idx = i == 2
    r[idx] = p[idx]; g[idx] = v[idx]; b[idx] = t[idx]
    idx = i == 3
    r[idx] = p[idx]; g[idx] = q[idx]; b[idx] = v[idx]
    idx = i == 4
    r[idx] = t[idx]; g[idx] = p[idx]; b[idx] = v[idx]
    idx = i == 5
    r[idx] = v[idx]; g[idx] = p[idx]; b[idx] = q[idx]
    # Achromatic (s == 0) pixels are pure gray at value v.
    idx = s == 0
    r[idx] = v[idx]; g[idx] = v[idx]; b[idx] = v[idx]
    rgb = np.empty_like(hsv)
    rgb[:, :, 0] = r
    rgb[:, :, 1] = g
    rgb[:, :, 2] = b
    return rgb
class LightSource(object):
    """
    Create a light source coming from the specified azimuth and elevation.
    Angles are in degrees, with the azimuth measured
    clockwise from north and elevation up from the zero plane of the surface.
    The :meth:`shade` is used to produce rgb values for a shaded relief image
    given a data array.
    """
    def __init__(self, azdeg=315, altdeg=45,
                 hsv_min_val=0, hsv_max_val=1, hsv_min_sat=1, hsv_max_sat=0):
        """
        Specify the azimuth (measured clockwise from south) and altitude
        (measured up from the plane of the surface) of the light source
        in degrees.

        The color of the resulting image will be darkened
        by moving the (s,v) values (in hsv colorspace) toward
        (hsv_min_sat, hsv_min_val) in the shaded regions, or
        lightened by sliding (s,v) toward
        (hsv_max_sat, hsv_max_val) in regions that are illuminated.
        The default extremes are chosen so that completely shaded points
        are nearly black (s = 1, v = 0) and completely illuminated points
        are nearly white (s = 0, v = 1).
        """
        self.azdeg = azdeg
        self.altdeg = altdeg
        self.hsv_min_val = hsv_min_val
        self.hsv_max_val = hsv_max_val
        self.hsv_min_sat = hsv_min_sat
        self.hsv_max_sat = hsv_max_sat

    def shade(self, data, cmap):
        """
        Take the input data array, convert to HSV values in the
        given colormap, then adjust those color values
        to give the impression of a shaded relief map with a
        specified light source.
        RGBA values are returned, which can then be used to
        plot the shaded image with imshow.
        """
        # Normalize data into [0, 1] before looking up colormap colors.
        rgb0 = cmap((data - data.min()) / (data.max() - data.min()))
        rgb1 = self.shade_rgb(rgb0, elevation=data)
        # Keep the alpha channel from the colormap; replace only rgb.
        rgb0[:, :, 0:3] = rgb1
        return rgb0

    def shade_rgb(self, rgb, elevation, fraction=1.):
        """
        Take the input RGB array (ny*nx*3), adjust their color values
        to give the impression of a shaded relief map with a
        specified light source using the elevation (ny*nx).
        A new RGB array ((ny*nx*3)) is returned.
        """
        # imagine an artificial sun placed at infinity in some azimuth and
        # elevation position illuminating our surface. The parts of the
        # surface that slope toward the sun should brighten while those
        # sides facing away should become darker.
        # convert alt, az to radians
        az = self.azdeg * np.pi / 180.0
        alt = self.altdeg * np.pi / 180.0
        # gradient in x and y directions
        dx, dy = np.gradient(elevation)
        slope = 0.5 * np.pi - np.arctan(np.hypot(dx, dy))
        aspect = np.arctan2(dx, dy)
        # Illumination intensity from the angle between surface orientation
        # and the sun direction.
        intensity = (np.sin(alt) * np.sin(slope)
                     + np.cos(alt) * np.cos(slope)
                     * np.cos(-az - aspect - 0.5 * np.pi))
        # rescale to interval -1,1
        # +1 means maximum sun exposure and -1 means complete shade.
        intensity = (intensity - intensity.min()) / (intensity.max() - intensity.min())
        intensity = (2. * intensity - 1.) * fraction
        # convert to rgb, then rgb to hsv
        #rgb = cmap((data-data.min())/(data.max()-data.min()))
        hsv = rgb_to_hsv(rgb[:, :, 0:3])
        # modify hsv values to simulate illumination: slide (s, v) toward
        # the "max" extremes where lit (intensity > 0) and toward the
        # "min" extremes where shaded (intensity < 0).  Saturation is only
        # touched where it is non-negligible (> 1e-10).
        hsv[:, :, 1] = np.where(np.logical_and(np.abs(hsv[:, :, 1]) > 1.e-10, intensity > 0),
                                (1. - intensity) * hsv[:, :, 1] + intensity * self.hsv_max_sat,
                                hsv[:, :, 1])
        hsv[:, :, 2] = np.where(intensity > 0,
                                (1. - intensity) * hsv[:, :, 2] + intensity * self.hsv_max_val,
                                hsv[:, :, 2])
        hsv[:, :, 1] = np.where(np.logical_and(np.abs(hsv[:, :, 1]) > 1.e-10, intensity < 0),
                                (1. + intensity) * hsv[:, :, 1] - intensity * self.hsv_min_sat,
                                hsv[:, :, 1])
        hsv[:, :, 2] = np.where(intensity < 0,
                                (1. + intensity) * hsv[:, :, 2] - intensity * self.hsv_min_val,
                                hsv[:, :, 2])
        # Clamp s and v back into the valid [0, 1] range.
        hsv[:, :, 1:] = np.where(hsv[:, :, 1:] < 0., 0, hsv[:, :, 1:])
        hsv[:, :, 1:] = np.where(hsv[:, :, 1:] > 1., 1, hsv[:, :, 1:])
        # convert modified hsv back to rgb.
        return hsv_to_rgb(hsv)
| ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/lib/matplotlib/colors.py | Python | gpl-2.0 | 44,182 |
# -*- coding: utf-8 -*-
"""
@author: Fabio Erculiani <lxnay@sabayon.org>
@contact: lxnay@sabayon.org
@copyright: Fabio Erculiani
@license: GPL-2
B{Entropy Command Line Client}.
"""
import argparse
from entropy.i18n import _
from entropy.output import TextInterface
from entropy.cache import EntropyCacher
from _entropy.solo.commands.descriptor import SoloCommandDescriptor
from _entropy.solo.commands.command import SoloCommand
class SoloYell(SoloCommand):
    """
    Main Solo yell command.

    Prints an increasingly annoyed message each time it is invoked,
    persisting the annoyance level in the Entropy cache.
    """

    NAME = "yell"
    ALIASES = []
    CATCH_ALL = False
    ALLOW_UNPRIVILEGED = True
    HIDDEN = True

    # Cache key under which the current yell level is stored.
    _CACHE_KEY = "SoloYellStatus"

    _MESSAGES = {
        0: _("You should run equo --help"),
        1: _("You didn't run equo --help, did you?"),
        2: _("Did you even read equo --help??"),
        3: _("I give up. Run that equo --help !!!!!!!"),
        4: _("OH MY GOD. RUN equo --heeeeeeeeeeeeeelp"),
        5: _("Illiteracy is a huge problem in this world"),
        6: _("Ok i give up, you are hopeless"),
        7: _("Go to hell."),
        8: _("Go to hell."),
        9: _("Go to hell."),
        10: _("Go to hell."),
        11: _("Go to hell."),
        12: _("Stop that, you idiot."),
        13: _("Go to hell."),
    }

    INTRODUCTION = """\
Yell at user.
"""
    SEE_ALSO = "equo-help(1)"

    def man(self):
        """
        Overridden from SoloCommand.
        """
        return self._man()

    def parse(self):
        """
        Parse command
        """
        return self._show_yell, []

    @staticmethod
    def read():
        """Return the current yell level, initializing the cache to 0."""
        cacher = EntropyCacher()
        status = cacher.pop(SoloYell._CACHE_KEY)
        if status is None:
            status = 0
            SoloYell.write(status)
        return status

    @staticmethod
    def write(status):
        """Persist *status* as the new yell level; cache I/O errors are
        non-fatal and silently ignored."""
        cacher = EntropyCacher()
        try:
            cacher.save(SoloYell._CACHE_KEY, status)
        except IOError:
            pass

    @staticmethod
    def reset():
        """
        Reset Yell Status.
        """
        cacher = EntropyCacher()
        try:
            cacher.save(SoloYell._CACHE_KEY, 0)
        except IOError:
            pass

    def _show_yell(self, *args):
        """Print the message for the current level and bump the level."""
        yell_id = SoloYell.read()
        max_id = max(list(SoloYell._MESSAGES.keys()))
        # BUGFIX: fall back to the *message* for max_id, not the bare
        # integer key; the original passed the int straight to
        # text.output() whenever the cached level was out of range.
        yell_message = SoloYell._MESSAGES.get(
            yell_id, SoloYell._MESSAGES[max_id])

        # do not use entropy_client here
        # it is slow and might interfere with
        # other Client inits.
        text = TextInterface()
        text.output(
            yell_message,
            importance=1,
            level="warning")
        new_yell_id = yell_id + 1
        if new_yell_id <= max_id:
            SoloYell.write(new_yell_id)
        return 1
# Make the command discoverable: register its descriptor with the
# global Solo command registry at import time.
SoloCommandDescriptor.register(
    SoloCommandDescriptor(
        SoloYell,
        SoloYell.NAME,
        _("yell at user"))
)
| Sabayon/entropy | client/solo/commands/yell.py | Python | gpl-2.0 | 2,897 |
import sys
from hoplite.exceptions import JobFailedError
def run(config, status):
    """Hoplite job entry point that always fails.

    Test fixture: constructs a genuine traceback object plus a canned
    "previous exception" payload, then raises JobFailedError so callers
    can exercise their failure handling.

    :param config: job configuration (unused)
    :param status: job status object (unused)
    :raises JobFailedError: always
    """
    address = "10.2.1.1"
    uuid = "5"
    # Get an arbitrary, genuine traceback object to use for testing.
    # Narrowed from a bare "except:"; only the TypeError raised above is
    # expected here.
    try:
        raise TypeError()
    except TypeError:
        test_traceback = sys.exc_info()[2]
    previous_exception = {
        "type": "Test Type String",
        "message": "Test Message",
        "exception_object": 'pickled_string'
    }
    raise JobFailedError(address, uuid, test_traceback, previous_exception)
| ni/hoplite | tests/test_resources/test_jobs_package/test_jobs_package/throw_job_failed_exception.py | Python | mit | 493 |
import os
from pandas import DataFrame
from cave.analyzer.base_analyzer import BaseAnalyzer
from cave.utils.helpers import check_for_features
from cave.utils.hpbandster_helpers import format_budgets
class FeatureImportance(BaseAnalyzer):
    """Forward-selection based feature importance, computed per budget."""

    def __init__(self, runscontainer):
        """Run the analysis for each budget of *runscontainer*.

        Stores the resulting plots in ``self.result`` keyed by formatted
        budget, and shares the raw importances with the run objects.
        """
        super().__init__(runscontainer)
        check_for_features(runscontainer.scenario)

        budget_labels = format_budgets(self.runscontainer.get_budgets())
        aggregated_runs = self.runscontainer.get_aggregated(keep_budgets=True,
                                                           keep_folders=False)
        for budget, run in zip(self.runscontainer.get_budgets(), aggregated_runs):
            importances, figures = self.feature_importance(
                pimp=run.pimp,
                output_dir=run.output_dir,
            )
            self.result[budget_labels[budget]] = figures
            # Add to run so other analysis-methods can use the information
            run.share_information['feature_importance'] = importances

    def get_name(self):
        """Human-readable analyzer name."""
        return "Feature Importance"

    def feature_importance(self, pimp, output_dir):
        """Evaluate forward-selection feature importance via *pimp*.

        Returns (importances-dict, result-dict-with-table-and-figures).
        """
        self.logger.info("... plotting feature importance")
        # Temporarily reconfigure pimp for forward selection over features;
        # the previous settings are restored below.
        saved_state = (pimp.forwardsel_feat_imp,
                       pimp._parameters_to_evaluate,
                       pimp.forwardsel_cv)
        pimp.forwardsel_feat_imp = True
        pimp._parameters_to_evaluate = -1
        pimp.forwardsel_cv = False

        plot_dir = os.path.join(output_dir, 'feature_plots/importance')
        os.makedirs(plot_dir, exist_ok=True)
        evaluation = pimp.evaluate_scenario(['forward-selection'], plot_dir)
        importances = evaluation[0]['forward-selection']['imp']
        figure_paths = [os.path.join(plot_dir, basename)
                        for basename in ('forward-selection-barplot.png',
                                         'forward-selection-chng.png')]
        # Restore the attributes clobbered above.
        (pimp.forwardsel_feat_imp,
         pimp._parameters_to_evaluate,
         pimp.forwardsel_cv) = saved_state

        html_table = DataFrame(data=list(importances.values()),
                               index=list(importances.keys()),
                               columns=["Error"]).to_html()
        result = {'Table': {'table': html_table}}
        for path in figure_paths:
            result[os.path.splitext(os.path.basename(path))[0]] = {'figure': path}
        return importances, result
| automl/SpySMAC | cave/analyzer/feature_analysis/feature_importance.py | Python | bsd-3-clause | 2,224 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2020 Martin Raspaud
# Author(s):
# Martin Raspaud <martin.raspaud@smhi.se>
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Module to find new files on an s3 bucket."""
import json
import logging
import posixpath
from datetime import datetime, timedelta
import time
from contextlib import contextmanager
import fsspec.implementations.zip
import s3fs
from dateutil import tz
from posttroll.message import Message
from posttroll.publisher import Publish
from trollsift import Parser
logger = logging.getLogger(__name__)
@contextmanager
def sleeper(duration):
    """Make sure the block takes at least *duration* seconds."""
    began = datetime.utcnow()
    yield
    elapsed = (datetime.utcnow() - began).total_seconds()
    remaining = duration - elapsed
    time.sleep(max(remaining, 0))
class DatetimeHolder:
    """Holder for the last_fetch datetime.

    Module-level mutable state shared by the fetch helpers; read by
    _get_files_since_last_fetch and updated via set_last_fetch.
    """
    # Start 12 hours in the past (UTC) so the first poll looks back.
    last_fetch = datetime.now(tz.UTC) - timedelta(hours=12)
def get_last_files(path, *args, pattern=None, **kwargs):
    """Get the last files from path (s3 bucket and directory).

    Opens an S3 filesystem with the given options, keeps the entries
    newer than the last fetch, optionally filters them against
    *pattern*, and advances the last-fetch timestamp.
    """
    filesystem = s3fs.S3FileSystem(*args, **kwargs)
    recent = _get_files_since_last_fetch(filesystem, path)
    recent = _match_files_to_pattern(recent, path, pattern)
    _reset_last_fetch_from_file_list(recent)
    return filesystem, recent
def _reset_last_fetch_from_file_list(files):
newest_files = sorted(list(files), key=(lambda x: x['LastModified']), reverse=True)
if newest_files:
set_last_fetch(newest_files[0]['LastModified'])
def _get_files_since_last_fetch(fs, path):
    """List *path* and keep only entries newer than the last fetch time."""
    listing = fs.ls(path, detail=True)
    return [entry for entry in listing
            if entry['LastModified'] > DatetimeHolder.last_fetch]
def _match_files_to_pattern(files, path, pattern):
if pattern is not None:
parser = Parser(posixpath.join(path, pattern))
matching_files = []
for file in files:
try:
metadata = parser.parse(file['name'])
file['metadata'] = metadata
matching_files.append(file)
except ValueError:
pass
return matching_files
return files
def set_last_fetch(timestamp):
    """Record *timestamp* as the time of the most recent fetch."""
    DatetimeHolder.last_fetch = timestamp
def create_message(fs, file, subject, metadata=None):
    """Create a posttroll message describing *file* on filesystem *fs*.

    A list/tuple of files becomes a 'dataset' message; a single file
    becomes a 'file' message.  Extra *metadata* is merged into the
    payload.
    """
    if isinstance(file, (list, tuple)):
        payload = {'dataset': [_create_message_metadata(fs, item)
                               for item in file]}
        message_type = 'dataset'
    else:
        payload = _create_message_metadata(fs, file)
        message_type = 'file'
    if metadata:
        payload.update(metadata)
    return Message(subject, message_type, payload)
def _create_message_metadata(fs, file):
    """Build the metadata dict for one file: filesystem, uri, uid, extras."""
    filesystem_info = json.loads(fs)
    uri = _create_uri(file, filesystem_info)
    data = {'filesystem': filesystem_info,
            'uri': uri,
            'uid': _create_uid_from_uri(uri, filesystem_info)}
    # Pattern-parsed fields (if any) ride along in the payload.
    data.update(file.get('metadata', dict()))
    return data
def _create_uri(file, loaded_fs):
protocol = loaded_fs["protocol"]
if protocol == 'abstract' and 'zip' in loaded_fs['cls']:
protocol = 'zip'
uri = protocol + ':///' + file['name']
return uri
def _create_uid_from_uri(uri, loaded_fs):
uid = uri
if 'target_protocol' in loaded_fs:
uid += '::' + loaded_fs['target_protocol'] + ':///' + (loaded_fs.get('fo') or loaded_fs['args'][0])
return uid
def filelist_to_messages(fs, files, subject):
    """Convert filelist to a list of posttroll messages."""
    messages = []
    for file_item in files:
        messages.append(create_message(fs.to_json(), file_item, subject))
    return messages
def filelist_unzip_to_messages(fs, files, subject):
    """Unzip files in filelist if necessary, create posttroll messages.

    Zip archives become 'dataset' messages listing their contents via a
    layered zip filesystem; other files become plain 'file' messages.
    """
    messages = []
    for file in files:
        if not file['name'].endswith('.zip'):
            messages.append(create_message(fs.to_json(), file, subject))
            continue
        # Wrap the archive in a zip filesystem layered over *fs*.
        zipfs = fsspec.implementations.zip.ZipFileSystem(
            fo=file['name'],
            target_protocol=fs.protocol[0],
            target_options=fs.storage_options)
        contents = list(zipfs.find('/', detail=True).values())
        messages.append(create_message(zipfs.to_json(), contents, subject,
                                       file.get('metadata')))
    return messages
def publish_new_files(bucket, config):
    """Publish files newly arrived in bucket."""
    with Publish("s3_stalker") as publisher:
        lookback = config['timedelta']
        topic = config['subject']
        file_pattern = config.get('file_pattern')
        # Pace the polling: the fetch block takes at least 2.5 seconds.
        with sleeper(2.5):
            set_last_fetch(datetime.now(tz.UTC) - timedelta(**lookback))
            storage_options = config['s3_kwargs']
            fs, files = get_last_files(bucket, pattern=file_pattern,
                                       **storage_options)
            messages = filelist_unzip_to_messages(fs, files, topic)
        for message in messages:
            logger.info("Publishing %s", str(message))
            publisher.send(str(message))
| pytroll/pytroll-collectors | pytroll_collectors/s3stalker.py | Python | gpl-3.0 | 5,789 |
# Copyright (c) 2018, Frappe and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.utils.rename_field import rename_field
def execute():
    """Patch: migrate the 'Land Unit' tree into the 'Location' doctype.

    Renames Land Unit / Linked Land Unit to Location / Linked Location,
    renames dependent fields, copies any remaining Land Unit rows into the
    Location tree, and finally drops the old doctypes.  Every step is
    guarded so the patch is safe to re-run.
    """
    # Rename and reload the Land Unit and Linked Land Unit doctypes
    if frappe.db.table_exists('Land Unit') and not frappe.db.table_exists('Location'):
        frappe.rename_doc('DocType', 'Land Unit', 'Location', force=True)
    frappe.reload_doc('assets', 'doctype', 'location')
    if frappe.db.table_exists('Linked Land Unit') and not frappe.db.table_exists('Linked Location'):
        frappe.rename_doc('DocType', 'Linked Land Unit', 'Linked Location', force=True)
    frappe.reload_doc('assets', 'doctype', 'linked_location')
    if not frappe.db.table_exists('Crop Cycle'):
        frappe.reload_doc('agriculture', 'doctype', 'crop_cycle')
    # Rename the fields in related doctypes
    if 'linked_land_unit' in frappe.db.get_table_columns('Crop Cycle'):
        rename_field('Crop Cycle', 'linked_land_unit', 'linked_location')
    if 'land_unit' in frappe.db.get_table_columns('Linked Location'):
        rename_field('Linked Location', 'land_unit', 'location')
    # Make sure the tree root exists before copying the old rows under it.
    if not frappe.db.exists("Location", "All Land Units"):
        frappe.get_doc({"doctype": "Location", "is_group": True, "location_name": "All Land Units"}).insert(ignore_permissions=True)
    if frappe.db.table_exists('Land Unit'):
        # Ordered by lft so parents are created before their children
        # (nested-set traversal order).
        land_units = frappe.get_all('Land Unit', fields=['*'], order_by='lft')
        for land_unit in land_units:
            if not frappe.db.exists('Location', land_unit.get('land_unit_name')):
                frappe.get_doc({
                    'doctype': 'Location',
                    'location_name': land_unit.get('land_unit_name'),
                    'parent_location': land_unit.get('parent_land_unit') or "All Land Units",
                    'is_container': land_unit.get('is_container'),
                    'is_group': land_unit.get('is_group'),
                    'latitude': land_unit.get('latitude'),
                    'longitude': land_unit.get('longitude'),
                    'area': land_unit.get('area'),
                    'location': land_unit.get('location'),
                    'lft': land_unit.get('lft'),
                    'rgt': land_unit.get('rgt')
                }).insert(ignore_permissions=True)
    # Delete the Land Unit and Linked Land Unit doctypes
    if frappe.db.table_exists('Land Unit'):
        frappe.delete_doc('DocType', 'Land Unit', force=1)
    if frappe.db.table_exists('Linked Land Unit'):
        frappe.delete_doc('DocType', 'Linked Land Unit', force=1)
| gsnbng/erpnext | erpnext/patches/v11_0/merge_land_unit_with_location.py | Python | agpl-3.0 | 2,375 |
# ===========================================================================
#
# Library: PyCTK
# Filename: test_axeswidget.py
#
# Copyright (c) 2015 Lamond Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0.txt
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ===========================================================================
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyCTK.Widgets import ctkAxesWidget
class Widget(QWidget):
    """Minimal demo window that hosts a single ctkAxesWidget."""

    def __init__(self, parent=None, **kwargs):
        super().__init__(parent, **kwargs)
        layout = QVBoxLayout(self)
        self._axesWidget = ctkAxesWidget(self)
        layout.addWidget(self._axesWidget)
if __name__ == "__main__":
    # Standalone smoke test: show the widget and run the Qt event loop.
    import sys
    app = QApplication(sys.argv)
    window = Widget()
    window.show()
    window.raise_()
    sys.exit(app.exec_())
# -*- coding: utf-8 -*-
# Copyright 2022 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated code. DO NOT EDIT!
#
# Snippet for ListLakes
# NOTE: This snippet has been automatically generated for illustrative purposes only.
# It may require modifications to work in your environment.
# To install the latest published package dependency, execute the following:
# python3 -m pip install google-cloud-dataplex
# [START dataplex_v1_generated_DataplexService_ListLakes_async]
from google.cloud import dataplex_v1
async def sample_list_lakes():
    """List all lakes under a parent resource and print each one."""
    # Create a client
    client = dataplex_v1.DataplexServiceAsyncClient()

    # Initialize request argument(s)
    request = dataplex_v1.ListLakesRequest(
        parent="parent_value",
    )

    # Make the request. The async client's list_lakes is a coroutine that
    # resolves to an async pager, so it must be awaited before iterating;
    # without the await, `async for` would fail on the raw coroutine.
    page_result = await client.list_lakes(request=request)

    # Handle the response
    async for response in page_result:
        print(response)
# [END dataplex_v1_generated_DataplexService_ListLakes_async]
| googleapis/python-dataplex | samples/generated_samples/dataplex_v1_generated_dataplex_service_list_lakes_async.py | Python | apache-2.0 | 1,491 |
# Copyright (c) 2013, Web Notes Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import webnotes
import webnotes.utils
from webnotes.utils import cstr
from webnotes import _
class DocType():
    """Newsletter controller: composes and bulk-sends email to Leads/Contacts."""
    def __init__(self, d, dl):
        self.doc, self.doclist = d, dl

    def onload(self):
        # After sending, expose per-status counts of the queued Bulk Email
        # rows so the form can display delivery progress.
        if self.doc.email_sent:
            self.doc.fields["__status_count"] = dict(webnotes.conn.sql("""select status, count(*)
                from `tabBulk Email` where ref_doctype=%s and ref_docname=%s
                group by status""", (self.doc.doctype, self.doc.name))) or None

    def test_send(self, doctype="Lead"):
        # Send a test copy to the comma-separated addresses in test_email_id.
        # NOTE(review): the 'doctype' argument is unused; recipients are
        # always treated as Leads -- confirm that is intended.
        self.recipients = self.doc.test_email_id.split(",")
        self.send_to_doctype = "Lead"
        self.send_bulk()
        webnotes.msgprint("""Scheduled to send to %s""" % self.doc.test_email_id)

    def send_emails(self):
        """send emails to leads and customers"""
        # Guard against double-sending the same newsletter.
        if self.doc.email_sent:
            webnotes.msgprint("""Newsletter has already been sent""", raise_exception=1)
        self.recipients = self.get_recipients()
        self.send_bulk()
        webnotes.msgprint("""Scheduled to send to %d %s(s)""" % (len(self.recipients),
            self.send_to_doctype))
        webnotes.conn.set(self.doc, "email_sent", 1)

    def get_recipients(self):
        # Resolve the recipient email list from the configured target group.
        # Side effect: sets self.send_to_doctype, consumed by send_bulk().
        if self.doc.send_to_type=="Contact":
            self.send_to_doctype = "Contact"
            if self.doc.contact_type == "Customer":
                return webnotes.conn.sql_list("""select email_id from tabContact
                    where ifnull(email_id, '') != '' and ifnull(customer, '') != ''""")
            elif self.doc.contact_type == "Supplier":
                return webnotes.conn.sql_list("""select email_id from tabContact
                    where ifnull(email_id, '') != '' and ifnull(supplier, '') != ''""")
            elif self.doc.contact_type == "Referrer Physician":
                return webnotes.conn.sql_list("""select email_id from tabContact
                    where ifnull(email_id, '') != '' and ifnull(referrer_physician, '') != ''""")
        elif self.doc.send_to_type=="Lead":
            self.send_to_doctype = "Lead"
            # Optional source/status filters; "All" means no filter.
            # NOTE(review): values are interpolated into SQL directly; they
            # come from the saved document, but parameterizing would be safer.
            conditions = []
            if self.doc.lead_source and self.doc.lead_source != "All":
                conditions.append(" and source='%s'" % self.doc.lead_source)
            if self.doc.lead_status and self.doc.lead_status != "All":
                conditions.append(" and status='%s'" % self.doc.lead_status)
            if conditions:
                conditions = "".join(conditions)
            return webnotes.conn.sql_list("""select email_id from tabLead
                where ifnull(email_id, '') != '' %s""" % (conditions or ""))
        elif self.doc.email_list:
            # Free-form comma-separated addresses: register each as a Lead.
            email_list = [cstr(email).strip() for email in self.doc.email_list.split(",")]
            for email in email_list:
                create_lead(email)
            self.send_to_doctype = "Lead"
            return email_list

    def send_bulk(self):
        # Queue the newsletter through the Bulk Email framework.
        self.validate_send()
        sender = self.doc.send_from or webnotes.utils.get_formatted_email(self.doc.owner)
        from webnotes.utils.email_lib.bulk import send
        # Avoid one commit per recipient outside of tests.
        if not webnotes.flags.in_test:
            webnotes.conn.auto_commit_on_many_writes = True
        send(recipients = self.recipients, sender = sender,
            subject = self.doc.subject, message = self.doc.message,
            doctype = self.send_to_doctype, email_field = "email_id",
            ref_doctype = self.doc.doctype, ref_docname = self.doc.name)
        if not webnotes.flags.in_test:
            webnotes.conn.auto_commit_on_many_writes = False

    def validate_send(self):
        # Unsaved newsletters and Trial accounts are not allowed to send.
        if self.doc.fields.get("__islocal"):
            webnotes.msgprint(_("""Please save the Newsletter before sending."""),
                raise_exception=1)
        from webnotes import conf
        if (conf.get("status") or None) == "Trial":
            webnotes.msgprint(_("""Sending newsletters is not allowed for Trial users, \
                to prevent abuse of this feature."""), raise_exception=1)
@webnotes.whitelist()
def get_lead_options():
    """Return distinct Lead sources and statuses for the filter dropdowns.

    ``filter(None, ...)`` drops empty/NULL values.  It is wrapped in
    ``list()`` so the ``["All"] + ...`` concatenation also works on
    Python 3, where ``filter`` returns an iterator instead of a list;
    on Python 2 the behavior is unchanged.
    """
    return {
        "sources": ["All"] + list(filter(None,
            webnotes.conn.sql_list("""select distinct source from tabLead"""))),
        "statuses": ["All"] + list(filter(None,
            webnotes.conn.sql_list("""select distinct status from tabLead""")))
    }
def create_lead(email_id):
    """create a lead if it does not exist"""
    from email.utils import parseaddr
    from webnotes.model.doc import get_default_naming_series
    real_name, email_id = parseaddr(email_id)
    # Nothing to do when a Lead with this address already exists.
    if webnotes.conn.get_value("Lead", {"email_id": email_id}):
        return
    lead_fields = {
        "doctype": "Lead",
        "email_id": email_id,
        "lead_name": real_name or email_id,
        "status": "Contacted",
        "naming_series": get_default_naming_series("Lead"),
        "company": webnotes.conn.get_default("company"),
        "source": "Email"
    }
    webnotes.bean(lead_fields).insert()
| saurabh6790/medapp | support/doctype/newsletter/newsletter.py | Python | agpl-3.0 | 4,556 |
"""
Copyright (c) 2012-2013 RockStor, Inc. <http://rockstor.com>
This file is part of RockStor.
RockStor is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published
by the Free Software Foundation; either version 2 of the License,
or (at your option) any later version.
RockStor is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
from rest_framework.response import Response
from django.db import transaction
from django.conf import settings
from storageadmin.models import (SambaShare, Disk)
from storageadmin.serializers import SambaShareSerializer
from storageadmin.util import handle_exception
from storageadmin.exceptions import RockStorAPIException
import rest_framework_custom as rfc
from share_helpers import validate_share
from system.osi import (refresh_smb_config, restart_samba)
from fs.btrfs import (mount_share, is_share_mounted)
import logging
logger = logging.getLogger(__name__)
class SambaView(rfc.GenericView):
serializer_class = SambaShareSerializer
CREATE_MASKS = ('0777', '0755', '0744', '0700',)
def get_queryset(self, *args, **kwargs):
if ('id' in kwargs):
self.paginate_by = 0
try:
return SambaShare.objects.get(id=kwargs['id'])
except:
return []
return SambaShare.objects.all()
@transaction.commit_on_success
def post(self, request):
if ('shares' not in request.DATA):
e_msg = ('Must provide share names')
handle_exception(Exception(e_msg), request)
shares = [validate_share(s, request) for s in request.DATA['shares']]
options = {
'comment': 'samba export',
'browsable': 'yes',
'guest_ok': 'no',
'read_only': 'no',
'create_mask': '0755',
'admin_users': 'Administrator',
}
if ('comment' in request.DATA):
options['comment'] = request.DATA['comment']
if ('browsable' in request.DATA):
if (request.DATA['browsable'] != 'yes' and
request.DATA['browsable'] != 'no'):
e_msg = ('Invalid choice for browsable. Possible '
'choices are yes or no.')
handle_exception(Exception(e_msg), request)
options['browsable'] = request.DATA['browsable']
if ('guest_ok' in request.DATA):
if (request.DATA['guest_ok'] != 'yes' and
request.DATA['guest_ok'] != 'no'):
e_msg = ('Invalid choice for guest_ok. Possible '
'options are yes or no.')
handle_exception(Exception(e_msg), request)
options['guest_ok'] = request.DATA['guest_ok']
if ('read_only' in request.DATA):
if (request.DATA['read_only'] != 'yes' and
request.DATA['read_only'] != 'no'):
e_msg = ('Invalid choice for read_only. Possible '
'options are yes or no.')
handle_exception(Exception(e_msg), request)
options['read_only'] = request.DATA['read_only']
if ('create_mask' in request.DATA):
if (request.DATA['create_mask'] not in self.CREATE_MASKS):
e_msg = ('Invalid choice for create_mask. Possible '
'options are: %s' % self.CREATE_MASKS)
handle_exception(Exception(e_msg), request)
if ('admin_users' in request.DATA):
options['admin_users'] = request.DATA['admin_users']
for share in shares:
if (SambaShare.objects.filter(share=share).exists()):
e_msg = ('Share(%s) is already exported via Samba' %
share.name)
handle_exception(Exception(e_msg), request)
try:
for share in shares:
mnt_pt = ('%s%s' % (settings.MNT_PT, share.name))
smb_share = SambaShare(share=share, path=mnt_pt,
comment=options['comment'],
browsable=options['browsable'],
read_only=options['read_only'],
guest_ok=options['guest_ok'],
create_mask=options['create_mask'],
admin_users=options['admin_users'])
smb_share.save()
if (not is_share_mounted(share.name)):
pool_device = Disk.objects.filter(pool=share.pool)[0].name
mount_share(share.subvol_name, pool_device, mnt_pt)
refresh_smb_config(list(SambaShare.objects.all()))
restart_samba()
return Response()
except RockStorAPIException:
raise
except Exception, e:
handle_exception(e, request)
@transaction.commit_on_success
def delete(self, request, id):
try:
smbo = SambaShare.objects.get(id=id)
smbo.delete()
except:
e_msg = ('Samba export for the id(%s) does not exist' % id)
handle_exception(Exception(e_msg), request)
try:
refresh_smb_config(list(SambaShare.objects.all()))
restart_samba()
return Response()
except Exception, e:
e_msg = ('System error occured while restarting Samba server')
handle_exception(Exception(e_msg), request)
| kamal-gade/rockstor-core | src/rockstor/storageadmin/views/samba.py | Python | gpl-3.0 | 5,842 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import inspect
# Attribute name under which the coverage-path callback is stashed.
EXPECT_TESTS_COVER_FUNCTION = 'EXPECT_TESTS_COVER_FUNCTION'


def covers(coverage_path_function):
  """Annotate a Test generator with a coverage-path callback.

  The callback returns the list of coverage patterns that should be
  enabled while the decorated Test generator runs.  The generator itself
  is returned unchanged (only an attribute is attached).
  """
  def _decorator(test_gen):
    setattr(test_gen, EXPECT_TESTS_COVER_FUNCTION, coverage_path_function)
    return test_gen
  return _decorator
def get_cover_list(test_gen_function):
  """Given a Test generator, return the list of coverage globs that should
  be included while executing the Test generator."""
  # Default: cover the file the generator itself is defined in.
  default_fn = lambda: [inspect.getabsfile(test_gen_function)]
  cover_fn = getattr(test_gen_function, EXPECT_TESTS_COVER_FUNCTION, default_fn)
  return cover_fn()
| shishkander/recipes-py | recipe_engine/third_party/expect_tests/util.py | Python | bsd-3-clause | 903 |
# coding=utf-8
# the inclusion of the tests module is not meant to offer best practices for
# testing in general, but rather to support the `find_packages` example in
# setup.py that excludes installing the "tests" package
def test_success():
    """Sanity check that the test harness itself runs and passes."""
    assert True is True
| eyesee1/python-pinballmap | tests/test_simple.py | Python | mit | 261 |
# python 3 has different package names
try: from cStringIO import StringIO
except ImportError: from io import StringIO
from unittest import TestCase
import functools
from tornado.testing import AsyncTestCase
from tornado.httpclient import HTTPRequest, HTTPResponse
from tornado_stub_client.collection import RequestCollection
from tornado_stub_client.httpclient import AsyncHTTPStubClient
class ClientTest(AsyncTestCase, TestCase):
    """Behavioral tests for AsyncHTTPStubClient backed by RequestCollection."""

    def setUp(self):
        # Start every test with an empty stub collection.
        super(ClientTest, self).setUp()
        RequestCollection.reset()

    def test_add_then_fetch(self):
        # A stubbed request is served back with its canned response.
        req = HTTPRequest("/hello")
        resp_partial = functools.partial(HTTPResponse,
                code=200, buffer=StringIO("response value"))
        RequestCollection.add(req, resp_partial)
        client = AsyncHTTPStubClient()
        client.fetch(req, self.stop)
        response = self.wait()
        self.assertEqual(response.code, 200)
        # NOTE(review): compares body to a str -- assumes a Python 2 /
        # str-body tornado; under Python 3 bodies are bytes. Confirm.
        self.assertEqual(response.body, "response value")

    def test_fetch_string_converts_to_request_object(self):
        # Fetching by URL string matches a stub that was added as a request
        # object, i.e. the client normalizes strings to HTTPRequest.
        req = HTTPRequest("/hello")
        resp_partial = functools.partial(HTTPResponse,
                code=200, buffer=StringIO("response value"))
        RequestCollection.add(req, resp_partial)
        client = AsyncHTTPStubClient()
        client.fetch("/hello", self.stop)
        response = self.wait()
        self.assertEqual(response.code, 200)
        self.assertEqual(response.body, "response value")

    def test_fetch_wrong_thing_returns_404(self):
        # Unknown URLs yield a 404 with an empty body.
        client = AsyncHTTPStubClient()
        client.fetch("/nothingasdfads", self.stop)
        response = self.wait()
        self.assertEqual(response.code, 404)
        self.assertEqual(response.body, None)

    def test_post_and_get_are_different(self):
        # A stub registered for GET must not match a POST to the same URL.
        req = HTTPRequest("/hello")
        resp_partial = functools.partial(HTTPResponse,
                buffer=StringIO("response value"))
        RequestCollection.add(req, resp_partial)
        AsyncHTTPStubClient().fetch("/hello", self.stop, method="POST")
        response = self.wait()
        self.assertEqual(response.code, 404)
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# -*- encoding: utf-8 -*-
#
# Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import Table, MetaData
# MySQL table options used by migrations in this repo; not referenced in
# this particular migration but kept for consistency with sibling scripts.
ENGINE = 'InnoDB'
CHARSET = 'utf8'
def upgrade(migrate_engine):
    """Rename the 'ifaces' table to 'ports'."""
    metadata = MetaData()
    metadata.bind = migrate_engine
    ifaces_table = Table('ifaces', metadata, autoload=True)
    ifaces_table.rename('ports')
def downgrade(migrate_engine):
    """Downgrading this migration is intentionally unsupported."""
    raise NotImplementedError('Downgrade from version 003 is unsupported.')
| citrix-openstack-build/ironic | ironic/db/sqlalchemy/migrate_repo/versions/004_rename_ifaces_to_ports.py | Python | apache-2.0 | 1,050 |
from django.test import TestCase
from api.models import UsernameSnippet
class TestUsernameSnippet(TestCase):
    """Model-level checks for the UsernameSnippet doctype."""

    @classmethod
    def setUpTestData(cls):
        # One available snippet shared by every test in this class.
        UsernameSnippet.objects.create(available=True)

    def test_existence(self):
        snippet = UsernameSnippet.objects.first()
        self.assertIsInstance(snippet, UsernameSnippet)
        self.assertEqual(snippet.available, True)

    def test_field_types(self):
        snippet = UsernameSnippet.objects.first()
        self.assertIsInstance(snippet.available, bool)
| jeremyphilemon/uniqna | api/tests/test_models.py | Python | bsd-3-clause | 459 |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2017, Anaconda, Inc. All rights reserved.
#
# Powered by the Bokeh Development Team.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
from __future__ import absolute_import, division, print_function, unicode_literals
import pytest ; pytest
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Standard library imports
# External imports
# Bokeh imports
from bokeh._testing.util.api import verify_all
# Module under test
#import bokeh.sampledata.commits as bsc
#-----------------------------------------------------------------------------
# Setup
#-----------------------------------------------------------------------------
# Names expected to be exported by bokeh.sampledata.commits.
ALL = (
    'data',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
# Verify the module's public API matches ALL (runs under the sampledata mark).
Test___all__ = pytest.mark.sampledata(verify_all("bokeh.sampledata.commits", ALL))
@pytest.mark.sampledata
def test_data(pd):
    """The commits sample data loads as a DataFrame of the expected size."""
    import bokeh.sampledata.commits as commits_module
    assert isinstance(commits_module.data, pd.DataFrame)

    # check detail for package data
    assert len(commits_module.data) == 4916
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
| dennisobrien/bokeh | bokeh/sampledata/tests/test_commits.py | Python | bsd-3-clause | 1,961 |
# Copyright 2013 Mirantis Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
from neutronclient._i18n import _
from neutronclient.neutron import v2_0 as neutronV20
class ListVip(neutronV20.ListCommand):
    """List vips that belong to a given tenant."""

    # The base ListCommand derives the API calls, output columns and CLI
    # behavior from these class attributes.
    resource = 'vip'
    list_columns = ['id', 'name', 'algorithm', 'address', 'protocol',
                    'admin_state_up', 'status']
    pagination_support = True
    sorting_support = True
class ShowVip(neutronV20.ShowCommand):
    """Show information of a given vip."""

    # The base ShowCommand implements the lookup for this resource.
    resource = 'vip'
class CreateVip(neutronV20.CreateCommand):
    """Create a vip."""

    resource = 'vip'

    def add_known_arguments(self, parser):
        # Positional pool argument plus the vip's CLI flags; --name,
        # --protocol-port, --protocol and --subnet-id are mandatory.
        parser.add_argument(
            'pool_id', metavar='POOL',
            help=_('ID or name of the pool to which this vip belongs.'))
        parser.add_argument(
            '--address',
            help=_('IP address of the vip.'))
        parser.add_argument(
            '--admin-state-down',
            dest='admin_state', action='store_false',
            help=_('Set admin state up to false.'))
        parser.add_argument(
            '--connection-limit',
            help=_('The maximum number of connections per second allowed for '
                   'the vip. Valid values: a positive integer or -1 '
                   'for unlimited (default).'))
        parser.add_argument(
            '--description',
            help=_('Description of the vip to be created.'))
        parser.add_argument(
            '--name',
            required=True,
            help=_('Name of the vip to be created.'))
        parser.add_argument(
            '--protocol-port',
            required=True,
            help=_('TCP port on which to listen for client traffic that is '
                   'associated with the vip address.'))
        parser.add_argument(
            '--protocol',
            required=True, choices=['TCP', 'HTTP', 'HTTPS'],
            help=_('Protocol for balancing.'))
        parser.add_argument(
            '--subnet-id', metavar='SUBNET',
            required=True,
            help=_('The subnet on which to allocate the vip address.'))

    def args2body(self, parsed_args):
        # Users may pass either names or ids on the CLI; resolve both to
        # ids before building the API request body.
        _pool_id = neutronV20.find_resourceid_by_name_or_id(
            self.get_client(), 'pool', parsed_args.pool_id)
        _subnet_id = neutronV20.find_resourceid_by_name_or_id(
            self.get_client(), 'subnet', parsed_args.subnet_id)
        body = {'pool_id': _pool_id,
                'admin_state_up': parsed_args.admin_state,
                'subnet_id': _subnet_id}
        # Copy optional attributes into the body only when supplied.
        neutronV20.update_dict(parsed_args, body,
                               ['address', 'connection_limit', 'description',
                                'name', 'protocol_port', 'protocol',
                                'tenant_id'])
        return {self.resource: body}
class UpdateVip(neutronV20.UpdateCommand):
    """Update a given vip."""

    # The base UpdateCommand implements the update logic for this resource.
    resource = 'vip'
class DeleteVip(neutronV20.DeleteCommand):
    """Delete a given vip."""

    # The base DeleteCommand implements the delete logic for this resource.
    resource = 'vip'
| eayunstack/python-neutronclient | neutronclient/neutron/v2_0/lb/vip.py | Python | apache-2.0 | 3,623 |
# Copyright 2013: Intel Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from rally.benchmark.scenarios.neutron import utils
from tests.unit import fakes
from tests.unit import test
NEUTRON_UTILS = "rally.benchmark.scenarios.neutron.utils."
class NeutronScenarioTestCase(test.TestCase):
    def setUp(self):
        """Reset the shared mock network before each test."""
        super(NeutronScenarioTestCase, self).setUp()
        self.network = mock.Mock()
    @mock.patch(NEUTRON_UTILS + "NeutronScenario._generate_random_name")
    @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients")
    def test_create_network(self, mock_clients, mock_random_name):
        """_create_network: uses an explicit name, else a random one."""
        neutron_scenario = utils.NeutronScenario()
        explicit_name = "explicit_name"
        random_name = "random_name"
        mock_random_name.return_value = random_name
        mock_clients("neutron").create_network.return_value = self.network

        # Network name is specified
        network_data = {"name": explicit_name, "admin_state_up": False}
        expected_network_data = {"network": network_data}
        network = neutron_scenario._create_network(network_data)
        mock_clients("neutron").create_network.assert_called_once_with(
            expected_network_data)
        self.assertEqual(self.network, network)
        self._test_atomic_action_timer(neutron_scenario.atomic_actions(),
                                       "neutron.create_network")

        mock_clients("neutron").create_network.reset_mock()

        # Network name is random generated
        network_data = {"admin_state_up": False}
        expected_network_data["network"]["name"] = random_name
        network = neutron_scenario._create_network(network_data)
        mock_clients("neutron").create_network.assert_called_once_with(
            expected_network_data)
    @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients")
    def test_list_networks(self, mock_clients):
        """_list_networks returns the 'networks' list and is timed."""
        scenario = utils.NeutronScenario()
        networks_list = []
        networks_dict = {"networks": networks_list}
        mock_clients("neutron").list_networks.return_value = networks_dict
        return_networks_list = scenario._list_networks()
        self.assertEqual(networks_list, return_networks_list)
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "neutron.list_networks")
    @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients")
    def test_update_network(self, mock_clients):
        """_update_network sends the expected body and returns the result."""
        scenario = utils.NeutronScenario()
        expected_network = {
            "network": {
                "name": "network-name_updated",
                "admin_state_up": False
            }
        }
        mock_clients("neutron").update_network.return_value = expected_network

        network = {"network": {"name": "network-name", "id": "network-id"}}
        network_update_args = {"name": "_updated", "admin_state_up": False}

        result_network = scenario._update_network(network, network_update_args)
        mock_clients("neutron").update_network.assert_called_once_with(
            network["network"]["id"], expected_network)
        self.assertEqual(result_network, expected_network)
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "neutron.update_network")
    @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients")
    def test_delete_network(self, mock_clients):
        """_delete_network records its atomic-action timer."""
        scenario = utils.NeutronScenario()
        network_create_args = {}
        network = scenario._create_network(network_create_args)
        scenario._delete_network(network)
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "neutron.delete_network")
    @mock.patch(NEUTRON_UTILS + "NeutronScenario._generate_random_name",
                return_value="test_subnet")
    @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients")
    def test_create_subnet(self, mock_clients, mock_random_name):
        """_create_subnet builds the request from defaults and overrides."""
        scenario = utils.NeutronScenario()
        network_id = "fake-id"
        start_cidr = "192.168.0.0/24"

        network = {"network": {"id": network_id}}
        expected_subnet_data = {
            "subnet": {
                "network_id": network_id,
                "cidr": start_cidr,
                "ip_version": scenario.SUBNET_IP_VERSION,
                "name": mock_random_name.return_value
            }
        }

        # Default options
        subnet_data = {"network_id": network_id}
        scenario._create_subnet(network, subnet_data, start_cidr)
        mock_clients("neutron").create_subnet.assert_called_once_with(
            expected_subnet_data)
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "neutron.create_subnet")

        mock_clients("neutron").create_subnet.reset_mock()

        # Custom options
        extras = {"cidr": "192.168.16.0/24", "allocation_pools": []}
        subnet_data.update(extras)
        expected_subnet_data["subnet"].update(extras)
        scenario._create_subnet(network, subnet_data)
        mock_clients("neutron").create_subnet.assert_called_once_with(
            expected_subnet_data)
    @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients")
    def test_list_subnets(self, mock_clients):
        """_list_subnets returns the 'subnets' list and is timed."""
        subnets = [{"name": "fake1"}, {"name": "fake2"}]
        mock_clients("neutron").list_subnets.return_value = {
            "subnets": subnets
        }
        scenario = utils.NeutronScenario()
        result = scenario._list_subnets()
        self.assertEqual(subnets, result)
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "neutron.list_subnets")
    @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients")
    def test_update_subnet(self, mock_clients):
        """_update_subnet sends the expected body and returns the result."""
        scenario = utils.NeutronScenario()
        expected_subnet = {
            "subnet": {
                "name": "subnet-name_updated",
                "enable_dhcp": False
            }
        }
        mock_clients("neutron").update_subnet.return_value = expected_subnet

        subnet = {"subnet": {"name": "subnet-name", "id": "subnet-id"}}
        subnet_update_args = {"name": "_updated", "enable_dhcp": False}

        result_subnet = scenario._update_subnet(subnet, subnet_update_args)
        mock_clients("neutron").update_subnet.assert_called_once_with(
            subnet["subnet"]["id"], expected_subnet)
        self.assertEqual(result_subnet, expected_subnet)
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "neutron.update_subnet")
    @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients")
    def test_delete_subnet(self, mock_clients):
        """_delete_subnet records its atomic-action timer."""
        scenario = utils.NeutronScenario()
        network = scenario._create_network({})
        subnet = scenario._create_subnet(network, {})
        scenario._delete_subnet(subnet)
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "neutron.delete_subnet")
    @mock.patch(NEUTRON_UTILS + "NeutronScenario._generate_random_name")
    @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients")
    def test_create_router(self, mock_clients, mock_random_name):
        """_create_router: random name by default, explicit options verbatim."""
        scenario = utils.NeutronScenario()
        router = mock.Mock()
        explicit_name = "explicit_name"
        random_name = "random_name"
        mock_random_name.return_value = random_name
        mock_clients("neutron").create_router.return_value = router

        # Default options
        result_router = scenario._create_router({})
        mock_clients("neutron").create_router.assert_called_once_with(
            {"router": {"name": random_name}})
        self.assertEqual(result_router, router)
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "neutron.create_router")

        mock_clients("neutron").create_router.reset_mock()

        # Custom options
        router_data = {"name": explicit_name, "admin_state_up": True}
        result_router = scenario._create_router(router_data)
        mock_clients("neutron").create_router.assert_called_once_with(
            {"router": router_data})
    @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients")
    def test_list_routers(self, mock_clients):
        """_list_routers returns the 'routers' list and is timed."""
        scenario = utils.NeutronScenario()
        routers = [mock.Mock()]
        mock_clients("neutron").list_routers.return_value = {
            "routers": routers}
        self.assertEqual(routers, scenario._list_routers())
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "neutron.list_routers")
    @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients")
    def test_update_router(self, mock_clients):
        """_update_router sends the expected body and returns the result."""
        scenario = utils.NeutronScenario()
        expected_router = {
            "router": {
                "name": "router-name_updated",
                "admin_state_up": False
            }
        }
        mock_clients("neutron").update_router.return_value = expected_router

        router = {
            "router": {
                "id": "router-id",
                "name": "router-name",
                "admin_state_up": True
            }
        }
        router_update_args = {"name": "_updated", "admin_state_up": False}

        result_router = scenario._update_router(router, router_update_args)
        mock_clients("neutron").update_router.assert_called_once_with(
            router["router"]["id"], expected_router)
        self.assertEqual(result_router, expected_router)
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "neutron.update_router")
    @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients")
    def test_delete_router(self, mock_clients):
        """_delete_router() calls delete_router and records the timer."""
        scenario = utils.NeutronScenario()
        # The created router is a Mock, so the item access below also works.
        router = scenario._create_router({})
        scenario._delete_router(router)
        mock_clients("neutron").delete_router.assert_called_once_with(
            router["router"]["id"])
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "neutron.delete_router")
    @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients")
    def test_remove_interface_router(self, mock_clients):
        """_remove_interface_router() detaches the subnet and is timed."""
        subnet = {"name": "subnet-name", "id": "subnet-id"}
        router_data = {"id": 1}
        scenario = utils.NeutronScenario()
        router = scenario._create_router(router_data)
        # Attach first so there is an interface to remove.
        scenario._add_interface_router(subnet, router)
        scenario._remove_interface_router(subnet, router)
        mock_remove_router = mock_clients("neutron").remove_interface_router
        mock_remove_router.assert_called_once_with(
            router["id"], {"subnet_id": subnet["id"]})
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "neutron.remove_interface_router")
    def test_SUBNET_IP_VERSION(self):
        """Current NeutronScenario implementation supports only IPv4."""
        self.assertEqual(utils.NeutronScenario.SUBNET_IP_VERSION, 4)
    @mock.patch(NEUTRON_UTILS + "NeutronScenario._generate_random_name")
    @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients")
    def test_create_port(self, mock_clients, mock_rand_name):
        """_create_port() autogenerates a name and merges caller args."""
        scenario = utils.NeutronScenario()
        net_id = "network-id"
        net = {"network": {"id": net_id}}
        rand_name = "random-name"
        mock_rand_name.return_value = rand_name
        expected_port_args = {
            "port": {
                "network_id": net_id,
                "name": rand_name
            }
        }
        # Defaults
        port_create_args = {}
        scenario._create_port(net, port_create_args)
        mock_clients("neutron"
                     ).create_port.assert_called_once_with(expected_port_args)
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "neutron.create_port")
        mock_clients("neutron").create_port.reset_mock()
        # Custom options
        port_args = {"admin_state_up": True}
        expected_port_args["port"].update(port_args)
        scenario._create_port(net, port_args)
        mock_clients("neutron"
                     ).create_port.assert_called_once_with(expected_port_args)
    @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients")
    def test_list_ports(self, mock_clients):
        """_list_ports() unwraps the "ports" key and records the timer."""
        scenario = utils.NeutronScenario()
        ports = [{"name": "port1"}, {"name": "port2"}]
        mock_clients("neutron").list_ports.return_value = {"ports": ports}
        self.assertEqual(ports, scenario._list_ports())
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "neutron.list_ports")
    @mock.patch(NEUTRON_UTILS + "NeutronScenario.clients")
    def test_update_port(self, mock_clients):
        """_update_port() sends the merged body and records the timer."""
        scenario = utils.NeutronScenario()
        # Body the mocked API is expected to receive (and echoes back).
        expected_port = {
            "port": {
                "name": "port-name_updated",
                "admin_state_up": False,
                "device_id": "dummy_id",
                "device_owner": "dummy_owner"
            }
        }
        mock_clients("neutron").update_port.return_value = expected_port
        # Pre-existing port passed to the scenario under test.
        port = {
            "port": {
                "id": "port-id",
                "name": "port-name",
                "admin_state_up": True
            }
        }
        port_update_args = {
            "name": "_updated",
            "admin_state_up": False,
            "device_id": "dummy_id",
            "device_owner": "dummy_owner"
        }
        result_port = scenario._update_port(port, port_update_args)
        mock_clients("neutron").update_port.assert_called_once_with(
            port["port"]["id"], expected_port)
        self.assertEqual(result_port, expected_port)
        self._test_atomic_action_timer(scenario.atomic_actions(),
                                       "neutron.update_port")
@mock.patch(NEUTRON_UTILS + "NeutronScenario.clients")
def test_delete_port(self, mock_clients):
scenario = utils.NeutronScenario()
network = scenario._create_network({})
port = scenario._create_port(network, {})
scenario._delete_port(port)
self._test_atomic_action_timer(scenario.atomic_actions(),
"neutron.create_port")
    @mock.patch(NEUTRON_UTILS + "NeutronScenario._create_subnet",
                return_value={
                    "subnet": {
                        "name": "subnet-name",
                        "id": "subnet-id",
                        "enable_dhcp": False
                    }
                })
    @mock.patch(NEUTRON_UTILS + "NeutronScenario._create_network",
                return_value={
                    "network": {
                        "id": "fake-id"
                    }
                })
    def test_create_network_and_subnets(self,
                                        mock_create_network,
                                        mock_create_subnet):
        """One network plus N subnets, for default and custom options."""
        scenario = utils.NeutronScenario()
        network_create_args = {}
        subnet_create_args = {}
        subnets_per_network = 4
        # Clear any calls recorded by scenario construction.
        mock_create_network.reset_mock()
        mock_create_subnet.reset_mock()
        # Default options
        scenario._create_network_and_subnets(
            network_create_args=network_create_args,
            subnet_create_args=subnet_create_args,
            subnets_per_network=subnets_per_network)
        mock_create_network.assert_called_once_with({})
        mock_create_subnet.assert_has_calls(
            [mock.call({"network": {"id": "fake-id"}},
                       {}, "1.0.0.0/24")] * subnets_per_network)
        mock_create_network.reset_mock()
        mock_create_subnet.reset_mock()
        # Custom options
        scenario._create_network_and_subnets(
            network_create_args=network_create_args,
            subnet_create_args={"allocation_pools": []},
            subnet_cidr_start="10.10.10.0/24",
            subnets_per_network=subnets_per_network)
        mock_create_network.assert_called_once_with({})
        mock_create_subnet.assert_has_calls(
            [mock.call({"network": {"id": "fake-id"}},
                       {"allocation_pools": []},
                       "10.10.10.0/24")] * subnets_per_network)
    @mock.patch(NEUTRON_UTILS + "network_wrapper.generate_cidr")
    def test_functional_create_network_and_subnets(self, mock_generate_cidr):
        """End-to-end check against FakeClients (no scenario code mocked)."""
        scenario = utils.NeutronScenario(clients=fakes.FakeClients())
        network_create_args = {"name": "foo_network"}
        subnet_create_args = {}
        subnets_per_network = 5
        subnet_cidr_start = "1.1.1.0/24"
        # Deterministic CIDRs handed out one at a time by the wrapper mock.
        cidrs = ["1.1.%d.0/24" % i for i in range(subnets_per_network)]
        cidrs_ = iter(cidrs)
        mock_generate_cidr.side_effect = lambda **kw: next(cidrs_)
        network, subnets = scenario._create_network_and_subnets(
            network_create_args,
            subnet_create_args,
            subnets_per_network,
            subnet_cidr_start)
        self.assertEqual(network["network"]["name"], "foo_network")
        # This checks both data (cidrs seem to be enough) and subnets number
        result_cidrs = sorted([s["subnet"]["cidr"] for s in subnets])
        self.assertEqual(cidrs, result_cidrs)
| varunarya10/rally | tests/unit/benchmark/scenarios/neutron/test_utils.py | Python | apache-2.0 | 18,043 |
# encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add pushed/title/comment to editor.Branch."""
    def forwards(self, orm):
        """Apply the migration: create the three new Branch columns."""
        # Adding field 'Branch.pushed'
        db.add_column('editor_branch', 'pushed', self.gf('django.db.models.fields.BooleanField')(default=False), keep_default=False)
        # Adding field 'Branch.title'
        db.add_column('editor_branch', 'title', self.gf('django.db.models.fields.TextField')(default=''), keep_default=False)
        # Adding field 'Branch.comment'
        db.add_column('editor_branch', 'comment', self.gf('django.db.models.fields.TextField')(default=''), keep_default=False)
    def backwards(self, orm):
        """Revert the migration: drop the three Branch columns again."""
        # Deleting field 'Branch.pushed'
        db.delete_column('editor_branch', 'pushed')
        # Deleting field 'Branch.title'
        db.delete_column('editor_branch', 'title')
        # Deleting field 'Branch.comment'
        db.delete_column('editor_branch', 'comment')
    # Frozen ORM snapshot used by South to build the ``orm`` argument above;
    # auto-generated by ./manage.py schemamigration -- do not edit by hand.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'editor.branch': {
            'Meta': {'object_name': 'Branch'},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'comment': ('django.db.models.fields.TextField', [], {'default': "''"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'project': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['projects.Project']"}),
            'pushed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'title': ('django.db.models.fields.TextField', [], {'default': "''"}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'projects.project': {
            'Meta': {'ordering': "('slug',)", 'object_name': 'Project'},
            'copyright': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'docs_directory': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
            'extensions': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255'}),
            'featured': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'path': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'project_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
            'pub_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'repo': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
            'repo_type': ('django.db.models.fields.CharField', [], {'default': "'git'", 'max_length': '10'}),
            'skip': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'db_index': 'True'}),
            'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '1'}),
            'suffix': ('django.db.models.fields.CharField', [], {'default': "'.rst'", 'max_length': '10'}),
            'theme': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '20'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'projects'", 'to': "orm['auth.User']"}),
            'version': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'})
        },
        'taggit.tag': {
            'Meta': {'object_name': 'Tag'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '100', 'db_index': 'True'})
        },
        'taggit.taggeditem': {
            'Meta': {'object_name': 'TaggedItem'},
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_tagged_items'", 'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'object_id': ('django.db.models.fields.IntegerField', [], {'db_index': 'True'}),
            'tag': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'taggit_taggeditem_items'", 'to': "orm['taggit.Tag']"})
        }
    }
    complete_apps = ['editor']
| alex/readthedocs.org | readthedocs/editor/migrations/0002_auto__add_field_branch_pushed__add_field_branch_title__add_field_branc.py | Python | mit | 8,041 |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class NumberTestCase(IntegrationTestCase):
    """Integration tests for the Pricing v2 Voice Numbers endpoint."""
    def test_fetch_request(self):
        """Verify the HTTP request issued by numbers(...).fetch()."""
        # Queue a 500 so the request is recorded but the call raises.
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.pricing.v2.voice \
                .numbers(destination_number="+15017122661").fetch()
        self.holodeck.assert_has_request(Request(
            'get',
            'https://pricing.twilio.com/v2/Voice/Numbers/+15017122661',
        ))
    def test_fetch_response(self):
        """Verify a canned 200 payload is deserialized into an instance."""
        self.holodeck.mock(Response(
            200,
            '''
            {
                "country": "United States",
                "destination_number": "+18001234567",
                "inbound_call_price": {
                    "base_price": null,
                    "current_price": null,
                    "number_type": null
                },
                "iso_country": "US",
                "origination_number": "+987654321",
                "outbound_call_prices": [
                    {
                        "base_price": "0.013",
                        "current_price": "0.013",
                        "origination_prefixes": [
                            "ALL"
                        ]
                    }
                ],
                "price_unit": "USD",
                "url": "https://pricing.twilio.com/v2/Voice/Numbers/+18001234567"
            }
            '''
        ))
        actual = self.client.pricing.v2.voice \
            .numbers(destination_number="+15017122661").fetch()
        self.assertIsNotNone(actual)
| tysonholub/twilio-python | tests/integration/pricing/v2/voice/test_number.py | Python | mit | 1,884 |
# -*- coding: utf-8 -*-
import repy

try:
    from setuptools import setup
except ImportError:
    # Fallback for very old environments without setuptools.
    from distutils.core import setup

setup(
    name=repy.__pkgname__,
    version=repy.__version__,
    description="Python Regex cli tool",
    # Bug fix: setuptools/distutils expect ``author``; the previous
    # ``author_name`` keyword is an unknown distribution option that is
    # silently dropped (with a warning), leaving no author metadata.
    author=repy.__author_name__,
    author_email=repy.__author_email__,
    packages=["repy"],
    scripts=['bin/repy-cli'],
    data_files=[],
    include_package_data=True,
    install_requires=['docopt>=0.6.1'],
    classifiers=(
        # As from https://pypi.python.org/pypi?%3Aaction=list_classifiers
        #'Development Status :: 1 - Planning',
        #'Development Status :: 2 - Pre-Alpha',
        'Development Status :: 3 - Alpha',
        #'Development Status :: 4 - Beta',
        #'Development Status :: 5 - Production/Stable',
        #'Development Status :: 6 - Mature',
        #'Development Status :: 7 - Inactive',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.0',
        'Programming Language :: Python :: 3.1',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Environment :: Console',
        'Operating System :: POSIX',
        # 'Operating System :: Microsoft :: Windows',
        'Topic :: Software Development',
        'Topic :: cli',
        'Topic :: Regex')
)
| Grokzen/repy | setup.py | Python | mit | 1,433 |
# -*- coding: latin-1 -*-
"""
SimpleGUICS2Pygame package (January 8, 2014)
It is primarily a standard Python_ (2 **and** 3) module
reimplementing the SimpleGUI particular module of CodeSkulptor_
(a browser Python interpreter).
Require Pygame_
(except for the Timer class)
(`Unofficial Windows Binaries`_)
(and must be installed separately).
Module simpleplot require matplotlib_ .
`Online HTML documentation`_ on Read The Docs.
| Sources and installers on Bitbucket: https://bitbucket.org/OPiMedia/simpleguics2pygame
| and on PyPI: https://pypi.python.org/pypi/SimpleGUICS2Pygame .
Piece of SimpleGUICS2Pygame.
https://bitbucket.org/OPiMedia/simpleguics2pygame
GPLv3 --- Copyright (C) 2013 Olivier Pirson
http://www.opimedia.be/
.. _CodeSkulptor: http://www.codeskulptor.org/
.. _`Online HTML documentation`: https://readthedocs.org/docs/simpleguics2pygame/en/latest/
.. _Pygame: http://www.pygame.org/
.. _Python: http://www.python.org/
.. _`Unofficial Windows Binaries`: http://www.lfd.uci.edu/~gohlke/pythonlibs/#pygame
v.01.04.00 --- December 16, 2013
v.01.03.00 --- December 13, 2013
v.01.02.00 --- November 8, 2013
v.01.01.00 --- November 1st, 2013
v.01.00.02 --- October 31, 2013
v.01.00.01 --- October 9, 2013
v.01.00.00 --- July 13, 2013
v.00.92.00 --- June 27, 2013
v.00.91.00 --- June 23, 2013
v.00.90.10 --- June 19, 2013
v.00.90.00 --- June 13, 2013
Started on May 21, 2013
`Complete changelog`_
.. _`Complete changelog`: https://simpleguics2pygame.readthedocs.org/en/latest/ChangeLog.html
"""
# Package metadata constants; the leading underscore keeps them out of
# ``from SimpleGUICS2Pygame import *``.
_VERSION = '01.04.00'
"""
Version of SimpleGUICS2Pygame package.
"""
_WEBSITE = 'https://bitbucket.org/OPiMedia/simpleguics2pygame'
"""
Website of the project.
"""
#
# GPLv3
# ------
# Copyright (C) 2013 Olivier Pirson
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
| moehuster/python | SimpleGUICS2Pygame/__init__.py | Python | gpl-2.0 | 2,379 |
# -*- coding: utf-8 -*-
# The Keccak sponge function, designed by Guido Bertoni, Joan Daemen,
# Michaël Peeters and Gilles Van Assche. For more information, feedback or
# questions, please refer to our website: http://keccak.noekeon.org/
#
# Implementation by Renaud Bauvin,
# hereby denoted as "the implementer".
#
# To the extent possible under law, the implementer has waived all copyright
# and related or neighboring rights to the source code in this file.
# http://creativecommons.org/publicdomain/zero/1.0/
import math
class KeccakError(Exception):
    """Class of error used in the Keccak implementation

    Use: raise KeccakError("Text to be displayed")"""
    def __init__(self, value):
        # Keep the message so __str__ can render it.
        self.value = value
    def __str__(self):
        return repr(self.value)
class Keccak:
    """
    Class implementing the Keccak sponge function (basis of the SHA-3
    family, FIPS 202).

    Bug fix applied throughout: ``KeccakError`` is the exception class
    itself, but the original code raised ``KeccakError.KeccakError(...)``,
    which fails with AttributeError instead of raising the intended error.
    """

    def __init__(self, b=1600):
        """Constructor:

        b: parameter b, must be 25, 50, 100, 200, 400, 800 or 1600 (default value)"""
        self.setB(b)

    def setB(self, b):
        """Set the value of the parameter b (and thus w, l and nr)

        b: parameter b, must be chosen among [25, 50, 100, 200, 400, 800, 1600]
        """
        if b not in [25, 50, 100, 200, 400, 800, 1600]:
            raise KeccakError('b value not supported - use 25, 50, 100, 200, 400, 800 or 1600')

        # Update all the parameters based on the used value of b
        self.b = b
        self.w = b // 25
        self.l = int(math.log(self.w, 2))
        self.nr = 12 + 2 * self.l

    # Constants

    ## Round constants
    RC = [0x0000000000000001,
          0x0000000000008082,
          0x800000000000808A,
          0x8000000080008000,
          0x000000000000808B,
          0x0000000080000001,
          0x8000000080008081,
          0x8000000000008009,
          0x000000000000008A,
          0x0000000000000088,
          0x0000000080008009,
          0x000000008000000A,
          0x000000008000808B,
          0x800000000000008B,
          0x8000000000008089,
          0x8000000000008003,
          0x8000000000008002,
          0x8000000000000080,
          0x000000000000800A,
          0x800000008000000A,
          0x8000000080008081,
          0x8000000000008080,
          0x0000000080000001,
          0x8000000080008008]

    ## Rotation offsets
    r = [[0, 36, 3, 41, 18],
         [1, 44, 10, 45, 2],
         [62, 6, 43, 15, 61],
         [28, 55, 25, 21, 56],
         [27, 20, 39, 8, 14]]

    ## Generic utility functions

    def rot(self, x, n):
        """Bitwise rotation (to the left) of n bits considering the \
        string of bits is w bits long"""
        n = n % self.w
        return ((x >> (self.w - n)) + (x << n)) % (1 << self.w)

    def fromHexStringToLane(self, string):
        """Convert a string of bytes written in hexadecimal to a lane value"""
        # Check that the string has an even number of characters i.e. whole number of bytes
        if len(string) % 2 != 0:
            raise KeccakError("The provided string does not end with a full byte")

        # Lanes are little-endian: reverse the byte order before parsing.
        temp = ''
        nrBytes = len(string) // 2
        for i in range(nrBytes):
            offset = (nrBytes - i - 1) * 2
            temp += string[offset:offset + 2]
        return int(temp, 16)

    def fromLaneToHexString(self, lane):
        """Convert a lane value to a string of bytes written in hexadecimal"""
        laneHexBE = (("%%0%dX" % (self.w // 4)) % lane)
        # Lanes are little-endian: reverse the byte order.
        temp = ''
        nrBytes = len(laneHexBE) // 2
        for i in range(nrBytes):
            offset = (nrBytes - i - 1) * 2
            temp += laneHexBE[offset:offset + 2]
        return temp.upper()

    def printState(self, state, info):
        """Print on screen the state of the sponge function preceded by \
        string info

        state: state of the sponge function
        info: a string of characters used as identifier"""
        print("Current value of state: %s" % (info))
        for y in range(5):
            line = []
            for x in range(5):
                line.append(hex(state[x][y]))
            print('\t%s' % line)

    ### Conversion functions String <-> Table (and vice-versa)

    def convertStrToTable(self, string):
        """Convert a string of bytes to its 5×5 matrix representation

        string: string of bytes of hex-coded bytes (e.g. '9A2C...')"""
        # Check the input parameters
        if self.w % 8 != 0:
            raise KeccakError("w is not a multiple of 8")
        if len(string) != 2 * (self.b) // 8:
            raise KeccakError("string can't be divided in 25 blocks of w bits\
            i.e. string must have exactly b bits")

        # Convert
        output = [[0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0],
                  [0, 0, 0, 0, 0]]
        for x in range(5):
            for y in range(5):
                offset = 2 * ((5 * y + x) * self.w) // 8
                output[x][y] = self.fromHexStringToLane(string[offset:offset + (2 * self.w // 8)])
        return output

    def convertTableToStr(self, table):
        """Convert a 5×5 matrix representation to its string representation"""
        # Check input format
        if self.w % 8 != 0:
            raise KeccakError("w is not a multiple of 8")
        if (len(table) != 5) or (False in [len(row) == 5 for row in table]):
            raise KeccakError("table must be 5×5")

        # Convert
        output = [''] * 25
        for x in range(5):
            for y in range(5):
                output[5 * y + x] = self.fromLaneToHexString(table[x][y])
        output = ''.join(output).upper()
        return output

    def Round(self, A, RCfixed):
        """Perform one round of computation as defined in the Keccak-f permutation

        A: current state (5×5 matrix)
        RCfixed: value of round constant to use (integer)
        """
        # Initialisation of temporary variables
        B = [[0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0]]
        C = [0, 0, 0, 0, 0]
        D = [0, 0, 0, 0, 0]

        # Theta step
        for x in range(5):
            C[x] = A[x][0] ^ A[x][1] ^ A[x][2] ^ A[x][3] ^ A[x][4]
        for x in range(5):
            D[x] = C[(x - 1) % 5] ^ self.rot(C[(x + 1) % 5], 1)
        for x in range(5):
            for y in range(5):
                A[x][y] = A[x][y] ^ D[x]

        # Rho and Pi steps
        for x in range(5):
            for y in range(5):
                B[y][(2 * x + 3 * y) % 5] = self.rot(A[x][y], self.r[x][y])

        # Chi step
        for x in range(5):
            for y in range(5):
                A[x][y] = B[x][y] ^ ((~B[(x + 1) % 5][y]) & B[(x + 2) % 5][y])

        # Iota step
        A[0][0] = A[0][0] ^ RCfixed
        return A

    def KeccakF(self, A, verbose=False):
        """Perform Keccak-f function on the state A

        A: 5×5 matrix containing the state
        verbose: a boolean flag activating the printing of intermediate computations
        """
        if verbose:
            self.printState(A, "Before first round")

        for i in range(self.nr):
            # NB: result is truncated to lane size
            A = self.Round(A, self.RC[i] % (1 << self.w))
            if verbose:
                # Typo fix: "Satus" -> "Status"
                self.printState(A, "Status end of round #%d/%d" % (i + 1, self.nr))

        return A

    def appendBit(self, M, bit):
        """Append a bit to M

        M: message pair (length in bits, string of hex characters ('9AFC...'))
        bit: 0 or 1
        Example: appendBit([7, '30'],1) returns [8,'B0']
        Example: appendBit([8, '30'],1) returns [9,'3001']
        """
        [my_string_length, my_string] = M
        if my_string_length > (len(my_string) // 2 * 8):
            raise KeccakError("the string is too short to contain the number of bits announced")
        if ((my_string_length % 8) == 0):
            # Start a new byte holding the appended bit.
            my_string = my_string[0:my_string_length // 8 * 2] + "%02X" % bit
            my_string_length = my_string_length + 1
        else:
            # Set the bit inside the last, partially filled byte.
            nr_bytes_filled = my_string_length // 8
            nbr_bits_filled = my_string_length % 8
            my_byte = int(my_string[nr_bytes_filled * 2:nr_bytes_filled * 2 + 2], 16)
            my_byte = my_byte + bit * (2 ** (nbr_bits_filled))
            my_byte = "%02X" % my_byte
            my_string = my_string[0:nr_bytes_filled * 2] + my_byte
            my_string_length = my_string_length + 1
        return [my_string_length, my_string]

    def appendDelimitedSuffix(self, M, suffix):
        """Append a delimited suffix to M

        M: message pair (length in bits, string of hex characters ('9AFC...'))
        suffix: integer coding a string of 0 to 7 bits, from LSB to MSB, delimited by a bit 1 at MSB
        Example: appendDelimitedSuffix([3, '00'], 0x06) returns [5, '10']
        Example: appendDelimitedSuffix([3, '00'], 0x1F) returns [7, '78']
        Example: appendDelimitedSuffix([8, '00'], 0x06) returns [10, '0002']
        Example: appendDelimitedSuffix([8, '00'], 0x1F) returns [12, '000F']
        """
        if (suffix == 0):
            raise KeccakError("the delimited suffix must not be zero")
        while(suffix != 1):
            M = self.appendBit(M, suffix % 2)
            suffix = suffix // 2
        return M

    def delimitedSuffixInBinary(self, delimitedSuffix):
        """Return the suffix bits (LSB first) as a '01' string, for display."""
        binary = ''
        while(delimitedSuffix != 1):
            binary = binary + ('%d' % (delimitedSuffix % 2))
            delimitedSuffix = delimitedSuffix // 2
        return binary

    ### Padding rule

    def pad10star1(self, M, n):
        """Pad M with the pad10*1 padding rule to reach a length multiple of r bits

        M: message pair (length in bits, string of hex characters ('9AFC...')
        n: length in bits (must be a multiple of 8)
        Example: pad10star1([60, 'BA594E0FB9EBBD03'],8) returns 'BA594E0FB9EBBD93'
        """
        [my_string_length, my_string] = M

        # Check the parameter n
        if n % 8 != 0:
            raise KeccakError("n must be a multiple of 8")

        # Check the length of the provided string
        if len(my_string) % 2 != 0:
            raise KeccakError("there must be an even number of digits")
        if my_string_length > (len(my_string) // 2 * 8):
            raise KeccakError("the string is too short to contain the number of bits announced")

        nr_bytes_filled = my_string_length // 8
        nbr_bits_filled = my_string_length % 8
        l = my_string_length % n
        if ((n - 8) <= l <= (n - 2)):
            # The first '1' bit and the closing '1' bit fall in the same byte.
            if (nbr_bits_filled == 0):
                my_byte = 0
            else:
                my_byte = int(my_string[nr_bytes_filled * 2:nr_bytes_filled * 2 + 2], 16)
            my_byte = my_byte + 2 ** (nbr_bits_filled) + 2 ** 7
            my_byte = "%02X" % my_byte
            my_string = my_string[0:nr_bytes_filled * 2] + my_byte
        else:
            if (nbr_bits_filled == 0):
                my_byte = 0
            else:
                my_byte = int(my_string[nr_bytes_filled * 2:nr_bytes_filled * 2 + 2], 16)
            my_byte = my_byte + 2 ** (nbr_bits_filled)
            my_byte = "%02X" % my_byte
            my_string = my_string[0:nr_bytes_filled * 2] + my_byte
            while((8 * len(my_string) // 2) % n < (n - 8)):
                my_string = my_string + '00'
            my_string = my_string + '80'

        return my_string

    def Keccak(self, M, r=1024, c=576, suffix=0x01, n=1024, verbose=False):
        """Compute the Keccak[r,c,d] sponge function on message M

        M: message pair (length in bits, string of hex characters ('9AFC...')
        r: bitrate in bits (default: 1024)
        c: capacity in bits (default: 576)
        suffix: the delimited suffix to append to all inputs (0x01 means none, 0x06 for SHA3-* and 0x1F for SHAKE*)
        n: length of output in bits (default: 1024),
        verbose: print the details of computations(default:False)
        """
        # Check the inputs
        if (r < 0) or (r % 8 != 0):
            raise KeccakError('r must be a multiple of 8 in this implementation')
        if (n % 8 != 0):
            raise KeccakError('outputLength must be a multiple of 8')
        self.setB(r + c)

        if verbose:
            print("Create a Keccak[r=%d, c=%d] function with '%s' suffix" % (r, c, self.delimitedSuffixInBinary(suffix)))

        # Compute lane length (in bits)
        w = (r + c) // 25

        # Initialisation of state
        S = [[0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0]]

        # Appending the suffix
        M = self.appendDelimitedSuffix(M, suffix)
        if verbose:
            print("After appending the suffix: ", M)

        # Padding of messages
        P = self.pad10star1(M, r)

        if verbose:
            print("String ready to be absorbed: %s (will be completed by %d x '00')" % (P, c // 8))

        # Absorbing phase
        for i in range((len(P) * 8 // 2) // r):
            Pi = self.convertStrToTable(P[i * (2 * r // 8):(i + 1) * (2 * r // 8)] + '00' * (c // 8))
            for y in range(5):
                for x in range(5):
                    S[x][y] = S[x][y] ^ Pi[x][y]
            S = self.KeccakF(S, verbose)

        if verbose:
            print("Value after absorption : %s" % (self.convertTableToStr(S)))

        # Squeezing phase
        Z = ''
        outputLength = n
        while outputLength > 0:
            string = self.convertTableToStr(S)
            Z = Z + string[:r * 2 // 8]
            outputLength -= r
            if outputLength > 0:
                S = self.KeccakF(S, verbose)
            # NB: done by block of length r, could have to be cut if outputLength
            # is not a multiple of r

        if verbose:
            print("Value after squeezing : %s" % (self.convertTableToStr(S)))

        return Z[:2 * n // 8]
| vinayps/cs205_project | code/Keccak.py | Python | gpl-3.0 | 13,828 |
# Copyright 2015 Mirantis Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ddt
from manilaclient.tests.functional import base
@ddt.ddt
class SharesMetadataReadWriteTest(base.BaseTestCase):
    """Functional create/read/update/delete tests for share metadata."""

    @classmethod
    def setUpClass(cls):
        super(SharesMetadataReadWriteTest, cls).setUpClass()
        # One shared share reused by tests that only touch metadata.
        cls.share = cls.create_share(
            client=cls.get_user_client(),
            cleanup_in_class=True)

    def test_set_metadata_in_share_creation(self):
        md = {"key1": "value1", "key2": "value2"}

        # Create share with metadata
        share = self.create_share(
            metadata=md, cleanup_in_class=False, client=self.get_user_client())

        # Read share metadata
        metadata = self.user_client.get_share_metadata(share["id"])

        # Verify share metadata
        self.assertEqual(2, len(metadata))
        self.assertIn('key1', metadata)
        self.assertIn('key2', metadata)
        self.assertEqual(md['key1'], metadata['key1'])
        self.assertEqual(md['key2'], metadata['key2'])

    def test_set_and_get_metadata(self):
        # Create share
        share = self.create_share(
            cleanup_in_class=False, client=self.get_user_client())

        # Set share metadata
        md = {"key3": "value3", "key4": "value4"}
        self.user_client.set_share_metadata(share["id"], md)

        # Read share metadata
        metadata = self.user_client.get_share_metadata(share["id"])

        # Verify share metadata
        self.assertEqual(2, len(metadata))
        self.assertIn('key3', metadata)
        self.assertIn('key4', metadata)
        self.assertEqual(md['key3'], metadata['key3'])
        self.assertEqual(md['key4'], metadata['key4'])

    def test_set_and_delete_metadata(self):
        # Create share
        share = self.create_share(
            cleanup_in_class=False, client=self.get_user_client())

        # Set share metadata
        md = {"key3": "value3", "key4": "value4"}
        self.user_client.set_share_metadata(share["id"], md)

        # Unset share metadata
        self.user_client.unset_share_metadata(share["id"], md.keys())

        # Verify deletion of share metadata
        metadata = self.user_client.get_share_metadata(share["id"])
        self.assertEqual({}, metadata)

    def test_set_and_add_metadata(self):
        md = {'key5': 'value5'}

        # Create share with metadata
        share = self.create_share(
            metadata=md, cleanup_in_class=False, client=self.get_user_client())

        # Set share metadata twice; keys should accumulate, not replace.
        self.user_client.set_share_metadata(share["id"], {'key6': 'value6'})
        self.user_client.set_share_metadata(share["id"], {'key7': 'value7'})

        # Read share metadata
        metadata = self.user_client.get_share_metadata(share["id"])

        # Verify share metadata
        self.assertEqual(3, len(metadata))
        for i in (5, 6, 7):
            key = 'key%s' % i
            self.assertIn(key, metadata)
            self.assertEqual('value%s' % i, metadata[key])

    def test_set_and_replace_metadata(self):
        md = {'key8': 'value8'}

        # Create share with metadata
        share = self.create_share(
            metadata=md, cleanup_in_class=False, client=self.get_user_client())

        # Set share metadata
        self.user_client.set_share_metadata(share["id"], {'key9': 'value9'})

        # Replace all existing share metadata
        self.user_client.update_all_share_metadata(
            share["id"], {'key10': 'value10'})

        # Read share metadata
        metadata = self.user_client.get_share_metadata(share["id"])

        # Verify share metadata
        self.assertEqual(1, len(metadata))
        self.assertIn('key10', metadata)
        self.assertEqual('value10', metadata['key10'])

    @ddt.data(
        {"k": "value"},
        {"k" * 255: "value"},
        {"key": "v"},
        {"key": "v" * 1023})
    def test_set_metadata_min_max_sizes_of_keys_and_values(self, metadata):
        # Set share metadata
        self.user_client.set_share_metadata(self.share["id"], metadata)

        # Read share metadata
        get = self.user_client.get_share_metadata(self.share["id"])

        # Verify share metadata
        # Bug fix: dict.keys() returns a non-indexable view on Python 3,
        # so ``metadata.keys()[0]`` raised TypeError there; next(iter(...))
        # works on both Python 2 and 3.
        key = next(iter(metadata))
        self.assertIn(key, get)
        self.assertEqual(metadata[key], get[key])

    @ddt.data(
        {"k": "value"},
        {"k" * 255: "value"},
        {"key": "v"},
        {"key": "v" * 1023})
    def test_update_metadata_min_max_sizes_of_keys_and_values(self, metadata):
        # Update share metadata
        self.user_client.update_all_share_metadata(self.share["id"], metadata)

        # Read share metadata
        get = self.user_client.get_share_metadata(self.share["id"])

        # Verify share metadata
        self.assertEqual(len(metadata), len(get))
        for key in metadata:
            self.assertIn(key, get)
            self.assertEqual(metadata[key], get[key])
| sniperganso/python-manilaclient | manilaclient/tests/functional/test_shares_metadata.py | Python | apache-2.0 | 5,486 |
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Tests for the functional saver."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.core.protobuf import config_pb2
from tensorflow.python.eager import test
from tensorflow.python.eager import wrap_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.platform import gfile
from tensorflow.python.training.saving import functional_saver
from tensorflow.python.training.saving import saveable_object_util
class SaverTest(test.TestCase):
  """Tests for the functional checkpoint savers.

  Covers a save/restore round trip through the single-device saver,
  serialization of a MultiDeviceSaver to a SaverDef proto, and sharding
  of checkpoint files when variables live on multiple devices.
  """

  @test_util.run_in_graph_and_eager_modes
  def test_resource_variable(self):
    """Round-trips one variable through _SingleDeviceSaver."""
    v1 = resource_variable_ops.ResourceVariable(2.)
    self.evaluate(v1.initializer)
    saver = functional_saver._SingleDeviceSaver(
        saveable_object_util.saveable_objects_for_op(v1, "x"))
    prefix = os.path.join(self.get_temp_dir(), "ckpt")
    self.evaluate(saver.save(constant_op.constant(prefix)))
    # Saving produces exactly two files matching the prefix.
    self.assertEqual(2, len(gfile.Glob(prefix + "*")))
    # Clobber the variable, then restore the saved value (2.).
    self.evaluate(v1.assign(1.))
    self.evaluate(saver.restore(prefix))
    self.assertEqual(2., self.evaluate(v1))
    # A different variable saved under the same name ("x") picks up the
    # same checkpointed value on restore.
    v2 = resource_variable_ops.ResourceVariable(3.)
    self.evaluate(v2.initializer)
    second_saver = functional_saver._SingleDeviceSaver(
        saveable_object_util.saveable_objects_for_op(v2, "x"))
    self.evaluate(second_saver.restore(prefix))
    self.assertEqual(2., self.evaluate(v2))

  def test_to_proto(self):
    """Serializes a MultiDeviceSaver and drives it via pruned functions."""
    v1 = resource_variable_ops.ResourceVariable(2.)
    saver = functional_saver.MultiDeviceSaver(
        saveable_object_util.saveable_objects_for_op(v1, "x"))
    prefix = os.path.join(self.get_temp_dir(), "ckpt")
    # Capture the proto produced while tracing to_proto() inside a
    # wrapped function graph.
    proto_accumulator = []
    wrapped = wrap_function.wrap_function(
        lambda: proto_accumulator.append(saver.to_proto()), signature=())
    self.assertEqual(1, len(proto_accumulator))
    proto = proto_accumulator[0]
    # Build callable save/restore functions from the tensor and op names
    # recorded in the proto.
    save = wrapped.prune(
        feeds=wrapped.graph.get_tensor_by_name(proto.filename_tensor_name),
        fetches=wrapped.graph.get_tensor_by_name(proto.save_tensor_name))
    restore = wrapped.prune(
        feeds=wrapped.graph.get_tensor_by_name(proto.filename_tensor_name),
        fetches=wrapped.graph.get_operation_by_name(proto.restore_op_name))
    save_path = save(constant_op.constant(prefix))
    v1.assign(1.)
    restore(constant_op.constant(save_path))
    self.assertEqual(2., self.evaluate(v1))
    # A fresh MultiDeviceSaver can also restore from the same path.
    v2 = resource_variable_ops.ResourceVariable(3.)
    second_saver = functional_saver.MultiDeviceSaver(
        saveable_object_util.saveable_objects_for_op(v2, "x"))
    second_saver.restore(save_path)
    self.assertEqual(2., self.evaluate(v2))

  @test_util.run_v1_only(
      "Needs an API to setup multiple devices, b/124805129")
  # Set up multiple devices when graph building. Before test.main() we configure
  # the devices for eager execution.
  @test_util.run_in_graph_and_eager_modes(
      config=config_pb2.ConfigProto(device_count={"CPU": 3}))
  def test_checkpoint_is_sharded_by_device(self):
    """Saving variables placed on three devices yields four files."""
    with ops.device("cpu:0"):
      v0 = resource_variable_ops.ResourceVariable(0.)
    with ops.device("cpu:1"):
      v1 = resource_variable_ops.ResourceVariable(1.)
    with ops.device("cpu:2"):
      v2 = resource_variable_ops.ResourceVariable(2.)
    self.evaluate([v0.initializer, v1.initializer, v2.initializer])
    saver = functional_saver.MultiDeviceSaver(
        list(saveable_object_util.saveable_objects_for_op(v0, "v0"))
        + list(saveable_object_util.saveable_objects_for_op(v1, "v1"))
        + list(saveable_object_util.saveable_objects_for_op(v2, "v2")))
    prefix = os.path.join(self.get_temp_dir(), "ckpt")
    self.evaluate(saver.save(constant_op.constant(prefix)))
    # Three devices -> four files for the prefix (vs. two in the
    # single-device test above).
    self.assertEqual(4, len(gfile.Glob(prefix + "*")))
    # Overwrite everything, then check each variable restores its own value.
    self.evaluate(v0.assign(-1.))
    self.evaluate(v1.assign(-1.))
    self.evaluate(v2.assign(-1.))
    self.evaluate(saver.restore(constant_op.constant(prefix)))
    self.assertEqual(0., self.evaluate(v0))
    self.assertEqual(1., self.evaluate(v1))
    self.assertEqual(2., self.evaluate(v2))
if __name__ == "__main__":
  # Configure three virtual CPU devices before enabling eager execution so
  # the sharding test above can place variables on distinct devices.
  ops.enable_eager_execution(
      config=config_pb2.ConfigProto(device_count={"CPU": 3}))
  test.main()
| ghchinoy/tensorflow | tensorflow/python/training/saving/functional_saver_test.py | Python | apache-2.0 | 5,040 |
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
import json
from c7n.exceptions import PolicyValidationError
class StructureParser:
    """Provide fast validation and inspection of a policy file.

    Intent is to provide more humane validation for top level errors
    instead of printing full schema as error message.
    """
    allowed_file_keys = {'vars', 'policies'}
    required_policy_keys = {'name', 'resource'}
    allowed_policy_keys = {'name', 'resource', 'title', 'description', 'mode',
                           'tags', 'max-resources', 'metadata', 'query',
                           'filters', 'actions', 'source', 'conditions',
                           # legacy keys subject to deprecation.
                           'region', 'start', 'end', 'tz', 'max-resources-percent',
                           'comments', 'comment'}

    def validate(self, data):
        """Validate the top-level structure of a loaded policy file."""
        if not isinstance(data, dict):
            raise PolicyValidationError((
                "Policy file top level data structure "
                "should be a mapping/dict, instead found:%s") % (
                    type(data).__name__))
        unexpected = set(data) - self.allowed_file_keys
        if unexpected:
            raise PolicyValidationError((
                'Policy files top level keys are %s, found extra: %s' % (
                    ', '.join(self.allowed_file_keys),
                    ', '.join(unexpected))))
        if 'policies' not in data:
            raise PolicyValidationError("`policies` list missing")
        policies = data.get('policies', [])
        if not isinstance(policies, list):
            raise PolicyValidationError((
                '`policies` key should be an array/list found: %s' % (
                    type(policies).__name__)))
        for policy in policies:
            self.validate_policy(policy)

    def validate_policy(self, p):
        """Validate an individual policy mapping."""
        if not isinstance(p, dict):
            raise PolicyValidationError((
                'policy must be a dictionary/mapping found:%s policy:\n %s' % (
                    type(p).__name__, json.dumps(p, indent=2))))
        keys = set(p)
        if self.required_policy_keys - keys:
            raise PolicyValidationError(
                'policy missing required keys (name, resource) data:\n %s' % (
                    json.dumps(p, indent=2)))
        unknown = keys - self.allowed_policy_keys
        if unknown:
            raise PolicyValidationError(
                'policy:%s has unknown keys: %s' % (
                    p['name'], ','.join(unknown)))
        # `filters`/`actions` may be absent or None, but if present and
        # non-None they must be lists of mappings (or strings).
        if not isinstance(p.get('filters', []), (list, type(None))):
            raise PolicyValidationError((
                'policy:%s must use a list for filters found:%s' % (
                    p['name'], type(p['filters']).__name__)))
        element_types = (dict, str)
        for entry in p.get('filters', ()):
            if isinstance(entry, element_types):
                continue
            raise PolicyValidationError((
                'policy:%s filter must be a mapping/dict found:%s' % (
                    p.get('name', 'unknown'), type(entry).__name__)))
        if not isinstance(p.get('actions', []), (list, type(None))):
            raise PolicyValidationError((
                'policy:%s must use a list for actions found:%s' % (
                    p.get('name', 'unknown'), type(p['actions']).__name__)))
        for entry in p.get('actions', ()):
            if isinstance(entry, element_types):
                continue
            raise PolicyValidationError((
                'policy:%s action must be a mapping/dict found:%s' % (
                    p.get('name', 'unknown'), type(entry).__name__)))

    def get_resource_types(self, data):
        """Return the set of resource types used, 'aws.'-prefixed by default."""
        resources = set()
        for policy in data.get('policies', []):
            rtype = policy['resource']
            resources.add(rtype if '.' in rtype else 'aws.%s' % rtype)
        return resources
| thisisshi/cloud-custodian | c7n/structure.py | Python | apache-2.0 | 3,895 |
from __future__ import unicode_literals, absolute_import
from .token import Token
# Math
ADD = Token('+')
SUB = Token('-')
MUL = Token('*')
DIV = Token('/')
MOD = Token('%')
POW = Token('^')
# Comparison
EQ = Token('=')
NEQ = Token('<>')
LT = Token('<')
GT = Token('>')
LTE = Token('<=')
GTE = Token('>=')
# Boolean
AND = Token('AND')
OR = Token('OR')
XOR = Token('XOR')
NOT = Token('NOT')
# Regex
REGEXP = Token('=~')
# String/collection
CONCAT = Token('+')
# Collection
IN = Token('IN')
# Operator sets
math = {ADD, SUB, MUL, DIV, MOD, POW}
comparison = {EQ, NEQ, LT, GT, LTE, GTE}
boolean = {AND, OR, XOR, NOT}
regexp = {REGEXP}
string = {CONCAT}
collection = {CONCAT, IN}
# All operators
operators = math | comparison | boolean | regexp | string | collection
| bruth/cypher | cypher/operators.py | Python | bsd-2-clause | 772 |
# Author: Denis A. Engemann <d.engemann@gmail.com>
#
# License: BSD (3-clause)
import os.path as op
from nose.tools import assert_equal, assert_raises
from mne import io, Epochs, read_events, pick_types
from mne.utils import requires_sklearn
from mne.decoding import compute_ems
# Paths to the small testing dataset shipped with the mne.io tests.
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
curdir = op.join(op.dirname(__file__))
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
# Epoch window (seconds) and the event ids used by the test below.
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
@requires_sklearn
def test_ems():
    """Test event-matched spatial filters"""
    raw = io.read_raw_fif(raw_fname, preload=False)

    # create unequal number of events
    events = read_events(event_name)
    events[-2, 2] = 3
    picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
                       eog=False, exclude='bads')
    # Subsample the channels to keep the test fast.
    picks = picks[1:13:3]
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    # compute_ems must refuse unbalanced conditions...
    assert_raises(ValueError, compute_ems, epochs, ['aud_l', 'vis_l'])
    epochs = epochs.equalize_event_counts(epochs.event_id, copy=False)[0]
    # ...and unknown condition names.
    assert_raises(KeyError, compute_ems, epochs, ['blah', 'hahah'])
    surrogates, filters, conditions = compute_ems(epochs)
    # NOTE(review): comparing list(set(...)) relies on CPython's set
    # iteration order for small ints; a sorted() would be more robust.
    assert_equal(list(set(conditions)), [1, 3])

    # With three conditions, the pair to compare must be given explicitly.
    events = read_events(event_name)
    event_id2 = dict(aud_l=1, aud_r=2, vis_l=3)
    epochs = Epochs(raw, events, event_id2, tmin, tmax, picks=picks,
                    baseline=(None, 0), preload=True)
    epochs = epochs.equalize_event_counts(epochs.event_id, copy=False)[0]
    n_expected = sum([len(epochs[k]) for k in ['aud_l', 'vis_l']])
    assert_raises(ValueError, compute_ems, epochs)
    surrogates, filters, conditions = compute_ems(epochs, ['aud_r', 'vis_l'])
    # One surrogate/condition entry per epoch of the two picked conditions.
    assert_equal(n_expected, len(surrogates))
    assert_equal(n_expected, len(conditions))
    assert_equal(list(set(conditions)), [2, 3])
    raw.close()
| wronk/mne-python | mne/decoding/tests/test_ems.py | Python | bsd-3-clause | 2,002 |
# This file is part of pybgen.
#
# The MIT License (MIT)
#
# Copyright (c) 2017 Louis-Philippe Lemieux Perreault
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import random
import unittest
import numpy as np
from pkg_resources import resource_filename
from .. import pybgen
from .truths import truths
__all__ = ["reader_tests"]
class ReaderTests(unittest.TestCase):
    """Checks PyBGEN dosage reads against pre-computed truth files.

    Concrete subclasses provide ``bgen_filename`` and ``truth_filename``
    for one specific BGEN encoding.
    """

    def setUp(self):
        # Getting the truth for this file
        self.truths = truths["dosage"][self.truth_filename]
        # Reading the BGEN files
        bgen_fn = resource_filename(__name__, self.bgen_filename)
        self.bgen = pybgen.PyBGEN(bgen_fn)

    def tearDown(self):
        # Closing the object
        self.bgen.close()

    def _compare_variant(self, expected, observed):
        """Compare two variants."""
        self.assertEqual(expected.name, observed.name)
        self.assertEqual(expected.chrom, observed.chrom)
        self.assertEqual(expected.pos, observed.pos)
        self.assertEqual(expected.a1, observed.a1)
        self.assertEqual(expected.a2, observed.a2)

    def _check_variant_lookup(self, name):
        """Fetch a single variant by name and verify its info and data.

        Shared by the first/middle/last lookup tests below, which were
        previously three identical copies of this body.
        """
        # Getting the results (there should be only one)
        r = self.bgen.get_variant(name)
        self.assertEqual(1, len(r))
        variant, dosage = r.pop()
        # Checking the variant
        self._compare_variant(
            self.truths["variants"][name]["variant"], variant,
        )
        # Checking the dosage
        np.testing.assert_array_almost_equal(
            self.truths["variants"][name]["data"], dosage,
        )

    def test_check_returned_value(self):
        """Tests the module is returning dosage data."""
        self.assertFalse(self.bgen._return_probs)

    def test_repr(self):
        """Tests the __repr__ representation."""
        self.assertEqual(
            "PyBGEN({:,d} samples; {:,d} variants)".format(
                self.truths["nb_samples"],
                self.truths["nb_variants"],
            ),
            str(self.bgen),
        )

    def test_nb_samples(self):
        """Tests the number of samples."""
        self.assertEqual(self.truths["nb_samples"], self.bgen.nb_samples)

    def test_nb_variants(self):
        """Tests the number of variants."""
        self.assertEqual(self.truths["nb_variants"], self.bgen.nb_variants)

    def test_samples(self):
        """Tests the samples attribute."""
        if self.truths["samples"] is None:
            # Some files carry no sample names at all.
            self.assertTrue(self.bgen.samples is None)
        else:
            self.assertEqual(self.truths["samples"], self.bgen.samples)

    def test_get_first_variant(self):
        """Tests getting the first variant of the file."""
        self._check_variant_lookup("RSID_2")

    def test_get_middle_variant(self):
        """Tests getting a variant in the middle of the file."""
        self._check_variant_lookup("RSID_148")

    def test_get_last_variant(self):
        """Tests getting the last variant of the file."""
        self._check_variant_lookup("RSID_200")

    def test_get_missing_variant(self):
        """Tests getting a variant which is absent from the BGEN file."""
        with self.assertRaises(ValueError) as cm:
            self.bgen.get_variant("UNKOWN_VARIANT_NAME")
        self.assertEqual(
            "UNKOWN_VARIANT_NAME: name not found",
            str(cm.exception),
        )

    def test_iter_all_variants(self):
        """Tests the iteration of all variants."""
        seen_variants = set()
        for variant, dosage in self.bgen.iter_variants():
            # The name of the variant
            name = variant.name
            seen_variants.add(name)
            # Comparing the variant
            self._compare_variant(
                self.truths["variants"][name]["variant"],
                variant,
            )
            # Comparing the dosage
            np.testing.assert_array_almost_equal(
                self.truths["variants"][name]["data"], dosage,
            )
        # Checking if we checked all variants
        self.assertEqual(seen_variants, self.truths["variant_set"])

    def test_as_iterator(self):
        """Tests the module as iterator."""
        seen_variants = set()
        for variant, dosage in self.bgen:
            # The name of the variant
            name = variant.name
            seen_variants.add(name)
            # Comparing the variant
            self._compare_variant(
                self.truths["variants"][name]["variant"], variant,
            )
            # Comparing the dosage
            np.testing.assert_array_almost_equal(
                self.truths["variants"][name]["data"], dosage,
            )
        # Checking if we checked all variants
        self.assertEqual(seen_variants, self.truths["variant_set"])

    def test_iter_variant_info(self):
        """Tests the iteration of all variants' information."""
        seen_variants = set()
        for variant in self.bgen.iter_variant_info():
            # The name of the variant
            name = variant.name
            seen_variants.add(name)
            # Comparing the variant
            self._compare_variant(
                self.truths["variants"][name]["variant"], variant,
            )
        # Checking if we checked all variants
        self.assertEqual(seen_variants, self.truths["variant_set"])

    def test_iter_variants_in_region(self):
        """Tests the iteration of all variants in a genomic region."""
        seen_variants = set()
        iterator = self.bgen.iter_variants_in_region("01", 67000, 70999)
        for variant, dosage in iterator:
            # The name of the variant
            name = variant.name
            seen_variants.add(name)
            # Comparing the variant
            self._compare_variant(
                self.truths["variants"][name]["variant"], variant,
            )
            # Comparing the dosage
            np.testing.assert_array_almost_equal(
                self.truths["variants"][name]["data"], dosage,
            )
        # Checking if we checked all variants (the region is inclusive on
        # both bounds).
        expected = set()
        for name in self.truths["variant_set"]:
            variant = self.truths["variants"][name]["variant"]
            if variant.chrom == "01":
                if variant.pos >= 67000 and variant.pos <= 70999:
                    expected.add(name)
        self.assertEqual(seen_variants, expected)

    def test_get_specific_variant(self):
        """Test for specific variant lookup."""
        seen_variants = set()
        iterator = self.bgen.get_specific_variant("01", 67000, "A", "G")
        for variant, dosage in iterator:
            # The name of the variant
            name = variant.name
            seen_variants.add(name)
            # Comparing the variant
            self._compare_variant(
                self.truths["variants"][name]["variant"], variant,
            )
            # Comparing the dosage
            np.testing.assert_array_almost_equal(
                self.truths["variants"][name]["data"], dosage,
            )
        # Checking if we checked all variants at that exact position
        expected = set()
        for name in self.truths["variant_set"]:
            variant = self.truths["variants"][name]["variant"]
            if variant.chrom == "01":
                if variant.pos == 67000:
                    expected.add(name)
        self.assertEqual(seen_variants, expected)

    def test_get_missing_specific_variant(self):
        """Tests getting a specific variant which is absent from the file."""
        with self.assertRaises(ValueError) as cm:
            self.bgen.get_specific_variant("01", 67000, "A", "T")
        self.assertEqual(
            "01:67000 A/T: variant not found",
            str(cm.exception),
        )

    def test_iter_seeks(self):
        """Tests the _iter_seeks function."""
        # Fetching random seeks from the index
        self.bgen._bgen_index.execute(
            "SELECT rsid, file_start_position FROM Variant"
        )
        seeks = random.sample(self.bgen._bgen_index.fetchall(), 5)

        seen_variants = set()
        iterator = self.bgen._iter_seeks([_[1] for _ in seeks])
        for variant, dosage in iterator:
            # The name of the variant
            name = variant.name
            seen_variants.add(name)
            # Comparing the variant
            self._compare_variant(
                self.truths["variants"][name]["variant"], variant,
            )
            # Comparing the dosage
            np.testing.assert_array_almost_equal(
                self.truths["variants"][name]["data"], dosage,
            )
        # Checking if we checked all variants
        self.assertEqual(seen_variants, {_[0] for _ in seeks})

    def test_iter_variants_by_name(self):
        """Tests the iteration of variants by name."""
        # Fetching random variants in the index
        self.bgen._bgen_index.execute("SELECT rsid FROM Variant")
        names = [
            _[0] for _ in random.sample(self.bgen._bgen_index.fetchall(), 5)
        ]

        seen_variants = set()
        iterator = self.bgen.iter_variants_by_names(names)
        for variant, dosage in iterator:
            # The name of the variant
            name = variant.name
            seen_variants.add(name)
            # Comparing the variant
            self._compare_variant(
                self.truths["variants"][name]["variant"],
                variant,
            )
            # Comparing the dosage
            np.testing.assert_array_almost_equal(
                self.truths["variants"][name]["data"], dosage,
            )
        # Checking if we checked all variants
        self.assertEqual(seen_variants, set(names))
class ProbsReaderTests(ReaderTests):
    """Same battery of tests, but reading probabilities instead of dosage."""

    def setUp(self):
        # Getting the truth for this file (probability truths, not dosage)
        self.truths = truths["probs"][self.truth_filename]
        # Reading the BGEN files with probs_only set
        bgen_fn = resource_filename(__name__, self.bgen_filename)
        self.bgen = pybgen.PyBGEN(bgen_fn, probs_only=True)

    def test_check_returned_value(self):
        """Tests the module is returning probability data."""
        self.assertTrue(self.bgen._return_probs)
# Concrete test cases: each subclass only points the shared ReaderTests /
# ProbsReaderTests machinery at one BGEN encoding and its matching truth
# file (bit depth, compression and layout vary per file).
class Test32bits(ReaderTests):
    bgen_filename = os.path.join("data", "example.32bits.bgen")
    truth_filename = "example.32bits.truths.txt.bz2"


class Test32bitsProbs(ProbsReaderTests):
    bgen_filename = os.path.join("data", "example.32bits.bgen")
    truth_filename = "example.32bits.probs.truths.txt.bz2"


class Test24bits(ReaderTests):
    bgen_filename = os.path.join("data", "example.24bits.bgen")
    truth_filename = "example.24bits.truths.txt.bz2"


class Test24bitsProbs(ProbsReaderTests):
    bgen_filename = os.path.join("data", "example.24bits.bgen")
    truth_filename = "example.24bits.probs.truths.txt.bz2"


class Test16bits(ReaderTests):
    bgen_filename = os.path.join("data", "example.16bits.bgen")
    truth_filename = "example.16bits.truths.txt.bz2"


class Test16bitsProbs(ProbsReaderTests):
    bgen_filename = os.path.join("data", "example.16bits.bgen")
    truth_filename = "example.16bits.probs.truths.txt.bz2"


# The zstd-compressed files require the optional 'zstandard' module.
@unittest.skipIf(not pybgen.HAS_ZSTD, "module 'zstandard' not installed")
class Test16bitsZstd(ReaderTests):
    bgen_filename = os.path.join("data", "example.16bits.zstd.bgen")
    truth_filename = "example.16bits.zstd.truths.txt.bz2"


@unittest.skipIf(not pybgen.HAS_ZSTD, "module 'zstandard' not installed")
class Test16bitsZstdProbs(ProbsReaderTests):
    bgen_filename = os.path.join("data", "example.16bits.zstd.bgen")
    truth_filename = "example.16bits.zstd.probs.truths.txt.bz2"


class Test9bits(ReaderTests):
    bgen_filename = os.path.join("data", "example.9bits.bgen")
    truth_filename = "example.9bits.truths.txt.bz2"


class Test9bitsProbs(ProbsReaderTests):
    bgen_filename = os.path.join("data", "example.9bits.bgen")
    truth_filename = "example.9bits.probs.truths.txt.bz2"


class Test8bits(ReaderTests):
    bgen_filename = os.path.join("data", "example.8bits.bgen")
    truth_filename = "example.8bits.truths.txt.bz2"


class Test8bitsProbs(ProbsReaderTests):
    bgen_filename = os.path.join("data", "example.8bits.bgen")
    truth_filename = "example.8bits.probs.truths.txt.bz2"


class Test3bits(ReaderTests):
    bgen_filename = os.path.join("data", "example.3bits.bgen")
    truth_filename = "example.3bits.truths.txt.bz2"


class Test3bitsProbs(ProbsReaderTests):
    bgen_filename = os.path.join("data", "example.3bits.bgen")
    truth_filename = "example.3bits.probs.truths.txt.bz2"


# 'cohort1' files use the older BGEN layout 1.
class TestLayout1(ReaderTests):
    bgen_filename = os.path.join("data", "cohort1.bgen")
    truth_filename = "cohort1.truths.txt.bz2"


class TestLayout1Probs(ProbsReaderTests):
    bgen_filename = os.path.join("data", "cohort1.bgen")
    truth_filename = "cohort1.probs.truths.txt.bz2"


# Exported tuple of every test case, consumed by the package's test runner.
reader_tests = (
    Test32bits, Test24bits, Test16bits, Test16bitsZstd, Test9bits, Test8bits,
    Test3bits, TestLayout1, Test32bitsProbs, Test24bitsProbs, Test16bitsProbs,
    Test16bitsZstdProbs, Test9bitsProbs, Test8bitsProbs, Test3bitsProbs,
    TestLayout1Probs,
)
| lemieuxl/pybgen | pybgen/tests/test_pybgen.py | Python | mit | 15,033 |
# -*- coding: utf-8 -*-
#Copyright (c) 2007, Playful Invention Company
#Copyright (c) 2008-10, Walter Bender
#Copyright (c) 2009-10 Raúl Gutiérrez Segalés
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#The above copyright notice and this permission notice shall be included in
#all copies or substantial portions of the Software.
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
import pygtk
pygtk.require('2.0')
import gtk
import gobject
import os
import os.path
from math import atan2, pi
DEGTOR = 2 * pi / 360  # degrees-to-radians conversion factor (pi / 180)
import locale
from gettext import gettext as _
try:
from sugar.graphics.objectchooser import ObjectChooser
from sugar.datastore import datastore
from sugar import profile
except ImportError:
pass
from taconstants import HORIZONTAL_PALETTE, VERTICAL_PALETTE, BLOCK_SCALE, \
PALETTE_NAMES, TITLEXY, MEDIA_SHAPES, STATUS_SHAPES, \
OVERLAY_SHAPES, TOOLBAR_SHAPES, TAB_LAYER, RETURN, \
OVERLAY_LAYER, CATEGORY_LAYER, BLOCKS_WITH_SKIN, \
ICON_SIZE, PALETTES, PALETTE_SCALE, BOX_STYLE_MEDIA, \
PALETTE_WIDTH, MACROS, TOP_LAYER, BLOCK_LAYER, \
CONTENT_BLOCKS, DEFAULTS, SPECIAL_NAMES, \
HELP_STRINGS, CURSOR, EXPANDABLE, COLLAPSIBLE, \
DEAD_DICTS, DEAD_KEYS, TEMPLATES, PYTHON_SKIN, \
PALETTE_HEIGHT, STATUS_LAYER, OLD_DOCK, OLD_NAMES, \
BOOLEAN_STYLE, BLOCK_NAMES, DEFAULT_TURTLE, \
TURTLE_LAYER, EXPANDABLE_BLOCKS, COMPARE_STYLE, \
BOOLEAN_STYLE, EXPANDABLE_ARGS, NUMBER_STYLE, \
NUMBER_STYLE_PORCH, NUMBER_STYLE_BLOCK, \
NUMBER_STYLE_VAR_ARG
from talogo import LogoCode, stop_logo
from tacanvas import TurtleGraphics
from tablock import Blocks, Block
from taturtle import Turtles, Turtle
from tautils import magnitude, get_load_name, get_save_name, data_from_file, \
data_to_file, round_int, get_id, get_pixbuf_from_journal, \
movie_media_type, audio_media_type, image_media_type, \
save_picture, save_svg, calc_image_size, get_path, \
reset_stack_arm, grow_stack_arm, find_sandwich_top, \
find_sandwich_bottom, restore_stack, collapse_stack, \
collapsed, collapsible, hide_button_hit, show_button_hit, \
arithmetic_check, xy, find_block_to_run, find_top_block, \
find_start_stack, find_group, find_blk_below, olpc_xo_1, \
dock_dx_dy, data_to_string, journal_check, chooser
from tasprite_factory import SVG, svg_str_to_pixbuf, svg_from_file
from sprites import Sprites, Sprite
import logging
_logger = logging.getLogger('turtleart-activity')
class TurtleArtWindow():
""" TurtleArt Window class abstraction """
timeout_tag = [0]
    def __init__(self, win, path, parent=None, mycolors=None, mynick=None):
        """Set up the Turtle Art window state.

        win -- a gtk.DrawingArea (interactive use) or a gtk.gdk.Pixmap
            (off-screen rendering)
        path -- installation path, used to find images/ and samples/
        parent -- containing activity when running under Sugar
        mycolors -- optional comma-separated turtle colors
        mynick -- optional turtle/buddy name used when collaborating
        """
        self._loaded_project = ''
        self.win = None
        self.parent = parent
        # Interactive mode: a live drawing area, with events hooked up.
        if type(win) == gtk.DrawingArea:
            self.interactive_mode = True
            self.window = win
            self.window.set_flags(gtk.CAN_FOCUS)
            # A parent activity implies we are running under Sugar.
            if self.parent is not None:
                self.parent.show_all()
                self.running_sugar = True
            else:
                self.window.show_all()
                self.running_sugar = False
            self.area = self.window.window
            self.gc = self.area.new_gc()
            self._setup_events()
        # Non-interactive mode: render into an off-screen pixmap.
        elif type(win) == gtk.gdk.Pixmap:
            self.interactive_mode = False
            self.window = win
            self.running_sugar = False
            self.gc = self.window.new_gc()
        else:
            # NOTE(review): in this branch neither interactive_mode nor
            # running_sugar is set, so the attribute accesses below will
            # raise AttributeError -- confirm this path is unreachable.
            _logger.debug("bad win type %s" % (type(win)))
        if self.running_sugar:
            self.activity = parent
            self.nick = profile.get_nick_name()
        else:
            self.activity = None
            self.nick = None
        # File-system locations for loading/saving projects.
        self.path = path
        self.load_save_folder = os.path.join(path, 'samples')
        self.save_folder = None
        self.save_file_name = None
        self.width = gtk.gdk.screen_width()
        self.height = gtk.gdk.screen_height()
        self.rect = gtk.gdk.Rectangle(0, 0, 0, 0)
        # Keyboard/mouse interaction state.
        self.keypress = ''
        self.keyvalue = 0
        self.dead_key = ''
        self.mouse_flag = 0
        self.mouse_x = 0
        self.mouse_y = 0
        # Use the locale's decimal point (fall back to '.') when
        # formatting numbers on blocks.
        locale.setlocale(locale.LC_NUMERIC, '')
        self.decimal_point = locale.localeconv()['decimal_point']
        if self.decimal_point == '' or self.decimal_point is None:
            self.decimal_point = '.'
        self.orientation = HORIZONTAL_PALETTE
        # Scale down and use 16-bit color on the (small-screen) OLPC XO-1.
        if olpc_xo_1():
            self.lead = 1.0
            self.scale = 0.67
            self.color_mode = '565'
            if self.running_sugar and not self.activity.new_sugar_system:
                self.orientation = VERTICAL_PALETTE
        else:
            self.lead = 1.0
            self.scale = 1.0
            self.color_mode = '888' # TODO: Read visual mode from gtk image
        # Block/palette display state.
        self.block_scale = BLOCK_SCALE
        self.trash_scale = 0.5
        self.myblock = None
        self.nop = 'nop'
        self.loaded = 0
        self.step_time = 0
        self.hide = False
        self.palette = True
        self.coord_scale = 1
        self.buddies = []
        self.saved_string = ''
        self.dx = 0
        self.dy = 0
        self.media_shapes = {}
        self.cartesian = False
        self.polar = False
        self.overlay_shapes = {}
        self.toolbar_shapes = {}
        self.toolbar_offset = 0
        self.status_spr = None
        self.status_shapes = {}
        self.toolbar_spr = None
        self.palette_sprs = []
        self.palettes = []
        self.palette_button = []
        self.trash_index = PALETTE_NAMES.index('trash')
        self.trash_stack = []
        self.selected_palette = None
        self.previous_palette = None
        self.selectors = []
        self.selected_selector = None
        self.previous_selector = None
        self.selector_shapes = []
        self.selected_blk = None
        self.selected_spr = None
        # Drag-and-drop state for blocks and turtles.
        self.drag_group = None
        self.drag_turtle = 'move', 0, 0
        self.drag_pos = 0, 0
        self.paste_offset = 20
        self.block_list = Blocks(font_scale_factor=self.scale,
                                 decimal_point=self.decimal_point)
        # Sprites only exist in interactive mode.
        if self.interactive_mode:
            self.sprite_list = Sprites(self.window, self.area, self.gc)
        else:
            self.sprite_list = None
        # Create the default turtle (named after the buddy when sharing).
        self.turtles = Turtles(self.sprite_list)
        if mynick is None:
            self.default_turtle_name = DEFAULT_TURTLE
        else:
            self.default_turtle_name = mynick
        if mycolors is None:
            Turtle(self.turtles, self.default_turtle_name)
        else:
            Turtle(self.turtles, self.default_turtle_name, mycolors.split(','))
        self.active_turtle = self.turtles.get_turtle(self.default_turtle_name)
        self.saving_svg = False
        self.svg_string = ''
        self.selected_turtle = None
        # Drawing canvas and the coordinates of the usable screen regions.
        self.canvas = TurtleGraphics(self, self.width, self.height)
        self.titlex = -(self.canvas.width * TITLEXY[0]) / \
            (self.coord_scale * 2)
        self.leftx = -(self.canvas.width * TITLEXY[0]) / \
            (self.coord_scale * 2)
        self.rightx = 0
        self.titley = (self.canvas.height * TITLEXY[1]) / \
            (self.coord_scale * 2)
        self.topy = (self.canvas.height * (TITLEXY[1] - 0.125)) / \
            (self.coord_scale * 2)
        self.bottomy = 0
        # The Logo interpreter that runs block stacks.
        self.lc = LogoCode(self)
        self.saved_pictures = []
        if self.interactive_mode:
            self._setup_misc()
            self._show_toolbar_palette(0, False)
        self.block_operation = ''
def _setup_events(self):
""" Register the events we listen to. """
self.window.add_events(gtk.gdk.BUTTON_PRESS_MASK)
self.window.add_events(gtk.gdk.BUTTON_RELEASE_MASK)
self.window.add_events(gtk.gdk.POINTER_MOTION_MASK)
self.window.add_events(gtk.gdk.KEY_PRESS_MASK)
self.window.connect("expose-event", self._expose_cb)
self.window.connect("button-press-event", self._buttonpress_cb)
self.window.connect("button-release-event", self._buttonrelease_cb)
self.window.connect("motion-notify-event", self._move_cb)
self.window.connect("key_press_event", self._keypress_cb)
def _setup_misc(self):
""" Misc. sprites for status, overlays, etc. """
# media blocks get positioned into other blocks
for _name in MEDIA_SHAPES:
if _name[0:7] == 'journal' and not self.running_sugar:
file_name = 'file' + _name[7:]
else:
file_name = _name
self.media_shapes[_name] = svg_str_to_pixbuf(svg_from_file(
"%s/images/%s.svg" % (self.path, file_name)))
for i, _name in enumerate(STATUS_SHAPES):
self.status_shapes[_name] = svg_str_to_pixbuf(svg_from_file(
"%s/images/%s.svg" % (self.path, _name)))
self.status_spr = Sprite(self.sprite_list, 0, self.height - 200,
self.status_shapes['status'])
self.status_spr.hide()
self.status_spr.type = 'status'
for _name in OVERLAY_SHAPES:
self.overlay_shapes[_name] = Sprite(self.sprite_list,
int(self.width / 2 - 600),
int(self.height / 2 - 450),
svg_str_to_pixbuf(
svg_from_file("%s/images/%s.svg" % (self.path, _name))))
self.overlay_shapes[_name].hide()
self.overlay_shapes[_name].type = 'overlay'
if not self.running_sugar:
offset = self.width - 55 * len(TOOLBAR_SHAPES)
for i, _name in enumerate(TOOLBAR_SHAPES):
self.toolbar_shapes[_name] = Sprite(self.sprite_list,
i * 55 + offset, 0,
svg_str_to_pixbuf(
svg_from_file("%s/icons/%s.svg" % (self.path, _name))))
self.toolbar_shapes[_name].set_layer(TAB_LAYER)
self.toolbar_shapes[_name].name = _name
self.toolbar_shapes[_name].type = 'toolbar'
self.toolbar_shapes['stopiton'].hide()
def sharing(self):
""" Is a chattube available for share? """
if self.running_sugar and hasattr(self.activity, 'chattube') and\
self.activity.chattube is not None:
return True
return False
def is_project_empty(self):
""" Check to see if project has any blocks in use """
return len(self.just_blocks()) == 1
def _expose_cb(self, win, event):
""" Repaint """
self.sprite_list.refresh(event)
return True
def eraser_button(self):
""" Eraser_button (hide status block when clearing the screen.) """
if self.status_spr is not None:
self.status_spr.hide()
self.lc.prim_clear()
self.display_coordinates()
def run_button(self, time):
""" Run turtle! """
if self.running_sugar:
self.activity.recenter()
# Look for a 'start' block
for blk in self.just_blocks():
if find_start_stack(blk):
self.step_time = time
_logger.debug("running stack starting from %s" % (blk.name))
self._run_stack(blk)
return
# If there is no 'start' block, run stacks that aren't 'def action'
for blk in self.just_blocks():
if find_block_to_run(blk):
self.step_time = time
_logger.debug("running stack starting from %s" % (blk.name))
self._run_stack(blk)
return
def stop_button(self):
""" Stop button """
stop_logo(self)
def set_userdefined(self):
""" Change icon for user-defined blocks after loading Python code. """
for blk in self.just_blocks():
if blk.name in PYTHON_SKIN:
x, y = self._calc_image_offset('pythonon', blk.spr)
blk.set_image(self.media_shapes['pythonon'], x, y)
self._resize_skin(blk)
self.nop = 'pythonloaded'
def set_fullscreen(self):
""" Enter fullscreen mode """
if self.running_sugar:
self.activity.fullscreen()
self.activity.recenter()
def set_cartesian(self, flag):
""" Turn on/off Cartesian coordinates """
if flag:
if self.coord_scale == 1:
self.overlay_shapes['Cartesian_labeled'].set_layer(
OVERLAY_LAYER)
else:
self.overlay_shapes['Cartesian'].set_layer(OVERLAY_LAYER)
self.cartesian = True
else:
if self.coord_scale == 1:
self.overlay_shapes['Cartesian_labeled'].hide()
else:
self.overlay_shapes['Cartesian'].hide()
self.cartesian = False
def set_polar(self, flag):
""" Turn on/off polar coordinates """
if flag:
self.overlay_shapes['polar'].set_layer(OVERLAY_LAYER)
self.polar = True
else:
self.overlay_shapes['polar'].hide()
self.polar = False
    def update_overlay_position(self, widget, event):
        """ Reposition the overlays when window size changes.

        The overlay sprites are rebuilt (not moved) so they stay
        centered in the resized window; visibility is preserved. """
        self.width = event.width
        self.height = event.height
        for _name in OVERLAY_SHAPES:
            shape = self.overlay_shapes[_name]
            showing = False
            # NOTE(review): membership in the private _sprites.list is
            # used as a proxy for "currently visible" -- confirm that
            # Sprite.hide() removes the sprite from that list.
            if shape in shape._sprites.list:
                shape.hide()
                showing = True
            # Rebuild the overlay sprite centered in the new window.
            self.overlay_shapes[_name] = Sprite(self.sprite_list,
                                                int(self.width / 2 - 600),
                                                int(self.height / 2 - 450),
                                                svg_str_to_pixbuf(
                svg_from_file("%s/images/%s.svg" % (self.path, _name))))
            if showing:
                self.overlay_shapes[_name].set_layer(OVERLAY_LAYER)
            else:
                self.overlay_shapes[_name].hide()
            self.overlay_shapes[_name].type = 'overlay'
        # NOTE(review): both flags are cleared unconditionally, even when
        # the corresponding overlay was just re-shown above -- confirm
        # this is intended.
        self.cartesian = False
        self.polar = False
        self.canvas.width = self.width
        self.canvas.height = self.height
        self.canvas.move_turtle()
def hideshow_button(self):
""" Hide/show button """
if not self.hide:
for blk in self.just_blocks():
blk.spr.hide()
self.hide_palette()
self.hide = True
else:
for blk in self.just_blocks():
if blk.status != 'collapsed':
blk.spr.set_layer(BLOCK_LAYER)
self.show_palette()
if self.activity is not None and self.activity.new_sugar_system:
self.activity.palette_buttons[0].set_icon(
PALETTE_NAMES[0] + 'on')
self.hide = False
if self.running_sugar:
self.activity.recenter()
self.canvas.canvas.inval()
def hideshow_palette(self, state):
""" Hide or show palette """
if not state:
self.palette = False
if self.running_sugar:
self.activity.do_hidepalette()
self.hide_palette()
else:
self.palette = True
if self.running_sugar:
self.activity.do_showpalette()
self.activity.recenter()
self.show_palette()
def show_palette(self, n=0):
""" Show palette """
self._show_toolbar_palette(n)
self.palette_button[self.orientation].set_layer(TAB_LAYER)
self.palette_button[2].set_layer(TAB_LAYER)
if self.activity is None or not self.activity.new_sugar_system:
self.toolbar_spr.set_layer(CATEGORY_LAYER)
self.palette = True
def hide_palette(self):
""" Hide the palette. """
self._hide_toolbar_palette()
self.palette_button[self.orientation].hide()
self.palette_button[2].hide()
if self.activity is None or not self.activity.new_sugar_system:
self.toolbar_spr.hide()
self.palette = False
def hideblocks(self):
""" Callback from 'hide blocks' block """
if not self.interactive_mode:
return
self.hide = False
self.hideshow_button()
if self.running_sugar:
self.activity.do_hide()
def showblocks(self):
""" Callback from 'show blocks' block """
if not self.interactive_mode:
return
self.hide = True
self.hideshow_button()
if self.running_sugar:
self.activity.do_show()
    def resize_blocks(self, blocks=None):
        """ Resize blocks or if blocks is None, all of the blocks.

        The passes below must run in order: restore collapsed stacks,
        rescale, re-dock, recollapse, regrow sandwich arms, and finally
        resize skins. """
        if blocks is None:
            blocks = self.just_blocks()
        # We need to restore collapsed stacks before resizing.
        for blk in blocks:
            if blk.status == 'collapsed':
                bot = find_sandwich_bottom(blk)
                if collapsed(bot):
                    # Preserve the stored collapse height across the
                    # restore so the stack can be recollapsed below.
                    dy = bot.values[0]
                    restore_stack(find_sandwich_top(blk))
                    bot.values[0] = dy
        # Do the resizing.
        for blk in blocks:
            blk.rescale(self.block_scale)
        for blk in blocks:
            self._adjust_dock_positions(blk)
        # Re-collapsed stacks after resizing.
        for blk in blocks:
            if collapsed(blk):
                collapse_stack(find_sandwich_top(blk))
        for blk in blocks:
            if blk.name in ['sandwichtop', 'sandwichtop_no_label']:
                grow_stack_arm(blk)
        # Resize the skins on some blocks: media content and Python
        for blk in blocks:
            if blk.name in BLOCKS_WITH_SKIN:
                self._resize_skin(blk)
    def _show_toolbar_palette(self, n, init_only=False):
        """ Show the toolbar palettes, creating them on init_only.

        n -- index of the palette to show.
        init_only -- when True, only create the selector/background
        sprites; do not display a palette. """
        if (self.activity is None or not self.activity.new_sugar_system) and\
           self.selectors == []:
            # Create the selectors
            svg = SVG()
            x, y = 50, 0
            for i, name in enumerate(PALETTE_NAMES):
                # Each selector carries an 'off' and an 'on' shape.
                a = svg_str_to_pixbuf(svg_from_file("%s/icons/%soff.svg" % (
                    self.path, name)))
                b = svg_str_to_pixbuf(svg_from_file("%s/icons/%son.svg" % (
                    self.path, name)))
                self.selector_shapes.append([a, b])
                self.selectors.append(Sprite(self.sprite_list, x, y, a))
                self.selectors[i].type = 'selector'
                self.selectors[i].name = name
                self.selectors[i].set_layer(TAB_LAYER)
                w = self.selectors[i].get_dimensions()[0]
                x += int(w)
            # Create the toolbar background
            self.toolbar_offset = ICON_SIZE
            self.toolbar_spr = Sprite(self.sprite_list, 0, 0,
                svg_str_to_pixbuf(svg.toolbar(self.width, ICON_SIZE)))
            self.toolbar_spr.type = 'toolbar'
            self.toolbar_spr.set_layer(CATEGORY_LAYER)
        if self.palette_sprs == []:
            # Create the empty palettes
            if len(self.palettes) == 0:
                for i in range(len(PALETTES)):
                    self.palettes.append([])
            # Create empty palette backgrounds
            for i in PALETTE_NAMES:
                self.palette_sprs.append([None, None])
            # Create the palette orientation button
            self.palette_button.append(Sprite(self.sprite_list, 0,
                self.toolbar_offset, svg_str_to_pixbuf(svg_from_file(
                    "%s/images/palettehorizontal.svg" % (self.path)))))
            self.palette_button.append(Sprite(self.sprite_list, 0,
                self.toolbar_offset, svg_str_to_pixbuf(svg_from_file(
                    "%s/images/palettevertical.svg" % (self.path)))))
            self.palette_button[0].name = _('orientation')
            self.palette_button[1].name = _('orientation')
            self.palette_button[0].type = 'palette'
            self.palette_button[1].type = 'palette'
            self.palette_button[self.orientation].set_layer(TAB_LAYER)
            self.palette_button[1 - self.orientation].hide()
            # Create the palette next button
            self.palette_button.append(Sprite(self.sprite_list, 16,
                self.toolbar_offset, svg_str_to_pixbuf(svg_from_file(
                    "%s/images/palettenext.svg" % (self.path)))))
            self.palette_button[2].name = _('next')
            self.palette_button[2].type = 'palette'
            self.palette_button[2].set_layer(TAB_LAYER)
        if init_only:
            return
        # Hide the previously displayed palette
        self._hide_previous_palette()
        self.selected_palette = n
        self.previous_palette = self.selected_palette
        if self.activity is None or not self.activity.new_sugar_system:
            self.selected_selector = self.selectors[n]
            # Make sure all of the selectors are visible.
            self.selectors[n].set_shape(self.selector_shapes[n][1])
            for i in range(len(PALETTES)):
                self.selectors[i].set_layer(TAB_LAYER)
        # Show the palette with the current orientation.
        if self.palette_sprs[n][self.orientation] is not None:
            self.palette_sprs[n][self.orientation].set_layer(CATEGORY_LAYER)
        if self.palettes[n] == []:
            # Create 'proto' blocks for each palette entry
            for i, name in enumerate(PALETTES[n]):
                self.palettes[n].append(Block(self.block_list,
                    self.sprite_list, name, 0, 0, 'proto', [], PALETTE_SCALE))
                self.palettes[n][i].spr.set_layer(TAB_LAYER)
                self.palettes[n][i].unhighlight()
                # Some proto blocks get a skin.
                if name in BOX_STYLE_MEDIA:
                    self._proto_skin(name + 'small', n, i)
                elif name[:8] == 'template':
                    self._proto_skin(name[8:], n, i)
                elif name[:7] == 'picture':
                    self._proto_skin(name[7:], n, i)
                elif name in PYTHON_SKIN:
                    self._proto_skin('pythonsmall', n, i)
            self._layout_palette(n)
        for blk in self.palettes[n]:
            blk.spr.set_layer(TAB_LAYER)
        if n == self.trash_index:
            # The trash palette also displays the trashed stacks.
            for blk in self.trash_stack:
                for gblk in find_group(blk):
                    if gblk.status != 'collapsed':
                        gblk.spr.set_layer(TAB_LAYER)
def _hide_toolbar_palette(self):
""" Hide the toolbar palettes """
self._hide_previous_palette()
if self.activity is None or not self.activity.new_sugar_system:
# Hide the selectors
for i in range(len(PALETTES)):
self.selectors[i].hide()
elif self.selected_palette is not None:
self.activity.palette_buttons[self.selected_palette].set_icon(
PALETTE_NAMES[self.selected_palette] + 'off')
self.selected_palette = None
self.previous_palette = None
    def _hide_previous_palette(self):
        """ Hide just the previously viewed toolbar palette """
        # Hide previous palette
        if self.previous_palette is not None:
            for i in range(len(PALETTES[self.previous_palette])):
                self.palettes[self.previous_palette][i].spr.hide()
            self.palette_sprs[self.previous_palette][
                self.orientation].hide()
            if self.activity is None or not self.activity.new_sugar_system:
                # Old-style toolbar: revert the selector to its 'off' shape.
                self.selectors[self.previous_palette].set_shape(
                    self.selector_shapes[self.previous_palette][0])
            elif self.previous_palette is not None and \
                 self.previous_palette != self.selected_palette:
                # New-style toolbar: dim the button unless it is still
                # the selected palette.  (The 'is not None' re-check is
                # redundant inside this outer if.)
                self.activity.palette_buttons[self.previous_palette].set_icon(
                    PALETTE_NAMES[self.previous_palette] + 'off')
            if self.previous_palette == self.trash_index:
                # Also hide the trashed stacks shown on the trash palette.
                for blk in self.trash_stack:
                    for gblk in find_group(blk):
                        gblk.spr.hide()
def _horizontal_layout(self, x, y, blocks):
""" Position prototypes in a horizontal palette. """
_max_w = 0
for blk in blocks:
_w, _h = self._width_and_height(blk)
if y + _h > PALETTE_HEIGHT + self.toolbar_offset:
x += int(_max_w + 3)
y = self.toolbar_offset + 3
_max_w = 0
(_bx, _by) = blk.spr.get_xy()
_dx = x - _bx
_dy = y - _by
for g in find_group(blk):
g.spr.move_relative((int(_dx), int(_dy)))
y += int(_h + 3)
if _w > _max_w:
_max_w = _w
return x, y, _max_w
    def _vertical_layout(self, x, y, blocks):
        """ Position prototypes in a vertical palette.

        Blocks are laid out left to right, wrapping to a new row when
        PALETTE_WIDTH would be exceeded; each completed row is then
        recentered.  Returns the final x, y position and the height of
        the last row. """
        _row = []
        _row_w = 0
        _max_h = 0
        for _b in blocks:
            _w, _h = self._width_and_height(_b)
            if x + _w > PALETTE_WIDTH:
                # Recenter row.
                _dx = int((PALETTE_WIDTH - _row_w) / 2)
                for _r in _row:
                    for _g in find_group(_r):
                        _g.spr.move_relative((_dx, 0))
                # Start a new row below the tallest block of this one.
                _row = []
                _row_w = 0
                x = 4
                y += int(_max_h + 3)
                _max_h = 0
            _row.append(_b)
            _row_w += (4 + _w)
            (_bx, _by) = _b.spr.get_xy()
            _dx = int(x - _bx)
            _dy = int(y - _by)
            for _g in find_group(_b):
                _g.spr.move_relative((_dx, _dy))
            x += int(_w + 4)
            if _h > _max_h:
                _max_h = _h
        # Recenter last row.
        _dx = int((PALETTE_WIDTH - _row_w) / 2)
        for _r in _row:
            for _g in find_group(_r):
                _g.spr.move_relative((_dx, 0))
        return x, y, _max_h
    def _layout_palette(self, n):
        """ Layout prototypes in palette n, sizing the palette
        background to fit and positioning the 'next' button. """
        if n is not None:
            if self.orientation == HORIZONTAL_PALETTE:
                _x, _y = 20, self.toolbar_offset + 5
                _x, _y, _max = self._horizontal_layout(_x, _y,
                                                       self.palettes[n])
                if n == self.trash_index:
                    # Trashed stacks are laid out after the protos.
                    _x, _y, _max = self._horizontal_layout(_x + _max, _y,
                                                           self.trash_stack)
                _w = _x + _max + 25
                if self.palette_sprs[n][self.orientation] is None:
                    svg = SVG()
                    self.palette_sprs[n][self.orientation] = Sprite(
                        self.sprite_list, 0, self.toolbar_offset,
                        svg_str_to_pixbuf(svg.palette(_w, PALETTE_HEIGHT)))
                    self.palette_sprs[n][self.orientation].type = 'category'
                # NOTE(review): PALETTE_NAMES.index('trash') is presumably
                # equal to self.trash_index used above -- confirm.
                if n == PALETTE_NAMES.index('trash'):
                    # The trash palette grows with its contents, so its
                    # background is reshaped on every layout.
                    svg = SVG()
                    self.palette_sprs[n][self.orientation].set_shape(
                        svg_str_to_pixbuf(svg.palette(_w, PALETTE_HEIGHT)))
                    self.palette_button[2].move((_w - 20, self.toolbar_offset))
            else:
                _x, _y = 5, self.toolbar_offset + 15
                _x, _y, _max = self._vertical_layout(_x, _y, self.palettes[n])
                if n == PALETTE_NAMES.index('trash'):
                    _x, _y, _max = self._vertical_layout(_x, _y + _max,
                                                         self.trash_stack)
                _h = _y + _max + 25 - self.toolbar_offset
                if self.palette_sprs[n][self.orientation] is None:
                    svg = SVG()
                    self.palette_sprs[n][self.orientation] = \
                        Sprite(self.sprite_list, 0, self.toolbar_offset,
                               svg_str_to_pixbuf(svg.palette(PALETTE_WIDTH, _h)))
                    self.palette_sprs[n][self.orientation].type = 'category'
                if n == PALETTE_NAMES.index('trash'):
                    svg = SVG()
                    self.palette_sprs[n][self.orientation].set_shape(
                        svg_str_to_pixbuf(svg.palette(PALETTE_WIDTH, _h)))
                    self.palette_button[2].move((PALETTE_WIDTH - 20,
                                                 self.toolbar_offset))
            self.palette_sprs[n][self.orientation].set_layer(CATEGORY_LAYER)
def _buttonpress_cb(self, win, event):
""" Button press """
self.window.grab_focus()
x, y = xy(event)
self.mouse_flag = 1
self.mouse_x = x
self.mouse_y = y
self.button_press(event.get_state() & gtk.gdk.CONTROL_MASK, x, y)
return True
    def button_press(self, mask, x, y):
        """ Process a button press at (x, y).

        mask -- the Ctrl-modifier state at press time.
        Dispatches on what is under the pointer: a block, a turtle, or
        one of the UI sprites (selector, category, palette, toolbar). """
        self.block_operation = 'click'
        # Unselect things that may have been selected earlier
        if self.selected_blk is not None:
            self._unselect_block()
        self.selected_turtle = None
        # Always hide the status layer on a click
        if self.status_spr is not None:
            self.status_spr.hide()
        # Find out what was clicked
        spr = self.sprite_list.find_sprite((x, y))
        self.dx = 0
        self.dy = 0
        if spr is None:
            return True
        self.selected_spr = spr
        # From the sprite at x, y, look for a corresponding block
        blk = self.block_list.spr_to_block(spr)
        if blk is not None:
            if blk.type == 'block':
                self.selected_blk = blk
                self._block_pressed(x, y, blk)
            elif blk.type == 'trash':
                self._restore_from_trash(find_top_block(blk))
            elif blk.type == 'proto':
                # Proto blocks either perform a trash operation, expand
                # a macro, or create a new instance of themselves.
                if blk.name == 'restoreall':
                    self._restore_all_from_trash()
                elif blk.name == 'restore':
                    self._restore_latest_from_trash()
                elif blk.name == 'empty':
                    self._empty_trash()
                elif blk.name in MACROS:
                    self._new_macro(blk.name, x + 20, y + 20)
                else:
                    blk.highlight()
                    self._new_block(blk.name, x, y)
                    blk.unhighlight()
            return True
        # Next, look for a turtle
        t = self.turtles.spr_to_turtle(spr)
        if t is not None:
            self.selected_turtle = t
            self.canvas.set_turtle(self.turtles.get_turtle_key(t))
            self._turtle_pressed(x, y)
            return True
        # Finally, check for anything else
        if hasattr(spr, 'type'):
            if spr.type == "canvas":
                pass
                # spr.set_layer(CANVAS_LAYER)
            elif spr.type == 'selector':
                self._select_category(spr)
            elif spr.type == 'category':
                if hide_button_hit(spr, x, y):
                    self.hideshow_palette(False)
            elif spr.type == 'palette':
                if spr.name == _('next'):
                    # Cycle to the next palette, wrapping at the end.
                    i = self.selected_palette + 1
                    if i == len(PALETTE_NAMES):
                        i = 0
                    if self.activity is None or \
                       not self.activity.new_sugar_system:
                        self._select_category(self.selectors[i])
                    else:
                        if self.selected_palette is not None:
                            self.activity.palette_buttons[
                                self.selected_palette].set_icon(
                                PALETTE_NAMES[self.selected_palette] + 'off')
                        self.activity.palette_buttons[i].set_icon(
                            PALETTE_NAMES[i] + 'on')
                        self.show_palette(i)
                else:
                    # The orientation button: flip the palette layout.
                    self.orientation = 1 - self.orientation
                    self.palette_button[self.orientation].set_layer(TAB_LAYER)
                    self.palette_button[1 - self.orientation].hide()
                    self.palette_sprs[self.selected_palette][
                        1 - self.orientation].hide()
                    self._layout_palette(self.selected_palette)
                    self.show_palette(self.selected_palette)
            elif spr.type == 'toolbar':
                self._select_toolbar_button(spr)
        return True
def _select_category(self, spr):
""" Select a category from the toolbar (old Sugar systems only). """
i = self.selectors.index(spr)
spr.set_shape(self.selector_shapes[i][1])
if self.selected_selector is not None:
j = self.selectors.index(self.selected_selector)
if i == j:
return
self.selected_selector.set_shape(self.selector_shapes[j][0])
self.previous_selector = self.selected_selector
self.selected_selector = spr
self.show_palette(i)
def _select_toolbar_button(self, spr):
""" Select a toolbar button (Used when not running Sugar). """
if not hasattr(spr, 'name'):
return
if spr.name == 'run-fastoff':
self.lc.trace = 0
self.run_button(0)
elif spr.name == 'run-slowoff':
self.lc.trace = 0
self.run_button(3)
elif spr.name == 'debugoff':
self.lc.trace = 1
self.run_button(6)
elif spr.name == 'stopiton':
self.stop_button()
self.toolbar_shapes['stopiton'].hide()
elif spr.name == 'eraseron':
self.eraser_button()
elif spr.name == 'hideshowoff':
self.hideshow_button()
    def _put_in_trash(self, blk, x=0, y=0):
        """ Put a group of blocks into the trash.

        blk -- the top block of the stack; x, y -- where to park the
        stack within the trash palette. """
        self.trash_stack.append(blk)
        group = find_group(blk)
        for gblk in group:
            if gblk.status == 'collapsed':
                # Collapsed stacks are restored for rescaling
                # and then recollapsed after they are moved to the trash.
                bot = find_sandwich_bottom(gblk)
                if collapsed(bot):
                    # Preserve the stored collapse height across the restore.
                    dy = bot.values[0]
                    restore_stack(find_sandwich_top(gblk))
                    bot.values[0] = dy
            gblk.type = 'trash'
            gblk.rescale(self.trash_scale)
        blk.spr.move((x, y))
        for gblk in group:
            self._adjust_dock_positions(gblk)
        # Re-collapsing any stacks we had restored for scaling
        for gblk in group:
            if collapsed(gblk):
                collapse_stack(find_sandwich_top(gblk))
        # And resize any skins.
        for gblk in group:
            if gblk.name in BLOCKS_WITH_SKIN:
                self._resize_skin(gblk)
        # self.show_palette(self.trash_index)
        if self.selected_palette != self.trash_index:
            # Trashed blocks are only visible while the trash palette
            # is the one open.
            for gblk in group:
                gblk.spr.hide()
    def _restore_all_from_trash(self):
        """ Restore all the blocks in the trash can. """
        # The type check must stay inside the loop: restoring one block
        # flips its whole group from 'trash' to 'block', so group
        # members encountered later are skipped rather than restored a
        # second time.
        for blk in self.block_list.list:
            if blk.type == 'trash':
                self._restore_from_trash(blk)
def _restore_latest_from_trash(self):
""" Restore most recent blocks from the trash can. """
if len(self.trash_stack) == 0:
return
self._restore_from_trash(self.trash_stack[len(self.trash_stack) - 1])
    def _restore_from_trash(self, blk):
        """ Restore the stack anchored at blk from the trash to the
        canvas, shifting it clear of the palette area. """
        group = find_group(blk)
        for gblk in group:
            gblk.rescale(self.block_scale)
            gblk.spr.set_layer(BLOCK_LAYER)
            x, y = gblk.spr.get_xy()
            # Shift the block out from under the open palette.
            if self.orientation == 0:
                gblk.spr.move((x, y + PALETTE_HEIGHT + self.toolbar_offset))
            else:
                gblk.spr.move((x + PALETTE_WIDTH, y))
            gblk.type = 'block'
        for gblk in group:
            self._adjust_dock_positions(gblk)
        # If the stack had been collapsed before going into the trash,
        # collapse it again now.
        for gblk in group:
            if collapsed(gblk):
                collapse_stack(find_sandwich_top(gblk))
        # And resize any skins.
        for gblk in group:
            if gblk.name in BLOCKS_WITH_SKIN:
                self._resize_skin(gblk)
        self.trash_stack.remove(blk)
def _empty_trash(self):
""" Permanently remove all blocks presently in the trash can. """
for blk in self.block_list.list:
if blk.type == 'trash':
blk.type = 'deleted'
blk.spr.hide()
self.trash_stack = []
def _in_the_trash(self, x, y):
""" Is x, y over the trash can? """
"""
if self.selected_palette == self.trash_index and \
self.palette_sprs[self.trash_index][self.orientation].hit((x, y)):
return True
"""
if self.selected_palette is not None and \
self.palette_sprs[self.selected_palette][self.orientation].hit(
(x, y)):
return True
return False
    def _block_pressed(self, x, y, blk):
        """ Block pressed: begin dragging the stack anchored at blk. """
        if blk is not None:
            blk.highlight()
            self._disconnect(blk)
            self.drag_group = find_group(blk)
            (sx, sy) = blk.spr.get_xy()
            # Remember the press offset within the block's sprite.
            self.drag_pos = x - sx, y - sy
            for blk in self.drag_group:
                if blk.status != 'collapsed':
                    blk.spr.set_layer(TOP_LAYER)
            # NOTE(review): the loop above rebinds 'blk', so this saves
            # the label of the *last* block in the drag group, not of
            # the block that was pressed -- confirm this is intentional.
            self.saved_string = blk.spr.labels[0]
def _unselect_block(self):
""" Unselect block """
# After unselecting a 'number' block, we need to check its value
if self.selected_blk.name == 'number':
self._number_check()
elif self.selected_blk.name == 'string':
self._string_check()
self.selected_blk.unhighlight()
self.selected_blk = None
    def _new_block(self, name, x, y):
        """ Make a new block of type name near (x, y), creating and
        docking default argument blocks where the block takes them. """
        if name in CONTENT_BLOCKS:
            newblk = Block(self.block_list, self.sprite_list, name, x - 20,
                           y - 20, 'block', DEFAULTS[name], self.block_scale)
        else:
            newblk = Block(self.block_list, self.sprite_list, name, x - 20,
                           y - 20, 'block', [], self.block_scale)
        # Add a 'skin' to some blocks
        if name in PYTHON_SKIN:
            if self.nop == 'pythonloaded':
                self._block_skin('pythonon', newblk)
            else:
                self._block_skin('pythonoff', newblk)
        elif name in BOX_STYLE_MEDIA:
            self._block_skin(name + 'off', newblk)
        newspr = newblk.spr
        newspr.set_layer(TOP_LAYER)
        self.drag_pos = 20, 20
        newblk.connections = [None] * len(newblk.docks)
        if newblk.name in DEFAULTS:
            # Create and dock a block for each default argument value.
            for i, argvalue in enumerate(DEFAULTS[newblk.name]):
                # skip the first dock position since it is always a connector
                dock = newblk.docks[i + 1]
                argname = dock[0]
                if argname == 'unavailable':
                    continue
                if argname == 'media':
                    argname = 'journal'
                elif argname == 'number' and \
                     (type(argvalue) is str or type(argvalue) is unicode):
                    # NOTE: the 'unicode' builtin makes this Python-2-only.
                    argname = 'string'
                elif argname == 'bool':
                    argname = argvalue
                elif argname == 'flow':
                    argname = argvalue
                (sx, sy) = newspr.get_xy()
                if argname is not None:
                    if argname in CONTENT_BLOCKS:
                        argblk = Block(self.block_list, self.sprite_list,
                                       argname, 0, 0, 'block', [argvalue],
                                       self.block_scale)
                    else:
                        argblk = Block(self.block_list, self.sprite_list,
                                       argname, 0, 0, 'block', [],
                                       self.block_scale)
                    argdock = argblk.docks[0]
                    # Position the argument block at its dock point.
                    nx = sx + dock[2] - argdock[2]
                    ny = sy + dock[3] - argdock[3]
                    if argname == 'journal':
                        self._block_skin('journaloff', argblk)
                    argblk.spr.move((nx, ny))
                    argblk.spr.set_layer(TOP_LAYER)
                    argblk.connections = [newblk, None]
                    newblk.connections[i + 1] = argblk
        self.drag_group = find_group(newblk)
        self.block_operation = 'new'
def _new_macro(self, name, x, y):
""" Create a "macro" (predefined stack of blocks). """
macro = MACROS[name]
macro[0][2] = x
macro[0][3] = y
top = self.process_data(macro)
self.block_operation = 'new'
self._check_collapsibles(top)
self.drag_group = find_group(top)
    def process_data(self, block_data, offset=0):
        """ Process block_data (from a macro, a file, or the clipboard).

        block_data -- list of block descriptions; element [4] of each
        entry holds the indices of the blocks it connects to.
        offset -- x, y offset applied to the loaded blocks (used when
        pasting so copies don't land on the originals).
        Returns the first block created, or None if there were none. """
        if offset != 0:
            _logger.debug("offset is %d" % (offset))
        # Create the blocks (or turtle).
        blocks = []
        for blk in block_data:
            if not self._found_a_turtle(blk):
                blocks.append(self.load_block(blk, offset))
        # Make the connections.
        for i in range(len(blocks)):
            cons = []
            # Normally, it is simply a matter of copying the connections.
            if blocks[i].connections is None:
                for c in block_data[i][4]:
                    if c is None:
                        cons.append(None)
                    else:
                        cons.append(blocks[c])
            elif blocks[i].connections == 'check':
                # Convert old-style boolean and arithmetic blocks
                cons.append(None)  # Add an extra connection.
                for c in block_data[i][4]:
                    if c is None:
                        cons.append(None)
                    else:
                        cons.append(blocks[c])
                # If the boolean op was connected, readjust the plumbing.
                if blocks[i].name in BOOLEAN_STYLE:
                    if block_data[i][4][0] is not None:
                        # Rewire the parent of our old parent to point
                        # at us, and demote the old parent to a child.
                        c = block_data[i][4][0]
                        cons[0] = blocks[block_data[c][4][0]]
                        c0 = block_data[c][4][0]
                        for j, cj in enumerate(block_data[c0][4]):
                            if cj == c:
                                blocks[c0].connections[j] = blocks[i]
                        if c < i:
                            blocks[c].connections[0] = blocks[i]
                            blocks[c].connections[3] = None
                        else:
                            # Connection was to a block we haven't seen yet.
                            _logger.debug("Warning: dock to the future")
                else:
                    if block_data[i][4][0] is not None:
                        # Same rewiring, but dock 1 instead of dock 3.
                        c = block_data[i][4][0]
                        cons[0] = blocks[block_data[c][4][0]]
                        c0 = block_data[c][4][0]
                        for j, cj in enumerate(block_data[c0][4]):
                            if cj == c:
                                blocks[c0].connections[j] = blocks[i]
                        if c < i:
                            blocks[c].connections[0] = blocks[i]
                            blocks[c].connections[1] = None
                        else:
                            # Connection was to a block we haven't seen yet.
                            _logger.debug("Warning: dock to the future")
            else:
                _logger.debug("Warning: unknown connection state %s" % \
                              (str(blocks[i].connections)))
            blocks[i].connections = cons[:]
        # Block sizes and shapes may have changed.
        for blk in blocks:
            self._adjust_dock_positions(blk)
        # Look for any stacks that need to be collapsed or sandwiched
        for blk in blocks:
            if collapsed(blk):
                collapse_stack(find_sandwich_top(blk))
            elif blk.name == 'sandwichbottom' and collapsible(blk):
                blk.svg.set_hide(True)
                blk.svg.set_show(False)
                blk.refresh()
                grow_stack_arm(find_sandwich_top(blk))
        # Resize blocks to current scale
        self.resize_blocks(blocks)
        if len(blocks) > 0:
            return blocks[0]
        else:
            return None
def _adjust_dock_positions(self, blk):
""" Adjust the dock x, y positions """
if not self.interactive_mode:
return
(sx, sy) = blk.spr.get_xy()
for i, c in enumerate(blk.connections):
if i > 0 and c is not None:
bdock = blk.docks[i]
for j in range(len(c.docks)):
if c.connections[j] == blk:
cdock = c.docks[j]
nx = sx + bdock[2] - cdock[2]
ny = sy + bdock[3] - cdock[3]
c.spr.move((nx, ny))
self._adjust_dock_positions(c)
def _turtle_pressed(self, x, y):
(tx, ty) = self.selected_turtle.get_xy()
w = self.active_turtle.spr.rect.width / 2
h = self.active_turtle.spr.rect.height / 2
dx = x - tx - w
dy = y - ty - h
# if x, y is near the edge, rotate
if (dx * dx) + (dy * dy) > ((w * w) + (h * h)) / 6:
self.drag_turtle = ('turn',
self.canvas.heading - atan2(dy, dx) / DEGTOR, 0)
else:
self.drag_turtle = ('move', x - tx, y - ty)
def _move_cb(self, win, event):
x, y = xy(event)
self._mouse_move(x, y)
return True
    def _mouse_move(self, x, y):
        """ Process mouse movements: drag/rotate a turtle, show hover
        help, or drag the selected stack of blocks. """
        self.block_operation = 'move'
        # First, check to see if we are dragging or rotating a turtle.
        if self.selected_turtle is not None:
            dtype, dragx, dragy = self.drag_turtle
            (sx, sy) = self.selected_turtle.get_xy()
            if dtype == 'move':
                dx = x - dragx - sx
                dy = y - dragy - sy
                self.selected_turtle.spr.set_layer(TOP_LAYER)
                self.selected_turtle.move((sx + dx, sy + dy))
            else:
                # Rotate the turtle with the pointer, snapping the
                # heading to 10-degree increments.
                dx = x - sx - self.active_turtle.spr.rect.width / 2
                dy = y - sy - self.active_turtle.spr.rect.height / 2
                self.canvas.seth(int(dragx + atan2(dy, dx) / DEGTOR + 5) / \
                                 10 * 10)
        # If we are hovering, show popup help.
        elif self.drag_group is None:
            self._show_popup(x, y)
            return
        # If we have a stack of blocks selected, move them.
        elif self.drag_group[0] is not None:
            blk = self.drag_group[0]
            # Don't move a bottom blk if the stack is collapsed
            if collapsed(blk):
                return
            self.selected_spr = blk.spr
            dragx, dragy = self.drag_pos
            (sx, sy) = blk.spr.get_xy()
            dx = x - dragx - sx
            dy = y - dragy - sy
            # Take no action if there was a move of 0,0.
            if dx == 0 and dy == 0:
                return
            self.drag_group = find_group(blk)
            # Prevent blocks from ending up with a negative x or y
            for blk in self.drag_group:
                (bx, by) = blk.spr.get_xy()
                if bx + dx < 0:
                    dx = -bx
                if by + dy < 0:
                    dy = -by
            # Calculate a bounding box and only invalidate once.
            minx = blk.spr.rect.x
            miny = blk.spr.rect.y
            maxx = blk.spr.rect.x + blk.spr.rect.width
            maxy = blk.spr.rect.y + blk.spr.rect.height
            for blk in self.drag_group:
                if blk.spr.rect.x < minx:
                    minx = blk.spr.rect.x
                if blk.spr.rect.x + blk.spr.rect.width > maxx:
                    maxx = blk.spr.rect.x + blk.spr.rect.width
                if blk.spr.rect.y < miny:
                    miny = blk.spr.rect.y
                if blk.spr.rect.y + blk.spr.rect.height > maxy:
                    maxy = blk.spr.rect.y + blk.spr.rect.height
                # Move the sprite rects directly; the single
                # invalidate_rect below repaints the whole area.
                blk.spr.rect.x += dx
                blk.spr.rect.y += dy
            # Extend the box to cover both old and new positions.
            if dx < 0:
                minx += dx
            else:
                maxx += dx
            if dy < 0:
                miny += dy
            else:
                maxy += dy
            self.rect.x = minx
            self.rect.y = miny
            self.rect.width = maxx - minx
            self.rect.height = maxy - miny
            self.sprite_list.area.invalidate_rect(self.rect, False)
        # Accumulate the total drag distance for this gesture.
        self.dx += dx
        self.dy += dy
def _show_popup(self, x, y):
""" Let's help our users by displaying a little help. """
spr = self.sprite_list.find_sprite((x, y))
blk = self.block_list.spr_to_block(spr)
if spr and blk is not None:
if self.timeout_tag[0] == 0:
self.timeout_tag[0] = self._do_show_popup(blk.name)
self.selected_spr = spr
else:
if self.timeout_tag[0] > 0:
try:
gobject.source_remove(self.timeout_tag[0])
self.timeout_tag[0] = 0
except:
self.timeout_tag[0] = 0
elif spr and hasattr(spr, 'type') and (spr.type == 'selector' or \
spr.type == 'palette' or \
spr.type == 'toolbar'):
if self.timeout_tag[0] == 0 and hasattr(spr, 'name'):
self.timeout_tag[0] = self._do_show_popup(spr.name)
self.selected_spr = spr
else:
if self.timeout_tag[0] > 0:
try:
gobject.source_remove(self.timeout_tag[0])
self.timeout_tag[0] = 0
except:
self.timeout_tag[0] = 0
else:
if self.timeout_tag[0] > 0:
try:
gobject.source_remove(self.timeout_tag[0])
self.timeout_tag[0] = 0
except:
self.timeout_tag[0] = 0
    def _do_show_popup(self, block_name):
        """ Fetch the help text and display it.  Always returns 0,
        which callers store in timeout_tag[0]. """
        # Map the internal block name to a localized display name.
        if block_name in SPECIAL_NAMES:
            block_name_s = SPECIAL_NAMES[block_name]
        elif block_name in BLOCK_NAMES:
            block_name_s = BLOCK_NAMES[block_name][0]
        elif block_name in TOOLBAR_SHAPES:
            block_name_s = ''
        else:
            block_name_s = _(block_name)
        if block_name in HELP_STRINGS:
            if block_name_s == '':
                label = HELP_STRINGS[block_name]
            else:
                label = block_name_s + ": " + HELP_STRINGS[block_name]
        else:
            label = block_name_s
        if self.running_sugar:
            self.activity.hover_help_label.set_text(label)
            self.activity.hover_help_label.show()
        else:
            if self.interactive_mode:
                # NOTE(review): elsewhere this class uses self.window for
                # the drawing area; self.win is presumably the toplevel
                # GTK window -- confirm.
                self.win.set_title(_("Turtle Art") + " — " + label)
        return 0
def _buttonrelease_cb(self, win, event):
""" Button release """
x, y = xy(event)
self.button_release(x, y)
return True
    def button_release(self, x, y):
        """ Process a button release at (x, y): drop a dragged turtle,
        drop or trash a dragged stack of blocks, or handle a click. """
        # We may have been moving the turtle
        if self.selected_turtle is not None:
            (tx, ty) = self.selected_turtle.get_xy()
            k = self.turtles.get_turtle_key(self.selected_turtle)
            # Remove turtles by dragging them onto the trash palette.
            if self._in_the_trash(tx, ty):
                # If it is the default turtle, just recenter it.
                if k == self.default_turtle_name:
                    self._move_turtle(0, 0)
                    self.canvas.heading = 0
                    self.canvas.turn_turtle()
                else:
                    self.selected_turtle.hide()
                    self.turtles.remove_from_dict(k)
            else:
                # Convert screen position to turtle coordinates.
                self._move_turtle(tx - self.canvas.width / 2 + \
                                      self.active_turtle.spr.rect.width / 2,
                                  self.canvas.height / 2 - ty - \
                                      self.active_turtle.spr.rect.height / 2)
            self.selected_turtle = None
            self.active_turtle = self.turtles.get_turtle(
                self.default_turtle_name)
            return
        # If we don't have a group of blocks, then there is nothing to do.
        if self.drag_group is None:
            return
        blk = self.drag_group[0]
        # Remove blocks by dragging them onto the trash palette.
        if self.block_operation == 'move' and self._in_the_trash(x, y):
            self._put_in_trash(blk, x, y)
            self.drag_group = None
            return
        # Pull a stack of new blocks off of the category palette.
        if self.block_operation == 'new':
            # Nudge the new stack out from under the palette.
            for gblk in self.drag_group:
                (bx, by) = gblk.spr.get_xy()
                if self.orientation == 0:
                    gblk.spr.move((bx + 20,
                                   by + PALETTE_HEIGHT + self.toolbar_offset))
                else:
                    gblk.spr.move((bx + PALETTE_WIDTH, by + 20))
        # Look to see if we can dock the current stack.
        self._snap_to_dock()
        self._check_collapsibles(blk)
        for gblk in self.drag_group:
            if gblk.status != 'collapsed':
                gblk.spr.set_layer(BLOCK_LAYER)
        self.drag_group = None
        # Find the block we clicked on and process it.
        if self.block_operation == 'click':
            self._click_block(x, y)
    def _move_turtle(self, x, y):
        """ Move the selected turtle to (x, y). """
        # Positions are kept relative to the canvas sprite's origin.
        (cx, cy) = self.canvas.canvas.get_xy()
        self.canvas.xcor = x - cx
        self.canvas.ycor = y + cy
        self.canvas.move_turtle()
        if self.running_sugar:
            self.display_coordinates()
        # Keep the dragged turtle on top of the other sprites.
        self.selected_turtle.spr.set_layer(TURTLE_LAYER)
    def _click_block(self, x, y):
        """ Click block: lots of special cases to handle... """
        blk = self.block_list.spr_to_block(self.selected_spr)
        if blk is None:
            return
        self.selected_blk = blk
        if blk.name == 'number' or blk.name == 'string':
            # Begin in-place editing: remember the old label and show a
            # text cursor.
            self.saved_string = blk.spr.labels[0]
            blk.spr.labels[0] += CURSOR
        elif blk.name in BOX_STYLE_MEDIA:
            self._import_from_journal(self.selected_blk)
            if blk.name == 'journal' and self.running_sugar:
                self._load_description_block(blk)
        elif blk.name == 'identity2' or blk.name == 'hspace':
            # Horizontal spacers grow/shrink in x via their show/hide
            # buttons; the rest of the group shifts right to make room.
            group = find_group(blk)
            if hide_button_hit(blk.spr, x, y):
                dx = blk.reset_x()
            elif show_button_hit(blk.spr, x, y):
                dx = 20
                blk.expand_in_x(dx)
            else:
                dx = 0
            for gblk in group:
                if gblk != blk:
                    gblk.spr.move_relative((dx * blk.scale, 0))
        elif blk.name == 'vspace':
            # Vertical spacers grow/shrink in y.
            group = find_group(blk)
            if hide_button_hit(blk.spr, x, y):
                dy = blk.reset_y()
            elif show_button_hit(blk.spr, x, y):
                dy = 20
                blk.expand_in_y(dy)
            else:
                dy = 0
            for gblk in group:
                if gblk != blk:
                    gblk.spr.move_relative((0, dy * blk.scale))
            grow_stack_arm(find_sandwich_top(blk))
        elif blk.name in EXPANDABLE_BLOCKS:
            # Connection may be lost during expansion, so store it...
            blk0 = blk.connections[0]
            if blk0 is not None:
                dock0 = blk0.connections.index(blk)
            if hide_button_hit(blk.spr, x, y):
                dy = blk.reset_y()
            elif show_button_hit(blk.spr, x, y):
                dy = 20
                blk.expand_in_y(dy)
            else:
                # Not a resize button: a plain click runs the stack.
                self._run_stack(blk)
                return
            if blk.name in BOOLEAN_STYLE:
                self._expand_boolean(blk, blk.connections[1], dy)
            else:
                self._expand_expandable(blk, blk.connections[1], dy)
            # and restore it...
            if blk0 is not None:
                blk.connections[0] = blk0
                blk0.connections[dock0] = blk
            self._cascade_expandable(blk)
            grow_stack_arm(find_sandwich_top(blk))
        elif blk.name in EXPANDABLE_ARGS or blk.name == 'nop':
            if show_button_hit(blk.spr, x, y):
                # Grow a variable-arity block: relabel it, append a dock,
                # and create a default-valued argument block in the new slot.
                n = len(blk.connections)
                group = find_group(blk.connections[n - 1])
                if blk.name == 'myfunc1arg':
                    blk.spr.labels[1] = 'f(x, y)'
                    blk.spr.labels[2] = ' '
                    dy = blk.add_arg()
                    blk.primitive = 'myfunction2'
                    blk.name = 'myfunc2arg'
                elif blk.name == 'myfunc2arg':
                    blk.spr.labels[1] = 'f(x, y, z)'
                    dy = blk.add_arg(False)
                    blk.primitive = 'myfunction3'
                    blk.name = 'myfunc3arg'
                elif blk.name == 'userdefined':
                    dy = blk.add_arg()
                    blk.primitive = 'userdefined2'
                    blk.name = 'userdefined2args'
                elif blk.name == 'userdefined2args':
                    dy = blk.add_arg(False)
                    blk.primitive = 'userdefined3'
                    blk.name = 'userdefined3args'
                else:
                    dy = blk.add_arg()
                for gblk in group:
                    gblk.spr.move_relative((0, dy))
                blk.connections.append(blk.connections[n - 1])
                argname = blk.docks[n - 1][0]
                argvalue = DEFAULTS[blk.name][len(DEFAULTS[blk.name]) - 1]
                argblk = Block(self.block_list, self.sprite_list, argname,
                               0, 0, 'block', [argvalue], self.block_scale)
                argdock = argblk.docks[0]
                # Position the new argument block so its dock lines up with
                # the parent's new dock.
                (bx, by) = blk.spr.get_xy()
                nx = bx + blk.docks[n - 1][2] - argdock[2]
                ny = by + blk.docks[n - 1][3] - argdock[3]
                argblk.spr.move((nx, ny))
                argblk.spr.set_layer(TOP_LAYER)
                argblk.connections = [blk, None]
                blk.connections[n - 1] = argblk
                if blk.name in NUMBER_STYLE_VAR_ARG:
                    self._cascade_expandable(blk)
                grow_stack_arm(find_sandwich_top(blk))
            elif blk.name in PYTHON_SKIN and self.myblock is None:
                self._import_py()
            else:
                self._run_stack(blk)
        elif blk.name in COLLAPSIBLE:
            # Toggle the sandwich between collapsed and restored.
            top = find_sandwich_top(blk)
            if collapsed(blk):
                restore_stack(top)
            elif top is not None:
                collapse_stack(top)
        else:
            self._run_stack(blk)
def _expand_boolean(self, blk, blk2, dy):
""" Expand a boolean blk if blk2 is too big to fit. """
group = find_group(blk2)
for gblk in find_group(blk):
if gblk not in group:
gblk.spr.move_relative((0, -dy * blk.scale))
def _expand_expandable(self, blk, blk2, dy):
""" Expand an expandable blk if blk2 is too big to fit. """
if blk2 is None:
group = [blk]
else:
group = find_group(blk2)
group.append(blk)
for gblk in find_group(blk):
if gblk not in group:
gblk.spr.move_relative((0, dy * blk.scale))
if blk.name in COMPARE_STYLE:
for gblk in find_group(blk):
gblk.spr.move_relative((0, -dy * blk.scale))
    def _cascade_expandable(self, blk):
        """ If expanding/shrinking a block, cascade. """
        # Walk up through number-style parents, resizing each one to fit
        # the (possibly resized) child docked in its slot 1.
        while blk.name in NUMBER_STYLE or \
                blk.name in NUMBER_STYLE_PORCH or \
                blk.name in NUMBER_STYLE_BLOCK or \
                blk.name in NUMBER_STYLE_VAR_ARG:
            if blk.connections[0] is None:
                break
            if blk.connections[0].name in EXPANDABLE_BLOCKS:
                if blk.connections[0].connections.index(blk) != 1:
                    break
                blk = blk.connections[0]
                # The myfunc blocks need extra vertical room for their
                # additional arguments.
                if blk.connections[1].name == 'myfunc2arg':
                    dy = 40 + blk.connections[1].ey - blk.ey
                elif blk.connections[1].name == 'myfunc3arg':
                    dy = 60 + blk.connections[1].ey - blk.ey
                else:
                    dy = 20 + blk.connections[1].ey - blk.ey
                blk.expand_in_y(dy)
                if dy != 0:
                    group = find_group(blk.connections[1])
                    group.append(blk)
                    for gblk in find_group(blk):
                        if gblk not in group:
                            gblk.spr.move_relative((0, dy * blk.scale))
                    if blk.name in COMPARE_STYLE:
                        for gblk in find_group(blk):
                            gblk.spr.move_relative((0, -dy * blk.scale))
            else:
                break
    def _check_collapsibles(self, blk):
        """ Check state of collapsible blocks upon change in dock state. """
        group = find_group(blk)
        for gblk in group:
            if gblk.name in COLLAPSIBLE:
                # Show the expand/collapse buttons that match the state.
                if collapsed(gblk):
                    gblk.svg.set_show(True)
                    gblk.svg.set_hide(False)
                    reset_stack_arm(find_sandwich_top(gblk))
                elif collapsible(gblk):
                    gblk.svg.set_hide(True)
                    gblk.svg.set_show(False)
                    grow_stack_arm(find_sandwich_top(gblk))
                else:
                    gblk.svg.set_hide(False)
                    gblk.svg.set_show(False)
                # Ouch: When you tear off the sandwich bottom, you
                # no longer have access to the group with the sandwich top
                # so check them all.
                for b in self.just_blocks():
                    if b.name in ['sandwichtop', 'sandwichtop_no_label']:
                        if find_sandwich_bottom(b) is None:
                            reset_stack_arm(b)
                gblk.refresh()
    def _run_stack(self, blk):
        """ Run a stack of blocks. """
        if blk is None:
            return
        self.lc.ag = None
        top = find_top_block(blk)
        self.lc.run_blocks(top, self.just_blocks(), True)
        if self.interactive_mode:
            # Step the interpreter from the main loop's idle handler so
            # the UI stays responsive while the program runs.
            gobject.idle_add(self.lc.doevalstep)
        else:
            # Headless mode: evaluate to completion synchronously.
            while self.lc.doevalstep():
                pass
    def _snap_to_dock(self):
        """ Snap a block (selected_block) to the dock of another block
        (destination_block).
        """
        selected_block = self.drag_group[0]
        best_destination = None
        d = 200  # docking threshold on dock-to-dock distance
        # Exhaustively search every (selected dock, destination dock) pair
        # for the closest legal connection within the threshold.
        for selected_block_dockn in range(len(selected_block.docks)):
            for destination_block in self.just_blocks():
                # Don't link to a block to which you're already connected
                if destination_block in self.drag_group:
                    continue
                # Check each dock of destination for a possible connection
                for destination_dockn in range(len(destination_block.docks)):
                    this_xy = dock_dx_dy(destination_block, destination_dockn,
                                         selected_block, selected_block_dockn)
                    if magnitude(this_xy) > d:
                        continue
                    d = magnitude(this_xy)
                    best_xy = this_xy
                    best_destination = destination_block
                    best_destination_dockn = destination_dockn
                    best_selected_block_dockn = selected_block_dockn
        if d < 200:
            # Reject type-incompatible connections.
            if not arithmetic_check(selected_block, best_destination,
                                    best_selected_block_dockn,
                                    best_destination_dockn):
                return
            if not journal_check(selected_block, best_destination,
                                 best_selected_block_dockn,
                                 best_destination_dockn):
                return
            # Move the whole dragged group into its docked position.
            for blk in self.drag_group:
                (sx, sy) = blk.spr.get_xy()
                blk.spr.move((sx + best_xy[0], sy + best_xy[1]))
            # If there was already a block docked there, move it to the trash.
            blk_in_dock = best_destination.connections[best_destination_dockn]
            if blk_in_dock is not None and blk_in_dock != selected_block:
                blk_in_dock.connections[0] = None
                self._put_in_trash(blk_in_dock)
            # Wire up the two-way connection.
            best_destination.connections[best_destination_dockn] = \
                selected_block
            if selected_block.connections is not None:
                selected_block.connections[best_selected_block_dockn] = \
                    best_destination
            # Grow or shrink the destination to fit the new child.
            if best_destination.name in BOOLEAN_STYLE:
                if best_destination_dockn == 2 and \
                        selected_block.name in COMPARE_STYLE:
                    dy = selected_block.ey - best_destination.ey
                    best_destination.expand_in_y(dy)
                    self._expand_boolean(best_destination, selected_block, dy)
            elif best_destination.name in EXPANDABLE_BLOCKS and \
                    best_destination_dockn == 1:
                dy = 0
                if (selected_block.name in EXPANDABLE_BLOCKS or
                    selected_block.name in NUMBER_STYLE_VAR_ARG):
                    if selected_block.name == 'myfunc2arg':
                        dy = 40 + selected_block.ey - best_destination.ey
                    elif selected_block.name == 'myfunc3arg':
                        dy = 60 + selected_block.ey - best_destination.ey
                    else:
                        dy = 20 + selected_block.ey - best_destination.ey
                    best_destination.expand_in_y(dy)
                else:
                    if best_destination.ey > 0:
                        dy = best_destination.reset_y()
                if dy != 0:
                    self._expand_expandable(best_destination, selected_block,
                                            dy)
                self._cascade_expandable(best_destination)
                grow_stack_arm(find_sandwich_top(best_destination))
    def _disconnect(self, blk):
        """ Disconnect block from stack above it. """
        if blk.connections[0] is None:
            return
        if collapsed(blk):
            return
        blk2 = blk.connections[0]
        # Clear the parent's reference to blk.
        c = blk2.connections.index(blk)
        blk2.connections[c] = None
        # Shrink the parent back down if it had grown to fit blk.
        if blk2.name in BOOLEAN_STYLE:
            if c == 2 and blk2.ey > 0:
                dy = -blk2.ey
                blk2.expand_in_y(dy)
                self._expand_boolean(blk2, blk, dy)
        elif blk2.name in EXPANDABLE_BLOCKS and c == 1:
            if blk2.ey > 0:
                dy = blk2.reset_y()
                if dy != 0:
                    self._expand_expandable(blk2, blk, dy)
                self._cascade_expandable(blk2)
                grow_stack_arm(find_sandwich_top(blk2))
        blk.connections[0] = None
def _import_from_journal(self, blk):
""" Import a file from the Sugar Journal """
if self.running_sugar:
chooser(self.parent, '', self._update_media_blk)
else:
fname, self.load_save_folder = get_load_name('.*',
self.load_save_folder)
if fname is None:
return
self._update_media_icon(blk, fname)
def _load_description_block(self, blk):
""" Look for a corresponding description block """
if blk is None or blk.name != 'journal' or len(blk.values) == 0 or \
blk.connections[0] is None:
return
_blk = blk.connections[0]
dblk = find_blk_below(_blk, 'description')
# Autoupdate the block if it is empty
if dblk != None and (len(dblk.values) == 0 or dblk.values[0] is None):
self._update_media_icon(dblk, None, blk.values[0])
def _update_media_blk(self, dsobject):
""" Called from the chooser to load a media block """
self._update_media_icon(self.selected_blk, dsobject,
dsobject.object_id)
def _update_media_icon(self, blk, name, value=''):
""" Update the icon on a 'loaded' media block. """
if blk.name == 'journal':
self._load_image_thumb(name, blk)
elif blk.name == 'audio':
self._block_skin('audioon', blk)
else:
self._block_skin('descriptionon', blk)
if value == '':
value = name
if len(blk.values) > 0:
blk.values[0] = value
else:
blk.values.append(value)
blk.spr.set_label(' ')
    def _load_image_thumb(self, picture, blk):
        """ Replace icon with a preview image. """
        pixbuf = None
        self._block_skin('descriptionon', blk)
        if self.running_sugar:
            w, h = calc_image_size(blk.spr)
            pixbuf = get_pixbuf_from_journal(picture, w, h)
        else:
            # Outside Sugar, choose the skin (and possibly retype the
            # block) based on the file's media type.
            if movie_media_type(picture):
                self._block_skin('journalon', blk)
            elif audio_media_type(picture):
                self._block_skin('audioon', blk)
                blk.name = 'audio'
            elif image_media_type(picture):
                w, h = calc_image_size(blk.spr)
                pixbuf = gtk.gdk.pixbuf_new_from_file_at_size(picture, w, h)
            else:
                blk.name = 'description'
        if pixbuf is not None:
            x, y = self._calc_image_offset('', blk.spr)
            blk.set_image(pixbuf, x, y)
            self._resize_skin(blk)
    def _keypress_cb(self, area, event):
        """ Keyboard """
        keyname = gtk.gdk.keyval_name(event.keyval)
        keyunicode = gtk.gdk.keyval_to_unicode(event.keyval)
        # Detect the Alt modifier for keyboard shortcuts.
        if event.get_state() & gtk.gdk.MOD1_MASK:
            alt_mask = True
            alt_flag = 'T'  # NOTE(review): alt_flag appears unused here.
        else:
            alt_mask = False
            alt_flag = 'F'
        self._key_press(alt_mask, keyname, keyunicode)
        return keyname
    def _key_press(self, alt_mask, keyname, keyunicode):
        """Dispatch a key press: Alt shortcuts, block-label editing,
        keyboard block commands, or turtle jogging."""
        if keyname is None:
            return False
        self.keypress = keyname
        if alt_mask:
            # Alt-p toggles the palette; Alt-q quits.
            if keyname == "p":
                self.hideshow_button()
            elif keyname == 'q':
                exit()
        elif self.selected_blk is not None:
            if self.selected_blk.name == 'number':
                self._process_numeric_input(keyname)
            elif self.selected_blk.name == 'string':
                self.process_alphanumeric_input(keyname, keyunicode)
                if self.selected_blk is not None:
                    self.selected_blk.resize()
            elif self.selected_blk.name != 'proto':
                self._process_keyboard_commands(keyname, block_flag=True)
        elif self.turtles.spr_to_turtle(self.selected_spr) is not None:
            # No block selected: drive the selected turtle instead.
            self._process_keyboard_commands(keyname, block_flag=False)
        return True
def _process_numeric_input(self, keyname):
''' Make sure numeric input is valid. '''
oldnum = self.selected_blk.spr.labels[0].replace(CURSOR, '')
if len(oldnum) == 0:
oldnum = '0'
if keyname == 'minus':
if oldnum == '0':
newnum = '-'
elif oldnum[0] != '-':
newnum = '-' + oldnum
else:
newnum = oldnum
elif keyname == 'comma' and self.decimal_point == ',' and \
',' not in oldnum:
newnum = oldnum + ','
elif keyname == 'period' and self.decimal_point == '.' and \
'.' not in oldnum:
newnum = oldnum + '.'
elif keyname == 'BackSpace':
if len(oldnum) > 0:
newnum = oldnum[:len(oldnum)-1]
else:
newnum = ''
elif keyname in ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']:
if oldnum == '0':
newnum = keyname
else:
newnum = oldnum + keyname
elif keyname == 'Return':
self._unselect_block()
return
else:
newnum = oldnum
if newnum == '.':
newnum = '0.'
if newnum == ',':
newnum = '0,'
if len(newnum) > 0 and newnum != '-':
try:
float(newnum.replace(self.decimal_point, '.'))
except ValueError, e:
newnum = oldnum
self.selected_blk.spr.set_label(newnum + CURSOR)
def process_alphanumeric_input(self, keyname, keyunicode):
""" Make sure alphanumeric input is properly parsed. """
if len(self.selected_blk.spr.labels[0]) > 0:
c = self.selected_blk.spr.labels[0].count(CURSOR)
if c == 0:
oldleft = self.selected_blk.spr.labels[0]
oldright = ''
elif len(self.selected_blk.spr.labels[0]) == 1:
oldleft = ''
oldright = ''
else:
try: # Why are getting a ValueError on occasion?
oldleft, oldright = \
self.selected_blk.spr.labels[0].split(CURSOR)
except ValueError:
_logger.debug("[%s]" % self.selected_blk.spr.labels[0])
oldleft = self.selected_blk.spr.labels[0]
oldright = ''
else:
oldleft = ''
oldright = ''
newleft = oldleft
if keyname in ['Shift_L', 'Shift_R', 'Control_L', 'Caps_Lock', \
'Alt_L', 'Alt_R', 'KP_Enter', 'ISO_Level3_Shift']:
keyname = ''
keyunicode = 0
# Hack until I sort out input and unicode and dead keys,
if keyname[0:5] == 'dead_':
self.dead_key = keyname
keyname = ''
keyunicode = 0
if keyname == 'space':
keyunicode = 32
elif keyname == 'Tab':
keyunicode = 9
if keyname == 'BackSpace':
if len(oldleft) > 1:
newleft = oldleft[:len(oldleft)-1]
else:
newleft = ''
elif keyname == 'Home':
oldright = oldleft + oldright
newleft = ''
elif keyname == 'Left':
if len(oldleft) > 0:
oldright = oldleft[len(oldleft) - 1:] + oldright
newleft = oldleft[:len(oldleft) - 1]
elif keyname == 'Right':
if len(oldright) > 0:
newleft = oldleft + oldright[0]
oldright = oldright[1:]
elif keyname == 'End':
newleft = oldleft + oldright
oldright = ''
elif keyname == 'Return':
newleft = oldleft + RETURN
elif keyname == 'Down':
self._unselect_block()
return
elif keyname == 'Up' or keyname == 'Escape': # Restore previous state
self.selected_blk.spr.set_label(self.saved_string)
self._unselect_block()
return
else:
if self.dead_key is not '':
keyunicode = \
DEAD_DICTS[DEAD_KEYS.index(self.dead_key[5:])][keyname]
self.dead_key = ''
if keyunicode > 0:
if unichr(keyunicode) != '\x00':
newleft = oldleft + unichr(keyunicode)
else:
newleft = oldleft
elif keyunicode == -1: # clipboard text
if keyname == '\n':
newleft = oldleft + RETURN
else:
newleft = oldleft + keyname
self.selected_blk.spr.set_label("%s%s%s" % (newleft, CURSOR, oldright))
    def _process_keyboard_commands(self, keyname, block_flag=True):
        """ Use the keyboard to move blocks and turtle """
        # Key name -> (dx, dy); [-1, -1] and [0, 0] entries are sentinels
        # for the action/selection keys special-cased below.
        mov_dict = {'KP_Up': [0, 20], 'j': [0, 20], 'Up': [0, 20],
                    'KP_Down': [0, -20], 'k': [0, -20], 'Down': [0, -20],
                    'KP_Left': [-20, 0], 'h': [-20, 0], 'Left': [-20, 0],
                    'KP_Right': [20, 0], 'l': [20, 0], 'Right': [20, 0],
                    'KP_Page_Down': [-1, -1], 'Page_Down': [-1, -1],
                    'KP_Page_Up': [-1, -1], 'Page_Up': [-1, -1],
                    'KP_End': [0, 0], 'End': [0, 0],
                    'KP_Home': [0, 0], 'Home': [0, 0], 'space': [0, 0],
                    'Return': [-1, -1], 'Esc': [-1, -1]}
        if keyname not in mov_dict:
            return True
        if keyname in ['KP_End', 'End']:
            # End runs the project.
            self.run_button(0)
        elif self.selected_spr is not None:
            if not self.lc.running and block_flag:
                blk = self.block_list.spr_to_block(self.selected_spr)
                if keyname in ['Return', 'KP_Page_Up', 'Page_Up', 'Esc']:
                    # Activate the selected block as if it were clicked.
                    (x, y) = blk.spr.get_xy()
                    self._click_block(x, y)
                elif keyname in ['KP_Page_Down', 'Page_Down']:
                    # Page Down trashes the selected stack.
                    if self.drag_group is None:
                        self.drag_group = find_group(blk)
                    self._put_in_trash(blk)
                    self.drag_group = None
                elif keyname in ['KP_Home', 'Home', 'space']:
                    # Cycle the selection to the next block of this type.
                    block = self.block_list.spr_to_block(self.selected_spr)
                    if block is None:
                        return True
                    block.unhighlight()
                    block = self.block_list.get_next_block_of_same_type(
                        block)
                    if block is not None:
                        self.selected_spr = block.spr
                        block.highlight()
                else:
                    self._jog_block(blk, mov_dict[keyname][0],
                                    mov_dict[keyname][1])
            elif not block_flag:
                self._jog_turtle(mov_dict[keyname][0], mov_dict[keyname][1])
        return True
    def _jog_turtle(self, dx, dy):
        """ Jog turtle """
        # (-1, -1) is the sentinel for "return to the home position".
        if dx == -1 and dy == -1:
            self.canvas.xcor = 0
            self.canvas.ycor = 0
        else:
            self.canvas.xcor += dx
            self.canvas.ycor += dy
        self.active_turtle = self.turtles.spr_to_turtle(self.selected_spr)
        self.canvas.move_turtle()
        self.display_coordinates()
        self.selected_turtle = None
def _jog_block(self, blk, dx, dy):
""" Jog block """
if blk.type == 'proto':
return
if collapsed(blk):
return
self._disconnect(blk)
self.drag_group = find_group(blk)
for blk in self.drag_group:
(sx, sy) = blk.spr.get_xy()
if sx + dx < 0:
dx += -(sx + dx)
if sy + dy < 0:
dy += -(sy + dy)
for blk in self.drag_group:
(sx, sy) = blk.spr.get_xy()
blk.spr.move((sx + dx, sy - dy))
self._snap_to_dock()
self.drag_group = None
def _number_check(self):
""" Make sure a 'number' block contains a number. """
n = self.selected_blk.spr.labels[0].replace(CURSOR, '')
if n in ['-', '.', '-.', ',', '-,']:
n = 0
elif n is not None:
try:
f = float(n.replace(self.decimal_point, '.'))
if f > 1000000:
n = 1
self.showlabel("#overflowerror")
elif f < -1000000:
n = -1
self.showlabel("#overflowerror")
except ValueError:
n = 0
self.showlabel("#notanumber")
else:
n = 0
self.selected_blk.spr.set_label(n)
self.selected_blk.values[0] = n.replace(self.decimal_point, '.')
def _string_check(self):
s = self.selected_blk.spr.labels[0].replace(CURSOR, '')
self.selected_blk.spr.set_label(s)
self.selected_blk.values[0] = s.replace(RETURN, "\12")
def load_python_code(self):
""" Load Python code from a file """
fname, self.load_save_folder = get_load_name('.py',
self.load_save_folder)
if fname is None:
return
f = open(fname, 'r')
self.myblock = f.read()
f.close()
def _import_py(self):
""" Import Python code into a block """
if self.running_sugar:
self.activity.import_py()
else:
self.load_python_code()
self.set_userdefined()
def new_project(self):
""" Start a new project """
stop_logo(self)
self._loaded_project = ""
# Put current project in the trash.
while len(self.just_blocks()) > 0:
blk = self.just_blocks()[0]
top = find_top_block(blk)
self._put_in_trash(top)
self.canvas.clearscreen()
self.save_file_name = None
def is_new_project(self):
""" Is this a new project or was a old project loaded from a file? """
return self._loaded_project == ""
def project_has_changed(self):
""" WARNING: order of JSON serialized data may have changed. """
try:
f = open(self._loaded_project, 'r')
saved_project_data = f.read()
f.close()
except:
_logger.debug("problem loading saved project data from %s" % \
(self._loaded_project))
saved_project_data = ""
current_project_data = data_to_string(self.assemble_data_to_save())
return saved_project_data != current_project_data
def load_files(self, ta_file, create_new_project=True):
""" Load a project from a file """
if create_new_project:
self.new_project()
self._check_collapsibles(self.process_data(data_from_file(ta_file)))
self._loaded_prokect = ta_file
def load_file(self, create_new_project=True):
_file_name, self.load_save_folder = get_load_name('.ta',
self.load_save_folder)
if _file_name is None:
return
if _file_name[-3:] == '.ta':
_file_name = _file_name[0: -3]
self.load_files(_file_name + '.ta', create_new_project)
if create_new_project:
self.save_file_name = os.path.basename(_file_name)
if self.running_sugar:
self.activity.metadata['title'] = os.path.split(_file_name)[1]
def _found_a_turtle(self, blk):
""" Either [-1, 'turtle', ...] or [-1, ['turtle', key], ...] """
if blk[1] == 'turtle':
self.load_turtle(blk)
return True
elif type(blk[1]) == list and blk[1][0] == 'turtle':
self.load_turtle(blk, blk[1][1])
return True
elif type(blk[1]) == tuple:
_btype, _key = blk[1]
if _btype == 'turtle':
self.load_turtle(blk, _key)
return True
return False
    def load_turtle(self, blk, key=1):
        """ Restore a turtle from its saved state """
        # blk is (id, name, xcor, ycor, heading, color, shade, pensize);
        # id and name are unused here -- key selects the turtle.
        tid, name, xcor, ycor, heading, color, shade, pensize = blk
        self.canvas.set_turtle(key)
        self.canvas.setxy(xcor, ycor, pendown=False)
        self.canvas.seth(heading)
        self.canvas.setcolor(color)
        self.canvas.setshade(shade)
        self.canvas.setpensize(pensize)
    def load_block(self, b, offset=0):
        """ Restore individual blocks from saved state """
        # A block is saved as: (i, (btype, value), x, y, (c0,... cn))
        # The x, y position is saved/loaded for backward compatibility
        btype, value = b[1], None
        if type(btype) == tuple:
            btype, value = btype
        elif type(btype) == list:
            btype, value = btype[0], btype[1]
        # Only content and collapsible blocks carry a value.
        if btype in CONTENT_BLOCKS or btype in COLLAPSIBLE:
            if btype == 'number':
                try:
                    values = [round_int(value)]
                except ValueError:
                    values = [0]
            elif btype in COLLAPSIBLE:
                if value is not None:
                    values = [int(value)]
                else:
                    values = []
            else:
                values = [value]
        else:
            values = []
        if btype in OLD_DOCK:
            check_dock = True
        else:
            check_dock = False
        # Map deprecated block names onto their current equivalents.
        if btype in OLD_NAMES:
            btype = OLD_NAMES[btype]
        blk = Block(self.block_list, self.sprite_list, btype,
                    b[2] + self.canvas.cx + offset,
                    b[3] + self.canvas.cy + offset,
                    'block', values, self.block_scale)
        # Some blocks get transformed.
        if btype == 'string' and blk.spr is not None:
            blk.spr.set_label(blk.values[0].replace('\n', RETURN))
        elif btype == 'start':  # block size is saved in start block
            if value is not None:
                self.block_scale = value
        elif btype in EXPANDABLE or btype in EXPANDABLE_BLOCKS or \
                btype in EXPANDABLE_ARGS or btype == 'nop':
            # Restore any saved expansion or extra arguments.
            if btype == 'vspace' or btype in EXPANDABLE_BLOCKS:
                if value is not None:
                    blk.expand_in_y(value)
            elif btype == 'hspace' or btype == 'identity2':
                if value is not None:
                    blk.expand_in_x(value)
            elif btype == 'templatelist' or btype == 'list':
                for i in range(len(b[4])-4):
                    blk.add_arg()
            elif btype == 'myfunc2arg' or btype == 'myfunc3arg' or\
                    btype == 'userdefined2args' or btype == 'userdefined3args':
                blk.add_arg()
            if btype == 'myfunc3arg' or btype == 'userdefined3args':
                blk.add_arg(False)
            if btype in PYTHON_SKIN:
                if self.nop == 'pythonloaded':
                    self._block_skin('pythonon', blk)
                else:
                    self._block_skin('pythonoff', blk)
        elif btype in BOX_STYLE_MEDIA and blk.spr is not None:
            # Media blocks get a thumbnail or a stock skin.
            if len(blk.values) == 0 or blk.values[0] == 'None' or \
                    blk.values[0] is None:
                self._block_skin(btype + 'off', blk)
            elif btype == 'audio' or btype == 'description':
                self._block_skin(btype + 'on', blk)
            elif self.running_sugar:
                try:
                    dsobject = datastore.get(blk.values[0])
                    if not movie_media_type(dsobject.file_path[-4:]):
                        w, h, = calc_image_size(blk.spr)
                        pixbuf = get_pixbuf_from_journal(dsobject, w, h)
                        if pixbuf is not None:
                            x, y = self._calc_image_offset('', blk.spr)
                            blk.set_image(pixbuf, x, y)
                        else:
                            self._block_skin('journalon', blk)
                    dsobject.destroy()
                except:
                    # Fall back to treating the value as a file path.
                    try:
                        w, h, = calc_image_size(blk.spr)
                        pixbuf = gtk.gdk.pixbuf_new_from_file_at_size(
                            blk.values[0], w, h)
                        x, y = self._calc_image_offset('', blk.spr)
                        blk.set_image(pixbuf, x, y)
                    except:
                        _logger.debug("Couldn't open dsobject (%s)" % \
                                          (blk.values[0]))
                        self._block_skin('journaloff', blk)
            else:
                if not movie_media_type(blk.values[0][-4:]):
                    try:
                        w, h, = calc_image_size(blk.spr)
                        pixbuf = gtk.gdk.pixbuf_new_from_file_at_size(
                            blk.values[0], w, h)
                        x, y = self._calc_image_offset('', blk.spr)
                        blk.set_image(pixbuf, x, y)
                    except:
                        self._block_skin('journaloff', blk)
                else:
                    self._block_skin('journalon', blk)
            blk.spr.set_label(' ')
            blk.resize()
        if self.interactive_mode:
            blk.spr.set_layer(BLOCK_LAYER)
        if check_dock:
            # Old saves lack dock info; flag the block for a dock check.
            blk.connections = 'check'
        return blk
def load_start(self, ta_file=None):
""" Start a new project with a 'start' brick """
if ta_file is None:
self.process_data([[0, "start", PALETTE_WIDTH + 20,
self.toolbar_offset + PALETTE_HEIGHT + 20,
[None, None]]])
else:
self.process_data(data_from_file(ta_file))
def save_file(self, _file_name=None):
""" Start a project to a file """
if self.save_folder is not None:
self.load_save_folder = self.save_folder
if _file_name is None:
_file_name, self.load_save_folder = get_save_name('.ta',
self.load_save_folder, self.save_file_name)
if _file_name is None:
return
if _file_name[-3:] == '.ta':
_file_name = _file_name[0: -3]
data_to_file(self.assemble_data_to_save(), _file_name + '.ta')
self.save_file_name = os.path.basename(_file_name)
if not self.running_sugar:
self.save_folder = self.load_save_folder
    def assemble_data_to_save(self, save_turtle=True, save_project=True):
        """ Pack the project (or stack) into a data stream to be serialized """
        _data = []
        _blks = []
        if save_project:
            _blks = self.just_blocks()
        else:
            # Copy/paste case: serialize only the selected stack.
            if self.selected_blk is None:
                return []
            _blks = find_group(find_top_block(self.selected_blk))
        # Assign each block an index so connections can be saved by id.
        for _i, _blk in enumerate(_blks):
            _blk.id = _i
        for _blk in _blks:
            if _blk.name in CONTENT_BLOCKS or _blk.name in COLLAPSIBLE:
                if len(_blk.values) > 0:
                    _name = (_blk.name, _blk.values[0])
                else:
                    _name = (_blk.name)
            elif _blk.name in EXPANDABLE or _blk.name in EXPANDABLE_BLOCKS or \
                    _blk.name in EXPANDABLE_ARGS:
                # Save the expansion amount alongside the name.
                _ex, _ey = _blk.get_expand_x_y()
                if _ex > 0:
                    _name = (_blk.name, _ex)
                elif _ey > 0:
                    _name = (_blk.name, _ey)
                else:
                    _name = (_blk.name, 0)
            elif _blk.name == 'start':  # save block_size in start block
                _name = (_blk.name, self.block_scale)
            else:
                _name = (_blk.name)
            if hasattr(_blk, 'connections'):
                connections = [get_id(_cblk) for _cblk in _blk.connections]
            else:
                connections = None
            (_sx, _sy) = _blk.spr.get_xy()
            # Add a slight offset for copy/paste
            if not save_project:
                _sx += 20
                _sy += 20
            _data.append((_blk.id, _name, _sx - self.canvas.cx,
                          _sy - self.canvas.cy, connections))
        if save_turtle:
            for _turtle in iter(self.turtles.dict):
                self.canvas.set_turtle(_turtle)
                _data.append((-1, ['turtle', _turtle],
                              self.canvas.xcor, self.canvas.ycor,
                              self.canvas.heading, self.canvas.color,
                              self.canvas.shade, self.canvas.pensize))
        return _data
    def display_coordinates(self):
        """ Display the coordinates of the current turtle on the toolbar """
        x = round_int(self.canvas.xcor / self.coord_scale)
        y = round_int(self.canvas.ycor / self.coord_scale)
        h = round_int(self.canvas.heading)
        if self.running_sugar:
            self.activity.coordinates_label.set_text("%s: %d %s: %d %s: %d" % \
                (_("xcor"), x, _("ycor"), y, _("heading"), h))
            self.activity.coordinates_label.show()
        elif self.interactive_mode:
            # No Sugar toolbar: show the coordinates in the window title.
            self.win.set_title("%s — %s: %d %s: %d %s: %d" % \
                (_("Turtle Art"), _("xcor"), x, _("ycor"), y, _("heading"), h))
    def showlabel(self, shp, label=''):
        """ Display a message on a status block """
        if not self.interactive_mode:
            _logger.debug(label)
            return
        # A '#'-prefixed label names a status shape directly.
        if shp == 'syntaxerror' and str(label) != '':
            if str(label)[1:] in self.status_shapes:
                shp = str(label)[1:]
                label = ''
            else:
                shp = 'status'
        elif shp[0] == '#':
            shp = shp[1:]
            label = ''
        self.status_spr.set_shape(self.status_shapes[shp])
        self.status_spr.set_label(str(label))
        self.status_spr.set_layer(STATUS_LAYER)
        # The 'info' shape is taller, so it sits higher on the screen.
        if shp == 'info':
            self.status_spr.move((PALETTE_WIDTH, self.height - 400))
        else:
            self.status_spr.move((PALETTE_WIDTH, self.height - 200))
def calc_position(self, template):
""" Relative placement of portfolio objects (depreciated) """
w, h, x, y, dx, dy = TEMPLATES[template]
x *= self.canvas.width
y *= self.canvas.height
w *= (self.canvas.width - x)
h *= (self.canvas.height - y)
dx *= w
dy *= h
return(w, h, x, y, dx, dy)
def save_for_upload(self, _file_name):
""" Grab the current canvas and save it for upload """
if _file_name[-3:] == '.ta':
_file_name = _file_name[0: -3]
data_to_file(self.assemble_data_to_save(), _file_name + '.ta')
save_picture(self.canvas, _file_name + '.png')
ta_file = _file_name + '.ta'
image_file = _file_name + '.png'
return ta_file, image_file
    def save_as_image(self, name="", svg=False, pixbuf=None):
        """ Grab the current canvas and save it. """
        if not self.interactive_mode:
            save_picture(self.canvas, name[:-3] + ".png")
            return
        """
        self.color_map = self.window.get_colormap()
        new_pix = pixbuf.get_from_drawable(self.window, self.color_map,
                                           0, 0, 0, 0,
                                           self.width, self.height)
        new_pix.save(name[:-3] + ".png", "png")
        """
        # Work out the target filename and directory.
        if self.running_sugar:
            if svg:
                if len(name) == 0:
                    filename = "ta.svg"
                else:
                    filename = name + ".svg"
            else:
                if len(name) == 0:
                    filename = "ta.png"
                else:
                    filename = name + ".png"
            datapath = get_path(self.activity, 'instance')
        elif len(name) == 0:
            # No name given outside Sugar: prompt with a file dialog.
            name = "ta"
            if self.save_folder is not None:
                self.load_save_folder = self.save_folder
            if svg:
                filename, self.load_save_folder = get_save_name('.svg',
                    self.load_save_folder,
                    name)
            else:
                filename, self.load_save_folder = get_save_name('.png',
                    self.load_save_folder,
                    name)
            datapath = self.load_save_folder
        else:
            datapath = os.getcwd()
            if svg:
                filename = name + ".svg"
            else:
                filename = name + ".png"
        if filename is None:
            return
        file_path = os.path.join(datapath, filename)
        # Write the SVG from the accumulated string, or grab the canvas.
        if svg:
            if self.svg_string == '':
                return
            save_svg(self.svg_string, file_path)
            self.svg_string = ''
        else:
            save_picture(self.canvas, file_path)
            # keep a log of the saved pictures for export to HTML
            self.saved_pictures.append(file_path)
        if self.running_sugar:
            # Also push the image into the Journal datastore.
            dsobject = datastore.create()
            if len(name) == 0:
                dsobject.metadata['title'] = "%s %s" % \
                    (self.activity.metadata['title'], _("image"))
            else:
                dsobject.metadata['title'] = name
            dsobject.metadata['icon-color'] = profile.get_color().to_string()
            if svg:
                dsobject.metadata['mime_type'] = 'image/svg+xml'
            else:
                dsobject.metadata['mime_type'] = 'image/png'
            dsobject.set_file_path(file_path)
            datastore.write(dsobject)
            dsobject.destroy()
def just_blocks(self):
""" Filter out 'proto', 'trash', and 'deleted' blocks """
just_blocks_list = []
for _blk in self.block_list.list:
if _blk.type == 'block':
just_blocks_list.append(_blk)
return just_blocks_list
def _width_and_height(self, blk):
""" What are the width and height of a stack? """
minx = 10000
miny = 10000
maxx = -10000
maxy = -10000
for gblk in find_group(blk):
(x, y) = gblk.spr.get_xy()
w, h = gblk.spr.get_dimensions()
if x < minx:
minx = x
if y < miny:
miny = y
if x + w > maxx:
maxx = x + w
if y + h > maxy:
maxy = y + h
return(maxx - minx, maxy - miny)
    # Utilities related to putting an image 'skin' on a block
def _calc_image_offset(self, name, spr, iw=0, ih=0):
""" Calculate the postion for placing an image onto a sprite. """
_l, _t = spr.label_left_top()
if name == '':
return _l, _t
_w = spr.label_safe_width()
_h = spr.label_safe_height()
if iw == 0:
iw = self.media_shapes[name].get_width()
ih = self.media_shapes[name].get_height()
return int(_l + (_w - iw) / 2), int(_t + (_h - ih) / 2)
def _calc_w_h(self, name, spr):
""" Calculate new image size """
target_w = spr.label_safe_width()
target_h = spr.label_safe_height()
if name == '':
return target_w, target_h
image_w = self.media_shapes[name].get_width()
image_h = self.media_shapes[name].get_height()
scale_factor = float(target_w) / image_w
new_w = target_w
new_h = image_h * scale_factor
if new_h > target_h:
scale_factor = float(target_h) / new_h
new_h = target_h
new_w = target_w * scale_factor
return int(new_w), int(new_h)
def _proto_skin(self, name, n, i):
""" Utility for calculating proto skin images """
x, y = self._calc_image_offset(name, self.palettes[n][i].spr)
self.palettes[n][i].spr.set_image(self.media_shapes[name], 1, x, y)
def _block_skin(self, name, blk):
""" Some blocks get a skin """
x, y = self._calc_image_offset(name, blk.spr)
blk.set_image(self.media_shapes[name], x, y)
self._resize_skin(blk)
def _resize_skin(self, blk):
""" Resize the 'skin' when block scale changes. """
if blk.name in PYTHON_SKIN:
w, h = self._calc_w_h('pythonoff', blk.spr)
x, y = self._calc_image_offset('pythonoff', blk.spr, w, h)
elif blk.name == 'journal':
if len(blk.values) == 1 and blk.values[0] is not None:
w, h = self._calc_w_h('', blk.spr)
x, y = self._calc_image_offset('journaloff', blk.spr, w, h)
else:
w, h = self._calc_w_h('journaloff', blk.spr)
x, y = self._calc_image_offset('journaloff', blk.spr, w, h)
else:
w, h = self._calc_w_h('descriptionoff', blk.spr)
x, y = self._calc_image_offset('descriptionoff', blk.spr, w, h)
blk.scale_image(x, y, w, h)
| max630/turtleart-hacks | TurtleArt/tawindow.py | Python | mit | 103,114 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2017-05-23 13:30
from __future__ import unicode_literals
from django.db import migrations
class Migration(migrations.Migration):
    """Rename SparkJobRun.scheduled_date to scheduled_at."""

    dependencies = [
        ("jobs", "0032_sparkjobrun_ready_at"),
    ]

    operations = [
        migrations.RenameField(
            model_name="sparkjobrun",
            old_name="scheduled_date",
            new_name="scheduled_at",
        ),
    ]
| mozilla/telemetry-analysis-service | atmo/jobs/migrations/0033_rename_scheduled_date.py | Python | mpl-2.0 | 405 |
import time
class Session(object):
    """A logged-in user's session, expiring after a fixed idle period."""

    # Session lifetime in seconds.
    time_limit = 20 * 60
    student_id = None
    can_invoke_api = False

    def __init__(self, user, sess_id):
        self.user = user
        self.sess_id = sess_id
        self.last_accessed = time.time()

    @property
    def timeout(self):
        """Absolute time (epoch seconds) at which this session expires."""
        return self.last_accessed + self.time_limit

    def expired(self):
        """Return True once the expiry time has passed."""
        return time.time() > self.timeout
def is_valid_session(sess_id, sessions):
    """Return True when `sess_id` names a known, unexpired session."""
    match = find_session(sess_id, sessions)
    return match is not None and not match.expired()
def find_session(sess_id, sessions: list) -> Session:
    """Return the session with the given id, or None when absent."""
    return next((candidate for candidate in sessions
                 if candidate.sess_id == sess_id), None)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
:py:mod:`__init__.py` - Initialization
--------------------------------------
'''
from __future__ import division, print_function, absolute_import, unicode_literals
from .tess import *
#: The string that identifies individual targets for this mission
IDSTRING = 'TIC'

#: The character abbreviation of the name given to an observing "season" for this mission
SEASONCHAR = 'S'

#: The string representing the filter/band used in the mission
MAGSTRING = r'T'

#: The time units for the mission
TIMEUNITS = 'BJD'

#: The currently supported seasons
SEASONS = []


def ISTARGET(x):
    """Return True if `x` is a valid `TESS` target identifier (necessary but not sufficient).

    The exact type check (`type(x) is int`, not isinstance) keeps the original
    lambda's semantics: int subclasses such as bool are rejected.
    """
    return type(x) is int


#: The published light curve CSV file header
CSVHEADER = \
'''TIC %07d
============
'''
# -*- coding: utf-8 -*-
from django.contrib.sites.models import Site
from django.conf import settings
def get_site_url(request, slash=False):
    """Return the current Site's root URL, e.g. 'http://example.com'.

    With slash=True a trailing '/' is appended.
    """
    scheme = 'https' if request.is_secure() else 'http'
    root = '%s://%s' % (scheme, Site.objects.get_current().domain)
    if slash:
        return root + '/'
    return root
def absolute(request):
    """Context processor exposing absolute root URLs to templates."""
    root_with_slash = request.build_absolute_uri('/')
    urls = {
        'ABSOLUTE_ROOT': root_with_slash[:-1],
        'ABSOLUTE_ROOT_URL': root_with_slash,
    }
    # Site-based URLs are only available when the sites framework is installed.
    if 'django.contrib.sites' in settings.INSTALLED_APPS:
        urls['SITE_ROOT'] = get_site_url(request)
        urls['SITE_ROOT_URL'] = get_site_url(request, True)
    return urls
# Copyright (c) 2014 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public
# License as published by the Free Software Foundation; either version
# 2 of the License (GPLv2) or (at your option) any later version.
# There is NO WARRANTY for this software, express or implied,
# including the implied warranties of MERCHANTABILITY,
# NON-INFRINGEMENT, or FITNESS FOR A PARTICULAR PURPOSE. You should
# have received a copy of GPLv2 along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
import os
import socket
from pulp.common.config import Config, REQUIRED, ANY, NUMBER, BOOL, OPTIONAL
# Built-in defaults for every consumer configuration option; values loaded by
# read_config() from the config files override these.
DEFAULT = {
    # Pulp server connection settings.
    'server': {
        'host': socket.gethostname(),
        'port': '443',
        'api_prefix': '/pulp/api',
        'rsa_pub': '/etc/pki/pulp/consumer/server/rsa_pub.key',
        'verify_ssl': 'true',
        'ca_path': '/etc/pki/tls/certs/ca-bundle.crt',
    },
    # RSA key pair used to authenticate this consumer.
    'authentication': {
        'rsa_key': '/etc/pki/pulp/consumer/rsa.key',
        'rsa_pub': '/etc/pki/pulp/consumer/rsa_pub.key'
    },
    'client': {
        'role': 'consumer'
    },
    # Filesystem locations for extensions, repo files, keys and certificates.
    'filesystem': {
        'extensions_dir': '/usr/lib/pulp/consumer/extensions',
        'repo_file': '/etc/yum.repos.d/pulp.repo',
        'mirror_list_dir': '/etc/yum.repos.d',
        'gpg_keys_dir': '/etc/pki/pulp-gpg-keys',
        'cert_dir': '/etc/pki/pulp/client/repo',
        'id_cert_dir': '/etc/pki/pulp/consumer/',
        'id_cert_filename': 'consumer-cert.pem',
    },
    # Whether (and how soon) the consumer may reboot after updates.
    'reboot': {
        'permit': 'false',
        'delay': '3',
    },
    # Console output behaviour.
    'output': {
        'poll_frequency_in_seconds': '1',
        'enable_color': 'true',
        'wrap_to_terminal': 'false',
        'wrap_width': '80',
    },
    # Message bus connection (AMQP/Qpid).
    'messaging': {
        'scheme': 'amqp',
        'host': None,
        'port': '5672',
        'transport': 'qpid',
        'cacert': None,
        'clientcert': None,
    },
    # Interval (in minutes) between profile reports.
    'profile': {
        'minutes': '240',
    }
}
# Validation schema applied by read_config(): a tuple of
# (section_name, section_required, ((option, option_required, value_spec), ...))
# using the REQUIRED/OPTIONAL/ANY/NUMBER/BOOL markers from pulp.common.config.
SCHEMA = (
    ('server', REQUIRED,
        (
            ('host', REQUIRED, ANY),
            ('port', REQUIRED, NUMBER),
            ('api_prefix', REQUIRED, ANY),
            ('verify_ssl', REQUIRED, BOOL),
            ('ca_path', REQUIRED, ANY),
            ('rsa_pub', REQUIRED, ANY),
        )
    ),
    ('authentication', REQUIRED,
        (
            ('rsa_key', REQUIRED, ANY),
            ('rsa_pub', REQUIRED, ANY),
        )
    ),
    ('client', REQUIRED,
        (
            # The role must literally be 'consumer' (regex match).
            ('role', REQUIRED, r'consumer'),
        )
    ),
    ('filesystem', REQUIRED,
        (
            ('extensions_dir', REQUIRED, ANY),
            ('repo_file', REQUIRED, ANY),
            ('mirror_list_dir', REQUIRED, ANY),
            ('gpg_keys_dir', REQUIRED, ANY),
            ('cert_dir', REQUIRED, ANY),
            ('id_cert_dir', REQUIRED, ANY),
            ('id_cert_filename', REQUIRED, ANY),
        )
    ),
    ('reboot', REQUIRED,
        (
            ('permit', REQUIRED, BOOL),
            ('delay', REQUIRED, NUMBER),
        )
    ),
    ('output', REQUIRED,
        (
            ('poll_frequency_in_seconds', REQUIRED, NUMBER),
            ('enable_color', REQUIRED, BOOL),
            ('wrap_to_terminal', REQUIRED, BOOL),
            ('wrap_width', REQUIRED, NUMBER)
        )
    ),
    ('messaging', REQUIRED,
        (
            # Allowed messaging URL schemes (regex alternation).
            ('scheme', REQUIRED, r'(tcp|ssl|amqp|amqps)'),
            ('host', OPTIONAL, ANY),
            ('port', REQUIRED, NUMBER),
            ('transport', REQUIRED, ANY),
            ('cacert', OPTIONAL, ANY),
            ('clientcert', OPTIONAL, ANY)
        )
    ),
    ('profile', REQUIRED,
        (
            ('minutes', REQUIRED, NUMBER),
        )
    ),
)
def read_config(paths=None, validate=True):
    """
    Read and validate the consumer configuration.
    :param paths: A list of paths to configuration files to read.
        Reads the standard locations when not specified.
    :type paths: list
    :param validate: Validate the configuration.
    :type validate: bool
    :return: A configuration object.
    :rtype: Config
    """
    if paths:
        # Work on a copy so the caller's list is never mutated by the
        # append of the user-overrides path below (the original code
        # grew the caller's list on every invocation).
        paths = list(paths)
    else:
        paths = ['/etc/pulp/consumer/consumer.conf']
        conf_d_dir = '/etc/pulp/consumer/conf.d'
        paths += [os.path.join(conf_d_dir, i) for i in sorted(os.listdir(conf_d_dir))]
    # Per-user overrides are read last so they win over system files.
    overrides = os.path.expanduser('~/.pulp/consumer.conf')
    if os.path.exists(overrides):
        paths.append(overrides)
    config = Config(DEFAULT)
    config.update(Config(*paths))
    if validate:
        config.validate(SCHEMA)
    return config
| mhrivnak/pulp | client_consumer/pulp/client/consumer/config.py | Python | gpl-2.0 | 4,554 |
# coding=utf-8
"""
The MEA method consist of the two followings steps:
-Estimate the value of the row and col systematic errors, independently for every plate of the assay, by solving the
system of linear equations/
-Adjust the measurement of all compounds located in rows and col of the plate affected by the systematic error using
the error estimates determined in previous step.
"""
import numpy as np
from scipy import stats
import logging
log = logging.getLogger(__name__)
__author__ = "Arnaud KOPP"
__copyright__ = "© 2014-2017 KOPP Arnaud All Rights Reserved"
__credits__ = ["KOPP Arnaud"]
__license__ = "GPLv3"
__maintainer__ = "Arnaud KOPP"
__email__ = "kopp.arnaud@gmail.com"
def matrix_error_amendmend(input_array, verbose=False, alpha=0.05, skip_col=[], skip_row=[]):
"""
Implementation of Matrix Error Amendment , published in 'Two effective methods for correcting experimental
HTS data ' Dragiev, et al 2012
:param alpha: alpha for TTest
:param verbose: print or not result
:param input_array: numpy ndarray represent data
:param skip_col: index of col to skip
:param skip_row: index of row to skip
:return: normalized array
"""
assert isinstance(input_array, np.ndarray)
array_org = input_array.copy()
# # count number of row and col affected by systematic error
shape = input_array.shape
nrows = []
ncols = []
# search systematic error in row
for row in range(shape[0]):
t, prob = stats.ttest_ind(input_array[row, :].flatten(), np.delete(input_array, row, 0).flatten(),
equal_var=False)
if prob < alpha:
nrows.append(row)
# search systematic error in column
for col in range(shape[1]):
t, prob = stats.ttest_ind(input_array[:, col].flatten(), np.delete(input_array, col, 1).flatten(),
equal_var=False)
if prob < alpha:
ncols.append(col)
nrows = [x for x in nrows if x not in skip_row]
ncols = [x for x in ncols if x not in skip_col]
# exit if not row or col affected
n = nrows.__len__() + ncols.__len__()
if n == 0:
log.info('MEA : No Systematics Error detected')
return input_array
mu = 0
# # compute mu
for row in range(shape[0]):
if row not in nrows:
for col in range(shape[1]):
if col not in ncols:
mu += input_array[row][col]
mu /= ((shape[0] - nrows.__len__()) * (shape[1] - ncols.__len__()))
# exact solution
x = np.zeros(n)
a = np.zeros([n, n])
b = np.zeros(n)
for i in range(nrows.__len__()):
r = nrows[i]
a[i][i] = shape[1]
for j in range(nrows.__len__(), n, 1):
a[i, j] = 1.0
b[i] = -shape[1] * mu
for k in range(0, shape[1], 1):
b[i] += input_array[r][k]
for i in range(nrows.__len__(), n, 1):
c = ncols[i - nrows.__len__()]
a[i][i] = shape[0]
for j in range(0, nrows.__len__(), 1):
a[i][j] = 1.0
b[i] = -shape[0] * mu
for k in range(0, shape[0], 1):
b[i] += input_array[k][c]
a = np.linalg.inv(a)
# x = Inv(a) * b, x is the estimated row and column error
for i in range(n):
x[i] = 0.0
for j in range(n):
x[i] += a[i][j] * b[j]
# Remove the systematic error form the plate measure
for i in range(nrows.__len__()):
r = nrows[i]
for j in range(shape[1]):
input_array[r][j] -= x[i]
for i in range(nrows.__len__(), n, 1):
c = ncols[i - nrows.__len__()]
for j in range(nrows.__len__()):
input_array[j][c] -= x[i]
if verbose:
print("MEA methods for removing systematics error")
print(u'\u03B1'" for T-Test : ", alpha)
print("-----Normalized Table-------")
print(input_array)
print("-----Original Table-------")
print(array_org)
print("")
return input_array
| ArnaudKOPP/TransCellAssay | TransCellAssay/Stat/Normalization/MatrixErrorAmendment.py | Python | gpl-3.0 | 4,043 |
# BlenderBIM Add-on - OpenBIM Blender Add-on
# Copyright (C) 2021 Dion Moult <dion@thinkmoult.com>
#
# This file is part of BlenderBIM Add-on.
#
# BlenderBIM Add-on is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# BlenderBIM Add-on is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with BlenderBIM Add-on. If not, see <http://www.gnu.org/licenses/>.
import bpy
import ifcopenshell
import blenderbim.core.tool
import blenderbim.tool as tool
from test.bim.bootstrap import NewFile
from blenderbim.tool.root import Root as subject
class TestImplementsTool(NewFile):
    def test_run(self):
        instance = subject()
        assert isinstance(instance, blenderbim.core.tool.Root)
class TestAddDynamicOpeningVoids(NewFile):
    def test_run(self):
        ifc = ifcopenshell.file()
        tool.Ifc.set(ifc)
        # The opening element and its Blender object.
        obj = bpy.data.objects.new("Object", bpy.data.meshes.new("Mesh"))
        element = ifc.createIfcOpeningElement()
        tool.Ifc.link(element, obj)
        # The voided element and its Blender object.
        # NOTE(review): this is created as an IfcOpeningElement although it is
        # named "wall" — possibly meant to be ifc.createIfcWall(); confirm.
        wall_obj = bpy.data.objects.new("Object", bpy.data.meshes.new("Mesh"))
        wall_element = ifc.createIfcOpeningElement()
        tool.Ifc.link(wall_element, wall_obj)
        ifcopenshell.api.run("void.add_opening", ifc, opening=element, element=wall_element)
        subject.add_dynamic_opening_voids(element, obj)
        # The subject must have added a boolean DIFFERENCE modifier on the
        # voided object, pointing at the opening object.
        modifier = wall_obj.modifiers[0]
        assert modifier.type == "BOOLEAN"
        assert modifier.name == "IfcOpeningElement"
        assert modifier.operation == "DIFFERENCE"
        assert modifier.object == obj
        assert modifier.solver == "EXACT"
        assert modifier.use_self is True
class TestDoesTypeHaveRepresentations(NewFile):
    def test_run(self):
        model = ifcopenshell.file()
        wall_type = model.createIfcWallType()
        assert subject.does_type_have_representations(wall_type) is False
        wall_type.RepresentationMaps = [model.createIfcRepresentationMap()]
        assert subject.does_type_have_representations(wall_type) is True
class TestGetElementType(NewFile):
    def test_run(self):
        bpy.ops.bim.create_project()
        model = tool.Ifc.get()
        wall = model.createIfcWall()
        wall_type = model.createIfcWallType()
        ifcopenshell.api.run("type.assign_type", model, related_object=wall, relating_type=wall_type)
        assert subject.get_element_type(wall) == wall_type
class TestGetObjectName(NewFile):
    def test_run(self):
        plain = bpy.data.objects.new("Object", None)
        assert subject.get_object_name(plain) == "Object"

    def test_blender_number_suffixes_are_ignored(self):
        suffixed = bpy.data.objects.new("Object.001", None)
        assert subject.get_object_name(suffixed) == "Object"
        dotted = bpy.data.objects.new("Object.foo.123", None)
        assert subject.get_object_name(dotted) == "Object.foo"
class TestGetObjectRepresentation(NewFile):
    def test_run(self):
        model = ifcopenshell.file()
        tool.Ifc.set(model)
        representation = model.createIfcShapeRepresentation()
        mesh_obj = bpy.data.objects.new("Object", bpy.data.meshes.new("Mesh"))
        mesh_obj.data.BIMMeshProperties.ifc_definition_id = representation.id()
        assert subject.get_object_representation(mesh_obj) == representation
class TestGetRepresentationContext(NewFile):
    def test_run(self):
        model = ifcopenshell.file()
        tool.Ifc.set(model)
        geom_context = model.createIfcGeometricRepresentationContext()
        representation = model.createIfcShapeRepresentation(ContextOfItems=geom_context)
        assert subject.get_representation_context(representation) == geom_context
class TestIsOpeningElement(NewFile):
    def test_run(self):
        model = ifcopenshell.file()
        wall = model.createIfcWall()
        opening = model.createIfcOpeningElement()
        assert subject.is_opening_element(wall) is False
        assert subject.is_opening_element(opening) is True
class TestLinkObjectData(NewFile):
    def test_run(self):
        mesh = bpy.data.meshes.new("Mesh")
        source = bpy.data.objects.new("Object", mesh)
        destination = bpy.data.objects.new("Object", bpy.data.meshes.new("Mesh"))
        subject.link_object_data(source, destination)
        assert source.data == mesh
        assert source.data == destination.data
class TestRunGeometryAddRepresntation(NewFile):
    # NOTE(review): the class name misspells "Representation"; renaming would
    # be harmless for discovery (only the "Test" prefix matters) but is left
    # to the maintainers.
    def test_nothing(self):
        # Placeholder: no standalone behaviour is verified here.
        pass
class TestSetElementSpecificDisplaySettings(NewFile):
    def test_opening_elements_display_as_wire(self):
        model = ifcopenshell.file()
        opening = model.createIfcOpeningElement()
        mesh_obj = bpy.data.objects.new("Object", bpy.data.meshes.new("Mesh"))
        subject.set_element_specific_display_settings(mesh_obj, opening)
        assert mesh_obj.display_type == "WIRE"
class TestSetObjectName(NewFile):
    def test_run(self):
        model = ifcopenshell.file()
        wall = model.createIfcWall()
        blender_obj = bpy.data.objects.new("Object", bpy.data.meshes.new("Mesh"))
        subject.set_object_name(blender_obj, wall)
        assert blender_obj.name == "IfcWall/Object"

    def test_existing_ifc_prefixes_are_not_repeated(self):
        model = ifcopenshell.file()
        wall = model.createIfcWall()
        blender_obj = bpy.data.objects.new("IfcSlab/Object", bpy.data.meshes.new("Mesh"))
        subject.set_object_name(blender_obj, wall)
        assert blender_obj.name == "IfcWall/Object"
| IfcOpenShell/IfcOpenShell | src/blenderbim/test/tool/test_root.py | Python | lgpl-3.0 | 5,658 |
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django.core.context_processors import csrf
from django.views.generic.base import View, TemplateView
from django.views import generic
from django.shortcuts import render_to_response
from django.template import RequestContext, loader, Context
from django.utils import simplejson
from settings import CLARUS_DBCONN_STRING
import psycopg2, sys, pprint, json
from datetime import datetime
from django.views.decorators.csrf import ensure_csrf_cookie
import logging
from array import *
# logger = logging.getLogger('print')
class LoadCanvas(View):
    """Serve the main canvas page with a CSRF token in the context."""

    template_name = "index.html"

    def get(self, request, *args, **kwargs):
        context = {}
        context.update(csrf(request))
        return render_to_response(self.template_name, context)
class FetchObservations(View):
    """POST endpoint returning 15-minute averaged observation series.

    Expects a JSON body with keys 'startTime' (timestamp string),
    'stationID' (list of station ids) and 'obsType' (list of observation
    type ids). For each observation type it returns the values averaged
    into 15-minute buckets over the week preceding startTime.
    """

    # Defaults kept for interface compatibility with the original class.
    obsType = array('i')
    startTime = ''
    stationID = ''

    def post(self, request, *args, **kwargs):
        try:
            json_data = simplejson.loads(request.body)
            self.startTime = str(json_data['startTime'])
            self.stationID = json_data['stationID']
            self.obsType = json_data['obsType']
            # Coerce request values to safe types before they reach SQL.
            station_ids = [int(x) for x in self.stationID]
            conn = psycopg2.connect(CLARUS_DBCONN_STRING)
            try:
                cursor = conn.cursor()
                r = []
                for obs in self.obsType:
                    data = {}
                    # Parameterized query — request data is never interpolated
                    # into the SQL string (fixes the SQL injection present in
                    # the original string-built query).
                    cursor.execute(
                        "SELECT "
                        "TO_CHAR( (date_trunc('hour', tstamp) + INTERVAL '15 min' * ROUND(date_part('minute', tstamp) / 15.0)), 'YYYY-MM-DD HH24:MI' ) AS tstamp, "
                        "AVG( metric_value ) AS metric_value "
                        "FROM clarus.observation, clarus.sensor "
                        "WHERE clarus.observation.sensor_id=clarus.sensor.sensor_id "
                        "AND station_id = ANY(%s) AND observation_type = %s "
                        "AND tstamp >= (%s::timestamp - INTERVAL '1 week') AND tstamp < %s::timestamp "
                        "GROUP BY date_trunc('hour', tstamp) + INTERVAL '15 min' * ROUND(date_part('minute', tstamp) / 15.0) "
                        "ORDER BY tstamp asc",
                        (station_ids, int(obs), self.startTime, self.startTime))
                    data['rows'] = [dict((cursor.description[i][0], value)
                                         for i, value in enumerate(row)) for row in cursor.fetchall()]
                    data['title'] = obs
                    r.append(data)
            finally:
                # Always release the connection, even when a query fails.
                conn.close()
            json_output = simplejson.dumps(r)
            return HttpResponse(json_output, content_type="application/json")
        except Exception:
            # Narrowed from a bare `except:`; keeps the original generic
            # error page so callers see unchanged behaviour on failure.
            return HttpResponse("<h1>Error in running query</h1>")
| awalin/rwis | views.py | Python | lgpl-3.0 | 4,395 |
import unittest
from katas.kyu_8.multiply import multiply
class MultiplyTestCase(unittest.TestCase):
    """Check `multiply` against a couple of known products."""

    def test_equals(self):
        result = multiply(3, 4)
        self.assertEqual(result, 12)

    def test_equals_2(self):
        result = multiply(10, 10)
        self.assertEqual(result, 100)
| the-zebulan/CodeWars | tests/kyu_8_tests/test_multiply.py | Python | mit | 254 |
# ###
# Copyright (c) 2010 Konstantinos Spyropoulos <inigo.aldana@gmail.com>
#
# This file is part of inimailbot
#
# inimailbot is free software: you can redistribute it and/or modify it under the terms of the
# GNU General Public License as published by the Free Software Foundation, either version 3 of
# the License, or (at your option) any later version.
#
# inimailbot is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
# without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with inimailbot.
# If not, see http://www.gnu.org/licenses/.
# #####
import os, logging, re
from google.appengine.ext import webapp
from google.appengine.ext.webapp.util import run_wsgi_app
from receive_ankicrashes import Bug
class ScanIssues(webapp.RequestHandler):
    """Cron handler: link unlinked Bug entities to the issue they match."""

    def get(self):
        """Fetch up to 1000 unlinked bugs and record the first matching issue id."""
        bugs_query = Bug.all()
        bugs_query.filter('linked =', False)
        # (The redundant `bugs = []` dead store before fetch() was removed.)
        bugs = bugs_query.fetch(1000)
        for bg in bugs:
            issues = bg.findIssue()
            if issues:
                bg.issueName = issues[0]['id']
                logging.info("ScanIssues: Autolinking bug " + str(bg.key().id()) + " to issue " + str(bg.issueName))
                bg.put()
class UpdateStatusesPriorities(webapp.RequestHandler):
    """Cron handler: refresh status/priority of bugs already linked to an issue."""

    def get(self):
        bugs_query = Bug.all()
        bugs_query.filter('linked =', True)
        # (Removed a dead `bugs = []` store and a stale commented-out filter.)
        bugs = bugs_query.fetch(1000)
        logging.debug("Cron job updater, found " + str(bugs_query.count(1000000)) + " bugs")
        for bg in bugs:
            if bg.updateStatusPriority():
                logging.debug("Updated status and/or priority for bug: '" + str(bg.key().id()) + "'")
                bg.put()
# URL routing for the two triage cron endpoints.
application = webapp.WSGIApplication(
    [(r'^/ankidroid_triage/cron_updater/status_priority?.*', UpdateStatusesPriorities),
    (r'^/ankidroid_triage/cron_updater/issue_scanner?.*', ScanIssues)],
    debug=True)
def main():
    """App Engine CGI entry point: run the WSGI application."""
    run_wsgi_app(application)
if __name__ == "__main__":
    main()
| edu-zamora/inimailbot | cron_updater.py | Python | gpl-3.0 | 2,032 |
__author__ = 'MegabytePhreak'
import argparse
from .systemrdl.preprocessor import preprocess_mode, preprocess
from .systemrdl.rdl_parser import RdlParser
import re
from .config import Config
import colorize
from logger import logger
preprocessor_mode_map = {
'auto': preprocess_mode.AUTO,
'none': preprocess_mode.NONE,
'perlpp': preprocess_mode.PERL_ONLY,
'verilog': preprocess_mode.VERILOG_ONLY,
'full': preprocess_mode.BOTH
}
def parse_args():
    """Build the rdlcompiler command-line parser and parse sys.argv.

    :return: the parsed argument namespace
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('inputs', nargs='+', action='store',
                        help=
                        """Files to process. The contents of the files will be concatenated before any
                        preprocessing.""")
    # Options that control preprocessing and compilation.
    compile_opts = parser.add_argument_group('Compiler Options')
    compile_opts.add_argument('-E', '--preprocess-only', action='store_true', dest='preprocess_only',
                              help="Only preprocess the input files, do not compile them and produce outputs")
    compile_opts.add_argument('-o', '--output', action='store', dest='output',
                              help="Output filename or prefix")
    compile_opts.add_argument('-p', '--preprocessor', choices=preprocessor_mode_map, action='store', default='auto',
                              help="Type of preprocessing to perform, default='auto'")
    compile_opts.add_argument('-D', '--debug', action='store_true', dest='debug',
                              help="Print parser debugging info")
    compile_opts.add_argument('--no-color', action='store_false', dest='colorize',
                              help="Disable colorized error messages")
    # Options that control which configuration files are loaded.
    conf = parser.add_argument_group('Configuration Options')
    conf.add_argument('--no-system-config', action='store_false', dest='load_system_config',
                      help="Don't load the system-level configuration file: '%s'" % Config.SYSTEM_CONFIG_PATH)
    conf.add_argument('--no-user-config', action='store_false', dest='load_user_config',
                      help="Don't load the user-level configuration file: '%s'" % Config.USER_CONFIG_PATH)
    conf.add_argument('-C', '--config', action='append', dest='config_files', type=argparse.FileType('rb'),
                      metavar='CONFIG_FILE', default=[],
                      help="Load extra configuration file")
    # -O values are parsed into (section, option, value) tuples by parse_override.
    conf.add_argument('-O', '--override', action='append', dest='overrides', type=parse_override,
                      metavar='SECTION.OPTION=VALUE', default=[],
                      help="Override loaded configuration settings with specified value")
    return parser.parse_args()
def parse_override(arg):
    """Parse a 'SECTION.OPTION=VALUE' string into a (section, option, value) tuple.

    Raises argparse.ArgumentTypeError when the string does not match.
    """
    match = re.match(r'([^.]+)\.([^.]+)=(.+)', arg)
    if match is None:
        raise argparse.ArgumentTypeError('Expected configuration override')
    return match.groups()
def main():
    """Entry point: load configuration, preprocess, parse and dump the AST."""
    Config.create()
    args = parse_args()
    colorize.init(args.colorize, logger.get_dest())
    # Load configuration layers: system file, user file, extra -C files,
    # then individual -O overrides.
    if args.load_system_config:
        Config.cfg().read(Config.SYSTEM_CONFIG_PATH)
    if args.load_user_config:
        Config.cfg().read(Config.USER_CONFIG_PATH)
    for f in args.config_files:
        Config.cfg().readfp(f, f.name)
    for override in args.overrides:
        Config.cfg().set(override[0], override[1], override[2])
    contents = preprocess(args.inputs, preprocessor_mode_map[args.preprocessor])
    if args.preprocess_only:
        # -E: emit the preprocessed text and stop before parsing.
        if args.output is None:
            print contents
        else:
            open(args.output, 'wb').write(contents)
        exit(0)
    p = RdlParser()
    p.debug = args.debug
    ast = p.parse(contents)
    for node in ast:
        print node
if __name__ == '__main__':
    main()
# Copyright 2016 Casey Jaymes
# This file is part of PySCAP.
#
# PySCAP is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# PySCAP is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with PySCAP. If not, see <http://www.gnu.org/licenses/>.
import logging
from scap.model.xhtml import *
from scap.model.xhtml.BlockType import BlockType
logger = logging.getLogger(__name__)
class BodyTag(BlockType):
    # Model for the XHTML <body> tag: extends BlockType with the <body>-specific
    # intrinsic event attributes.
    MODEL_MAP = {
        'attributes': {
            'onload': {'type': 'ScriptType'},
            'onunload': {'type': 'ScriptType'},
        }
    }
    # Merge in the shared XHTML attribute group pulled from scap.model.xhtml.
    MODEL_MAP['attributes'].update(ATTRIBUTE_GROUP_attrs)
| cjaymes/pyscap | src/scap/model/xhtml/BodyTag.py | Python | gpl-3.0 | 1,051 |
import tkinter as tk
from tkinter import ttk
import sys
class GUI(object):
    """A frame holding a single 'Test' button wired to cmd.button."""

    def __init__(self, master, cmd):
        object.__init__(self)
        self.master = master
        self.cmd = cmd
        # Build the widget tree. For callbacks needing arguments, a
        # default-bound lambda would be used as the command instead.
        self.frame = ttk.Frame(self.master)
        self.button = ttk.Button(self.frame, text='Test', command=self.cmd.button)
        # Lay everything out.
        self.frame.pack()
        self.button.pack()
class Commands(object):
    """Button callbacks, kept separate from the widget layer."""

    def __init__(self):
        object.__init__(self)

    @staticmethod
    def button():
        message = 'Button command'
        print(message)

    @staticmethod
    def nothing(number):
        message = 'What? Nothing! Or maybe {}!'.format(number)
        print(message)
def main():
    """Create the root window, build the GUI and enter the Tk main loop.

    Returns None, so `sys.exit(main())` exits with status 0.
    """
    root = tk.Tk()
    # No need to keep the GUI wrapper in a local: the widgets (and their
    # callbacks) live on in the Tk hierarchy. The unused `gui` variable
    # was removed.
    GUI(root, Commands())
    root.mainloop()


if __name__ == '__main__':
    sys.exit(main())
from django import forms
from wagtail.admin.widgets import AdminDateTimeInput
from wagtail.documents.forms import BaseDocumentForm
from wagtail.images.forms import BaseImageForm
class OverriddenWidget(forms.Widget):
    # Bare widget subclass used below to override the default field widgets.
    pass
class AlternateImageForm(BaseImageForm):
    # Field declared on the form only (presumably not backed by the image
    # model, as the name suggests — confirm against BaseImageForm's model).
    form_only_field = forms.DateTimeField()

    class Meta:
        widgets = {
            # Start from the base form's widget overrides, then replace
            # specific fields.
            **BaseImageForm.Meta.widgets,
            "tags": OverriddenWidget,
            "file": OverriddenWidget,
            "form_only_field": AdminDateTimeInput
        }
class AlternateDocumentForm(BaseDocumentForm):
    # Field declared on the form only.
    form_only_field = forms.DateTimeField()

    class Meta:
        # NOTE(review): unlike AlternateImageForm above, this widgets dict
        # does not merge in BaseDocumentForm.Meta.widgets — confirm whether
        # that asymmetry is intentional.
        widgets = {
            "tags": OverriddenWidget,
            "file": OverriddenWidget,
            "form_only_field": AdminDateTimeInput
        }
| zerolab/wagtail | wagtail/tests/testapp/media_forms.py | Python | bsd-3-clause | 796 |
# Copyright (C) 2008, Benjamin Berg <benjamin@sipsolutions.net>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import logging
import gobject
import gtk
from jarabe.model import shell
_RAISE_DELAY = 250
class TabbingHandler(object):
    """Alt-tab style cycling between activities.

    While the configured modifier key is held, keyboard and pointer are
    grabbed and repeated next/previous calls move the selection; stop()
    releases the grabs and activates the selected activity.
    """

    def __init__(self, frame, modifier):
        self._frame = frame
        self._tabbing = False
        self._modifier = modifier
        self._timeout = None

    def _start_tabbing(self):
        """Grab keyboard and pointer and show the frame.

        The grabs are released again immediately if either grab failed or
        the modifier key is no longer held (guards against races).
        """
        if not self._tabbing:
            logging.debug('Grabing the input.')
            screen = gtk.gdk.screen_get_default()
            window = screen.get_root_window()
            keyboard_grab_result = gtk.gdk.keyboard_grab(window)
            pointer_grab_result = gtk.gdk.pointer_grab(window)
            self._tabbing = (keyboard_grab_result == gtk.gdk.GRAB_SUCCESS and
                             pointer_grab_result == gtk.gdk.GRAB_SUCCESS)
            # Now test that the modifier is still active to prevent race
            # conditions. We also test if one of the grabs failed.
            mask = window.get_pointer()[2]
            if not self._tabbing or not (mask & self._modifier):
                logging.debug('Releasing grabs again.')
                # ungrab keyboard/pointer if the grab was successfull.
                if keyboard_grab_result == gtk.gdk.GRAB_SUCCESS:
                    gtk.gdk.keyboard_ungrab()
                if pointer_grab_result == gtk.gdk.GRAB_SUCCESS:
                    gtk.gdk.pointer_ungrab()
                self._tabbing = False
            else:
                self._frame.show(self._frame.MODE_NON_INTERACTIVE)

    def __timeout_cb(self, event_time):
        """Timer callback: raise the currently selected activity once."""
        self._activate_current(event_time)
        self._timeout = None
        return False  # one-shot: remove the glib timeout source

    def _start_timeout(self, event_time):
        """(Re)arm the delayed-raise timer."""
        self._cancel_timeout()
        self._timeout = gobject.timeout_add(_RAISE_DELAY,
                                            lambda: self.__timeout_cb(event_time))

    def _cancel_timeout(self):
        """Cancel a pending delayed raise, if any."""
        if self._timeout:
            gobject.source_remove(self._timeout)
            self._timeout = None

    def _activate_current(self, event_time):
        """Activate the window of the activity currently selected for tabbing."""
        home_model = shell.get_model()
        activity = home_model.get_tabbing_activity()
        if activity and activity.get_window():
            activity.get_window().activate(event_time)

    def next_activity(self, event_time):
        """Move the tabbing selection forward, starting a tabbing run if needed.

        Falls back to directly activating the next activity when the input
        grab could not be established.
        """
        if not self._tabbing:
            first_switch = True
            self._start_tabbing()
        else:
            first_switch = False
        if self._tabbing:
            shell_model = shell.get_model()
            zoom_level = shell_model.zoom_level
            zoom_activity = (zoom_level == shell.ShellModel.ZOOM_ACTIVITY)
            # On the first switch outside the activity zoom level, start
            # cycling from the active activity rather than the tabbing one.
            if not zoom_activity and first_switch:
                activity = shell_model.get_active_activity()
            else:
                activity = shell_model.get_tabbing_activity()
            activity = shell_model.get_next_activity(current=activity)
            shell_model.set_tabbing_activity(activity)
            self._start_timeout(event_time)
        else:
            self._activate_next_activity(event_time)

    def previous_activity(self, event_time):
        """Move the tabbing selection backward (mirror of next_activity)."""
        if not self._tabbing:
            first_switch = True
            self._start_tabbing()
        else:
            first_switch = False
        if self._tabbing:
            shell_model = shell.get_model()
            zoom_level = shell_model.zoom_level
            zoom_activity = (zoom_level == shell.ShellModel.ZOOM_ACTIVITY)
            if not zoom_activity and first_switch:
                activity = shell_model.get_active_activity()
            else:
                activity = shell_model.get_tabbing_activity()
            activity = shell_model.get_previous_activity(current=activity)
            shell_model.set_tabbing_activity(activity)
            self._start_timeout(event_time)
        else:
            # NOTE(review): the fallback activates the *next* activity, same
            # as next_activity above — confirm this asymmetry is intentional.
            self._activate_next_activity(event_time)

    def _activate_next_activity(self, event_time):
        """Directly activate the next activity (no tabbing UI involved)."""
        next_activity = shell.get_model().get_next_activity()
        if next_activity:
            next_activity.get_window().activate(event_time)

    def stop(self, event_time):
        """End a tabbing run: release grabs, hide the frame, activate the selection."""
        gtk.gdk.keyboard_ungrab()
        gtk.gdk.pointer_ungrab()
        self._tabbing = False
        self._frame.hide()
        self._cancel_timeout()
        self._activate_current(event_time)
        home_model = shell.get_model()
        home_model.set_tabbing_activity(None)

    def is_tabbing(self):
        """Return True while a tabbing run is in progress."""
        return self._tabbing
| nemesiscodex/JukyOS-sugar | src/jarabe/view/tabbinghandler.py | Python | gpl-2.0 | 5,177 |
# Copyright (c) 2022, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
import unittest
import frappe
from frappe.utils import add_days, today
from erpnext.accounts.doctype.cost_center.test_cost_center import create_cost_center
from erpnext.accounts.doctype.cost_center_allocation.cost_center_allocation import (
InvalidChildCostCenter,
InvalidDateError,
InvalidMainCostCenter,
MainCostCenterCantBeChild,
WrongPercentageAllocation,
)
from erpnext.accounts.doctype.journal_entry.test_journal_entry import make_journal_entry
class TestCostCenterAllocation(unittest.TestCase):
    """Tests for the Cost Center Allocation doctype.

    Covers GL entry splitting based on allocation percentages and the
    validation errors raised for invalid allocation configurations.
    """

    def setUp(self):
        # Ensure the main/sub cost centers used by the tests exist.
        cost_centers = ["Main Cost Center 1", "Main Cost Center 2", "Sub Cost Center 1", "Sub Cost Center 2"]
        for cc in cost_centers:
            create_cost_center(cost_center_name=cc, company="_Test Company")

    def test_gle_based_on_cost_center_allocation(self):
        # A 60/40 allocation on the main cost center should split the
        # journal entry's GL postings across the two sub cost centers.
        cca = create_cost_center_allocation("_Test Company", "Main Cost Center 1 - _TC",
            {
                "Sub Cost Center 1 - _TC": 60,
                "Sub Cost Center 2 - _TC": 40
            }
        )

        jv = make_journal_entry("_Test Cash - _TC", "Sales - _TC", 100,
            cost_center = "Main Cost Center 1 - _TC", submit=True)

        # [cost_center, debit, credit] per expected GL entry
        expected_values = [
            ["Sub Cost Center 1 - _TC", 0.0, 60],
            ["Sub Cost Center 2 - _TC", 0.0, 40]
        ]

        gle = frappe.qb.DocType("GL Entry")
        gl_entries = (
            frappe.qb.from_(gle)
            .select(gle.cost_center, gle.debit, gle.credit)
            .where(gle.voucher_type == 'Journal Entry')
            .where(gle.voucher_no == jv.name)
            .where(gle.account == 'Sales - _TC')
            .orderby(gle.cost_center)
        ).run(as_dict=1)

        self.assertTrue(gl_entries)

        for i, gle in enumerate(gl_entries):
            self.assertEqual(expected_values[i][0], gle.cost_center)
            self.assertEqual(expected_values[i][1], gle.debit)
            self.assertEqual(expected_values[i][2], gle.credit)

        cca.cancel()
        jv.cancel()

    def test_main_cost_center_cant_be_child(self):
        # Main cost center itself cannot be entered in child table
        cca = create_cost_center_allocation("_Test Company", "Main Cost Center 1 - _TC",
            {
                "Sub Cost Center 1 - _TC": 60,
                "Main Cost Center 1 - _TC": 40
            }, save=False
        )
        self.assertRaises(MainCostCenterCantBeChild, cca.save)

    def test_invalid_main_cost_center(self):
        # If main cost center is used for allocation under any other cost center,
        # allocation cannot be done against it
        cca1 = create_cost_center_allocation("_Test Company", "Main Cost Center 1 - _TC",
            {
                "Sub Cost Center 1 - _TC": 60,
                "Sub Cost Center 2 - _TC": 40
            }
        )

        cca2 = create_cost_center_allocation("_Test Company", "Sub Cost Center 1 - _TC",
            {
                "Sub Cost Center 2 - _TC": 100
            }, save=False
        )
        self.assertRaises(InvalidMainCostCenter, cca2.save)

        cca1.cancel()

    def test_if_child_cost_center_has_any_allocation_record(self):
        # Check if any child cost center is used as main cost center in any other existing allocation
        cca1 = create_cost_center_allocation("_Test Company", "Main Cost Center 1 - _TC",
            {
                "Sub Cost Center 1 - _TC": 60,
                "Sub Cost Center 2 - _TC": 40
            }
        )

        cca2 = create_cost_center_allocation("_Test Company", "Main Cost Center 2 - _TC",
            {
                "Main Cost Center 1 - _TC": 60,
                "Sub Cost Center 1 - _TC": 40
            }, save=False
        )
        self.assertRaises(InvalidChildCostCenter, cca2.save)

        cca1.cancel()

    def test_total_percentage(self):
        # Percentages must add up to exactly 100.
        cca = create_cost_center_allocation("_Test Company", "Main Cost Center 1 - _TC",
            {
                "Sub Cost Center 1 - _TC": 40,
                "Sub Cost Center 2 - _TC": 40
            }, save=False
        )
        self.assertRaises(WrongPercentageAllocation, cca.save)

    def test_valid_from_based_on_existing_gle(self):
        # GLE posted against Sub Cost Center 1 on today
        jv = make_journal_entry("_Test Cash - _TC", "Sales - _TC", 100,
            cost_center = "Main Cost Center 1 - _TC", posting_date=today(), submit=True)

        # try to set valid from as yesterday
        cca = create_cost_center_allocation("_Test Company", "Main Cost Center 1 - _TC",
            {
                "Sub Cost Center 1 - _TC": 60,
                "Sub Cost Center 2 - _TC": 40
            }, valid_from=add_days(today(), -1), save=False
        )
        self.assertRaises(InvalidDateError, cca.save)

        jv.cancel()
def create_cost_center_allocation(company, main_cost_center, allocation_percentages,
        valid_from=None, valid_upto=None, save=True, submit=True):
    """Build a Cost Center Allocation document for tests.

    ``allocation_percentages`` maps child cost center names to their
    percentage share. The document is saved (and submitted) unless the
    corresponding flags are False; it is returned either way.
    """
    doc = frappe.new_doc("Cost Center Allocation")
    doc.main_cost_center = main_cost_center
    doc.company = company
    doc.valid_from = valid_from or today()
    doc.valid_upto = valid_upto

    for cost_center, share in allocation_percentages.items():
        doc.append("allocation_percentages", {
            "cost_center": cost_center,
            "percentage": share,
        })

    if save:
        doc.save()
        if submit:
            doc.submit()

    return doc
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-12-02 05:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the multichoice app.

    Creates the MCQuestion, MCChoice and MCAnswer tables, and links
    each MCAnswer to an MCChoice via a cascading foreign key.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='MCAnswer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('correct', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='MCChoice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('choice_text', models.CharField(max_length=100)),
                ('correct', models.BooleanField(default=False)),
            ],
        ),
        migrations.CreateModel(
            name='MCQuestion',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question_text', models.CharField(max_length=200)),
                ('point_value', models.IntegerField(default=1)),
            ],
        ),
        # Added after MCChoice's CreateModel so the FK target exists.
        migrations.AddField(
            model_name='mcanswer',
            name='question_answer',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='multichoice.MCChoice'),
        ),
    ]
| mtzirkel/leakyskiff | quiz/multichoice/migrations/0001_initial.py | Python | mit | 1,525 |
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import datetime
import hashlib
import inspect
import mock
import six
from testtools import matchers
from nova.conductor import rpcapi as conductor_rpcapi
from nova import context
from nova import exception
from nova import objects
from nova.objects import base
from nova.objects import fields
from nova.openstack.common import jsonutils
from nova.openstack.common import timeutils
from nova import rpc
from nova import test
from nova.tests import fake_notifier
class MyObj(base.NovaPersistentObject, base.NovaObject):
    """Toy object used throughout these tests to exercise NovaObject."""
    VERSION = '1.5'
    fields = {'foo': fields.Field(fields.Integer()),
              'bar': fields.Field(fields.String()),
              'missing': fields.Field(fields.String()),
              }

    @staticmethod
    def _from_db_object(context, obj, db_obj):
        self = MyObj()
        self.foo = db_obj['foo']
        self.bar = db_obj['bar']
        self.missing = db_obj['missing']
        return self

    def obj_load_attr(self, attrname):
        # Lazy-load hook: any unset attribute reads back as 'loaded!'.
        setattr(self, attrname, 'loaded!')

    @base.remotable_classmethod
    def query(cls, context):
        obj = cls(foo=1, bar='bar')
        obj.obj_reset_changes()
        return obj

    @base.remotable
    def marco(self, context):
        return 'polo'

    @base.remotable
    def _update_test(self, context):
        # Behavior depends on the calling context so tests can verify
        # which context a remoted call was executed under.
        if context.project_id == 'alternate':
            self.bar = 'alternate-context'
        else:
            self.bar = 'updated'

    @base.remotable
    def save(self, context):
        self.obj_reset_changes()

    @base.remotable
    def refresh(self, context):
        self.foo = 321
        self.bar = 'refreshed'
        self.obj_reset_changes()

    @base.remotable
    def modify_save_modify(self, context):
        self.bar = 'meow'
        self.save()
        self.foo = 42

    def obj_make_compatible(self, primitive, target_version):
        # NOTE(danms): Simulate an older version that had a different
        # format for the 'bar' attribute
        if target_version == '1.1':
            primitive['bar'] = 'old%s' % primitive['bar']
class MyObjDiffVers(MyObj):
    """Same object as MyObj, registered under an older version (1.4)."""
    VERSION = '1.4'

    @classmethod
    def obj_name(cls):
        return 'MyObj'
class MyObj2(object):
    """Non-NovaObject that still reports the 'MyObj' name.

    Its VERSION is mutated by the remote tests to drive the
    version-mismatch code paths.
    """

    @classmethod
    def obj_name(cls):
        return 'MyObj'

    @base.remotable_classmethod
    def query(cls, *args, **kwargs):
        pass
class RandomMixInWithNoFields(object):
    """Used to test object inheritance using a mixin that has no fields."""
    pass
class TestSubclassedObject(RandomMixInWithNoFields, MyObj):
    """MyObj subclass (via a field-less mixin) adding one extra field."""
    fields = {'new_field': fields.Field(fields.String())}
class TestMetaclass(test.TestCase):
    """Tests for NovaObjectMetaclass class registration and field checks."""

    def test_obj_tracking(self):
        # Build a fresh registry rooted at NewBaseClass so we don't pollute
        # the real NovaObject registry.
        @six.add_metaclass(base.NovaObjectMetaclass)
        class NewBaseClass(object):
            VERSION = '1.0'
            fields = {}

            @classmethod
            def obj_name(cls):
                return cls.__name__

        class Fake1TestObj1(NewBaseClass):
            @classmethod
            def obj_name(cls):
                return 'fake1'

        class Fake1TestObj2(Fake1TestObj1):
            pass

        class Fake1TestObj3(Fake1TestObj1):
            VERSION = '1.1'

        class Fake2TestObj1(NewBaseClass):
            @classmethod
            def obj_name(cls):
                return 'fake2'

        class Fake1TestObj4(Fake1TestObj3):
            VERSION = '1.2'

        class Fake2TestObj2(Fake2TestObj1):
            VERSION = '1.1'

        class Fake1TestObj5(Fake1TestObj1):
            VERSION = '1.1'

        # Newest versions first in the list. Duplicate versions take the
        # newest object.
        expected = {'fake1': [Fake1TestObj4, Fake1TestObj5, Fake1TestObj2],
                    'fake2': [Fake2TestObj2, Fake2TestObj1]}
        self.assertEqual(expected, NewBaseClass._obj_classes)
        # The following should work, also.
        self.assertEqual(expected, Fake1TestObj1._obj_classes)
        self.assertEqual(expected, Fake1TestObj2._obj_classes)
        self.assertEqual(expected, Fake1TestObj3._obj_classes)
        self.assertEqual(expected, Fake1TestObj4._obj_classes)
        self.assertEqual(expected, Fake1TestObj5._obj_classes)
        self.assertEqual(expected, Fake2TestObj1._obj_classes)
        self.assertEqual(expected, Fake2TestObj2._obj_classes)

    def test_field_checking(self):
        def create_class(field):
            class TestField(base.NovaObject):
                VERSION = '1.5'
                fields = {'foo': field()}
            return TestField

        # Fields must be Field instances; classes/plain types are rejected.
        create_class(fields.IPV4AndV6AddressField)
        self.assertRaises(exception.ObjectFieldInvalid,
                          create_class, fields.IPV4AndV6Address)
        self.assertRaises(exception.ObjectFieldInvalid,
                          create_class, int)
class TestObjToPrimitive(test.TestCase):
    """Tests for base.obj_to_primitive() over lists, dicts and nesting."""

    def test_obj_to_primitive_list(self):
        class MyObjElement(base.NovaObject):
            fields = {'foo': fields.IntegerField()}

            def __init__(self, foo):
                super(MyObjElement, self).__init__()
                self.foo = foo

        class MyList(base.ObjectListBase, base.NovaObject):
            fields = {'objects': fields.ListOfObjectsField('MyObjElement')}

        mylist = MyList()
        mylist.objects = [MyObjElement(1), MyObjElement(2), MyObjElement(3)]
        self.assertEqual([1, 2, 3],
                         [x['foo'] for x in base.obj_to_primitive(mylist)])

    def test_obj_to_primitive_dict(self):
        myobj = MyObj(foo=1, bar='foo')
        self.assertEqual({'foo': 1, 'bar': 'foo'},
                         base.obj_to_primitive(myobj))

    def test_obj_to_primitive_recursive(self):
        class MyList(base.ObjectListBase, base.NovaObject):
            fields = {'objects': fields.ListOfObjectsField('MyObj')}

        mylist = MyList(objects=[MyObj(), MyObj()])
        for i, value in enumerate(mylist):
            value.foo = i
        self.assertEqual([{'foo': 0}, {'foo': 1}],
                         base.obj_to_primitive(mylist))

    def test_obj_to_primitive_with_ip_addr(self):
        # IP address/network fields should primitivize to plain strings.
        class TestObject(base.NovaObject):
            fields = {'addr': fields.IPAddressField(),
                      'cidr': fields.IPNetworkField()}

        obj = TestObject(addr='1.2.3.4', cidr='1.1.1.1/16')
        self.assertEqual({'addr': '1.2.3.4', 'cidr': '1.1.1.1/16'},
                         base.obj_to_primitive(obj))
class TestObjMakeList(test.TestCase):
    """Tests for base.obj_make_list() building lists from db rows."""

    def test_obj_make_list(self):
        class MyList(base.ObjectListBase, base.NovaObject):
            pass

        db_objs = [{'foo': 1, 'bar': 'baz', 'missing': 'banana'},
                   {'foo': 2, 'bar': 'bat', 'missing': 'apple'},
                   ]
        mylist = base.obj_make_list('ctxt', MyList(), MyObj, db_objs)
        self.assertEqual(2, len(mylist))
        # The context passed in is propagated to the list.
        self.assertEqual('ctxt', mylist._context)
        for index, item in enumerate(mylist):
            self.assertEqual(db_objs[index]['foo'], item.foo)
            self.assertEqual(db_objs[index]['bar'], item.bar)
            self.assertEqual(db_objs[index]['missing'], item.missing)
def compare_obj(test, obj, db_obj, subs=None, allow_missing=None,
                comparators=None):
    """Compare a NovaObject and a dict-like database object.

    This automatically converts TZ-aware datetimes and iterates over
    the fields of the object.

    :param:test: The TestCase doing the comparison
    :param:obj: The NovaObject to examine
    :param:db_obj: The dict-like database object to use as reference
    :param:subs: A dict of objkey=dbkey field substitutions
    :param:allow_missing: A list of fields that may not be in db_obj
    :param:comparators: Map of comparator functions to use for certain fields
    """
    subs = subs or {}
    allow_missing = allow_missing or []
    comparators = comparators or {}

    for field in obj.fields:
        # Fields listed in allow_missing may legitimately be unset.
        if field in allow_missing and not obj.obj_attr_is_set(field):
            continue
        actual = obj[field]
        expected = db_obj[subs.get(field, field)]
        if isinstance(actual, datetime.datetime):
            # DB values are naive datetimes; strip tzinfo before comparing.
            actual = actual.replace(tzinfo=None)
        # Per-field comparator overrides the plain equality assertion.
        compare = comparators.get(field, test.assertEqual)
        compare(expected, actual)
class _BaseTestCase(test.TestCase):
    """Common fixture for the object tests: context, notifier, helpers."""

    def setUp(self):
        super(_BaseTestCase, self).setUp()
        # Populated by the remote variant; stays empty for local runs.
        self.remote_object_calls = list()
        self.context = context.RequestContext('fake-user', 'fake-project')
        fake_notifier.stub_notifier(self.stubs)
        self.addCleanup(fake_notifier.reset)

    def compare_obj(self, obj, db_obj, subs=None, allow_missing=None,
                    comparators=None):
        # Thin wrapper around the module-level compare_obj() helper.
        compare_obj(self, obj, db_obj, subs=subs, allow_missing=allow_missing,
                    comparators=comparators)

    def json_comparator(self, expected, obj_val):
        # json-ify an object field for comparison with its db str
        # equivalent
        self.assertEqual(expected, jsonutils.dumps(obj_val))

    def assertNotIsInstance(self, obj, cls, msg=None):
        """Python < v2.7 compatibility. Assert 'not isinstance(obj, cls)."""
        try:
            f = super(_BaseTestCase, self).assertNotIsInstance
        except AttributeError:
            self.assertThat(obj,
                            matchers.Not(matchers.IsInstance(cls)),
                            message=msg or '')
        else:
            f(obj, cls, msg=msg)
class _LocalTest(_BaseTestCase):
    """Variant that runs all object calls locally (no RPC indirection)."""

    def setUp(self):
        super(_LocalTest, self).setUp()
        # Just in case
        base.NovaObject.indirection_api = None

    def assertRemotes(self):
        # Local runs must never have recorded any remoted calls.
        self.assertEqual(self.remote_object_calls, [])
@contextlib.contextmanager
def things_temporarily_local():
    """Disable object indirection for the duration of the with-block.

    Temporarily clears NovaObject.indirection_api so the conductor
    handles requests made inside the block directly instead of
    remoting them over RPC. The previous API is restored on exit.
    """
    # Temporarily go non-remote so the conductor handles
    # this request directly
    _api = base.NovaObject.indirection_api
    base.NovaObject.indirection_api = None
    try:
        yield
    finally:
        # Restore even if the body raised; otherwise a failing test would
        # leave indirection disabled for every test that runs after it.
        base.NovaObject.indirection_api = _api
class _RemoteTest(_BaseTestCase):
    """Variant that remotes every object call through a stubbed conductor.

    The conductor's object action entry points are wrapped so each call is
    recorded in self.remote_object_calls before being executed locally.
    """

    def _testable_conductor(self):
        self.conductor_service = self.start_service(
            'conductor', manager='nova.conductor.manager.ConductorManager')
        self.remote_object_calls = list()

        orig_object_class_action = \
            self.conductor_service.manager.object_class_action
        orig_object_action = \
            self.conductor_service.manager.object_action

        def fake_object_class_action(*args, **kwargs):
            self.remote_object_calls.append((kwargs.get('objname'),
                                             kwargs.get('objmethod')))
            # Run the real handler with indirection disabled to avoid
            # recursing back through the remote path.
            with things_temporarily_local():
                result = orig_object_class_action(*args, **kwargs)
            return (base.NovaObject.obj_from_primitive(result, context=args[0])
                    if isinstance(result, base.NovaObject) else result)
        self.stubs.Set(self.conductor_service.manager, 'object_class_action',
                       fake_object_class_action)

        def fake_object_action(*args, **kwargs):
            self.remote_object_calls.append((kwargs.get('objinst'),
                                             kwargs.get('objmethod')))
            with things_temporarily_local():
                result = orig_object_action(*args, **kwargs)
            return result
        self.stubs.Set(self.conductor_service.manager, 'object_action',
                       fake_object_action)

        # Things are remoted by default in this session
        base.NovaObject.indirection_api = conductor_rpcapi.ConductorAPI()

        # To make sure local and remote contexts match
        self.stubs.Set(rpc.RequestContextSerializer,
                       'serialize_context',
                       lambda s, c: c)
        self.stubs.Set(rpc.RequestContextSerializer,
                       'deserialize_context',
                       lambda s, c: c)

    def setUp(self):
        super(_RemoteTest, self).setUp()
        self._testable_conductor()

    def assertRemotes(self):
        # Remote runs must have recorded at least one remoted call.
        self.assertNotEqual(self.remote_object_calls, [])
class _TestObject(object):
    """Core NovaObject behavior tests.

    Mixed into TestObject (local) and TestRemoteObject (remoted) so the
    same assertions run against both execution paths.
    """

    def test_object_attrs_in_init(self):
        # Spot check a few
        objects.Instance
        objects.InstanceInfoCache
        objects.SecurityGroup
        # Now check the test one in this file. Should be newest version
        self.assertEqual('1.5', objects.MyObj.VERSION)

    def test_hydration_type_error(self):
        primitive = {'nova_object.name': 'MyObj',
                     'nova_object.namespace': 'nova',
                     'nova_object.version': '1.5',
                     'nova_object.data': {'foo': 'a'}}
        self.assertRaises(ValueError, MyObj.obj_from_primitive, primitive)

    def test_hydration(self):
        primitive = {'nova_object.name': 'MyObj',
                     'nova_object.namespace': 'nova',
                     'nova_object.version': '1.5',
                     'nova_object.data': {'foo': 1}}
        real_method = MyObj._obj_from_primitive

        def _obj_from_primitive(*args):
            return real_method(*args)

        with mock.patch.object(MyObj, '_obj_from_primitive') as ofp:
            ofp.side_effect = _obj_from_primitive
            obj = MyObj.obj_from_primitive(primitive)
            ofp.assert_called_once_with(None, '1.5', primitive)
        self.assertEqual(obj.foo, 1)

    def test_hydration_version_different(self):
        primitive = {'nova_object.name': 'MyObj',
                     'nova_object.namespace': 'nova',
                     'nova_object.version': '1.2',
                     'nova_object.data': {'foo': 1}}
        obj = MyObj.obj_from_primitive(primitive)
        self.assertEqual(obj.foo, 1)
        self.assertEqual('1.2', obj.VERSION)

    def test_hydration_bad_ns(self):
        primitive = {'nova_object.name': 'MyObj',
                     'nova_object.namespace': 'foo',
                     'nova_object.version': '1.5',
                     'nova_object.data': {'foo': 1}}
        self.assertRaises(exception.UnsupportedObjectError,
                          MyObj.obj_from_primitive, primitive)

    def test_dehydration(self):
        expected = {'nova_object.name': 'MyObj',
                    'nova_object.namespace': 'nova',
                    'nova_object.version': '1.5',
                    'nova_object.data': {'foo': 1}}
        obj = MyObj(foo=1)
        obj.obj_reset_changes()
        self.assertEqual(obj.obj_to_primitive(), expected)

    def test_object_property(self):
        obj = MyObj(foo=1)
        self.assertEqual(obj.foo, 1)

    def test_object_property_type_error(self):
        obj = MyObj()

        def fail():
            obj.foo = 'a'
        self.assertRaises(ValueError, fail)

    def test_object_dict_syntax(self):
        obj = MyObj(foo=123, bar='bar')
        self.assertEqual(obj['foo'], 123)
        self.assertEqual(sorted(obj.items(), key=lambda x: x[0]),
                         [('bar', 'bar'), ('foo', 123)])
        self.assertEqual(sorted(list(obj.iteritems()), key=lambda x: x[0]),
                         [('bar', 'bar'), ('foo', 123)])

    def test_load(self):
        # Unset attributes are lazy-loaded via obj_load_attr().
        obj = MyObj()
        self.assertEqual(obj.bar, 'loaded!')

    def test_load_in_base(self):
        class Foo(base.NovaObject):
            fields = {'foobar': fields.Field(fields.Integer())}
        obj = Foo()
        # NOTE(danms): Can't use assertRaisesRegexp() because of py26
        raised = False
        try:
            obj.foobar
        except NotImplementedError as ex:
            raised = True
        self.assertTrue(raised)
        self.assertIn('foobar', str(ex))

    def test_loaded_in_primitive(self):
        obj = MyObj(foo=1)
        obj.obj_reset_changes()
        self.assertEqual(obj.bar, 'loaded!')
        expected = {'nova_object.name': 'MyObj',
                    'nova_object.namespace': 'nova',
                    'nova_object.version': '1.5',
                    'nova_object.changes': ['bar'],
                    'nova_object.data': {'foo': 1,
                                         'bar': 'loaded!'}}
        self.assertEqual(obj.obj_to_primitive(), expected)

    def test_changes_in_primitive(self):
        obj = MyObj(foo=123)
        self.assertEqual(obj.obj_what_changed(), set(['foo']))
        primitive = obj.obj_to_primitive()
        self.assertIn('nova_object.changes', primitive)
        obj2 = MyObj.obj_from_primitive(primitive)
        self.assertEqual(obj2.obj_what_changed(), set(['foo']))
        obj2.obj_reset_changes()
        self.assertEqual(obj2.obj_what_changed(), set())

    def test_obj_class_from_name(self):
        obj = base.NovaObject.obj_class_from_name('MyObj', '1.4')
        self.assertEqual('1.4', obj.VERSION)

    def test_obj_class_from_name_latest_compatible(self):
        obj = base.NovaObject.obj_class_from_name('MyObj', '1.1')
        self.assertEqual('1.5', obj.VERSION)

    def test_unknown_objtype(self):
        self.assertRaises(exception.UnsupportedObjectError,
                          base.NovaObject.obj_class_from_name, 'foo', '1.0')

    def test_obj_class_from_name_supported_version(self):
        error = None
        try:
            base.NovaObject.obj_class_from_name('MyObj', '1.25')
        except exception.IncompatibleObjectVersion as error:
            pass
        self.assertIsNotNone(error)
        self.assertEqual('1.5', error.kwargs['supported'])

    def test_with_alternate_context(self):
        ctxt1 = context.RequestContext('foo', 'foo')
        ctxt2 = context.RequestContext('bar', 'alternate')
        obj = MyObj.query(ctxt1)
        obj._update_test(ctxt2)
        self.assertEqual(obj.bar, 'alternate-context')
        self.assertRemotes()

    def test_orphaned_object(self):
        obj = MyObj.query(self.context)
        obj._context = None
        self.assertRaises(exception.OrphanedObjectError,
                          obj._update_test)
        self.assertRemotes()

    def test_changed_1(self):
        obj = MyObj.query(self.context)
        obj.foo = 123
        self.assertEqual(obj.obj_what_changed(), set(['foo']))
        obj._update_test(self.context)
        self.assertEqual(obj.obj_what_changed(), set(['foo', 'bar']))
        self.assertEqual(obj.foo, 123)
        self.assertRemotes()

    def test_changed_2(self):
        obj = MyObj.query(self.context)
        obj.foo = 123
        self.assertEqual(obj.obj_what_changed(), set(['foo']))
        obj.save(self.context)
        self.assertEqual(obj.obj_what_changed(), set([]))
        self.assertEqual(obj.foo, 123)
        self.assertRemotes()

    def test_changed_3(self):
        obj = MyObj.query(self.context)
        obj.foo = 123
        self.assertEqual(obj.obj_what_changed(), set(['foo']))
        obj.refresh(self.context)
        self.assertEqual(obj.obj_what_changed(), set([]))
        self.assertEqual(obj.foo, 321)
        self.assertEqual(obj.bar, 'refreshed')
        self.assertRemotes()

    def test_changed_4(self):
        obj = MyObj.query(self.context)
        obj.bar = 'something'
        self.assertEqual(obj.obj_what_changed(), set(['bar']))
        obj.modify_save_modify(self.context)
        self.assertEqual(obj.obj_what_changed(), set(['foo']))
        self.assertEqual(obj.foo, 42)
        self.assertEqual(obj.bar, 'meow')
        self.assertRemotes()

    def test_changed_with_sub_object(self):
        # A dirty child object keeps its parent field marked as changed.
        class ParentObject(base.NovaObject):
            fields = {'foo': fields.IntegerField(),
                      'bar': fields.ObjectField('MyObj'),
                      }
        obj = ParentObject()
        self.assertEqual(set(), obj.obj_what_changed())
        obj.foo = 1
        self.assertEqual(set(['foo']), obj.obj_what_changed())
        bar = MyObj()
        obj.bar = bar
        self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
        obj.obj_reset_changes()
        self.assertEqual(set(), obj.obj_what_changed())
        bar.foo = 1
        self.assertEqual(set(['bar']), obj.obj_what_changed())

    def test_static_result(self):
        obj = MyObj.query(self.context)
        self.assertEqual(obj.bar, 'bar')
        result = obj.marco()
        self.assertEqual(result, 'polo')
        self.assertRemotes()

    def test_updates(self):
        obj = MyObj.query(self.context)
        self.assertEqual(obj.foo, 1)
        obj._update_test()
        self.assertEqual(obj.bar, 'updated')
        self.assertRemotes()

    def test_base_attributes(self):
        dt = datetime.datetime(1955, 11, 5)
        obj = MyObj(created_at=dt, updated_at=dt, deleted_at=None,
                    deleted=False)
        expected = {'nova_object.name': 'MyObj',
                    'nova_object.namespace': 'nova',
                    'nova_object.version': '1.5',
                    'nova_object.changes':
                        ['deleted', 'created_at', 'deleted_at', 'updated_at'],
                    'nova_object.data':
                        {'created_at': timeutils.isotime(dt),
                         'updated_at': timeutils.isotime(dt),
                         'deleted_at': None,
                         'deleted': False,
                         }
                    }
        self.assertEqual(obj.obj_to_primitive(), expected)

    def test_contains(self):
        obj = MyObj()
        self.assertNotIn('foo', obj)
        obj.foo = 1
        self.assertIn('foo', obj)
        self.assertNotIn('does_not_exist', obj)

    def test_obj_attr_is_set(self):
        obj = MyObj(foo=1)
        self.assertTrue(obj.obj_attr_is_set('foo'))
        self.assertFalse(obj.obj_attr_is_set('bar'))
        self.assertRaises(AttributeError, obj.obj_attr_is_set, 'bang')

    def test_get(self):
        obj = MyObj(foo=1)
        # Foo has value, should not get the default
        self.assertEqual(obj.get('foo', 2), 1)
        # Foo has value, should return the value without error
        self.assertEqual(obj.get('foo'), 1)
        # Bar is not loaded, so we should get the default
        self.assertEqual(obj.get('bar', 'not-loaded'), 'not-loaded')
        # Bar without a default should lazy-load
        self.assertEqual(obj.get('bar'), 'loaded!')
        # Bar now has a default, but loaded value should be returned
        self.assertEqual(obj.get('bar', 'not-loaded'), 'loaded!')
        # Invalid attribute should raise AttributeError
        self.assertRaises(AttributeError, obj.get, 'nothing')
        # ...even with a default
        self.assertRaises(AttributeError, obj.get, 'nothing', 3)

    def test_object_inheritance(self):
        base_fields = base.NovaPersistentObject.fields.keys()
        myobj_fields = ['foo', 'bar', 'missing'] + base_fields
        myobj3_fields = ['new_field']
        self.assertTrue(issubclass(TestSubclassedObject, MyObj))
        self.assertEqual(len(myobj_fields), len(MyObj.fields))
        self.assertEqual(set(myobj_fields), set(MyObj.fields.keys()))
        self.assertEqual(len(myobj_fields) + len(myobj3_fields),
                         len(TestSubclassedObject.fields))
        self.assertEqual(set(myobj_fields) | set(myobj3_fields),
                         set(TestSubclassedObject.fields.keys()))

    def test_get_changes(self):
        obj = MyObj()
        self.assertEqual({}, obj.obj_get_changes())
        obj.foo = 123
        self.assertEqual({'foo': 123}, obj.obj_get_changes())
        obj.bar = 'test'
        self.assertEqual({'foo': 123, 'bar': 'test'}, obj.obj_get_changes())
        obj.obj_reset_changes()
        self.assertEqual({}, obj.obj_get_changes())

    def test_obj_fields(self):
        # obj_extra_fields adds computed attributes to obj_fields.
        class TestObj(base.NovaObject):
            fields = {'foo': fields.Field(fields.Integer())}
            obj_extra_fields = ['bar']

            @property
            def bar(self):
                return 'this is bar'

        obj = TestObj()
        self.assertEqual(['foo', 'bar'], obj.obj_fields)

    def test_obj_constructor(self):
        obj = MyObj(context=self.context, foo=123, bar='abc')
        self.assertEqual(123, obj.foo)
        self.assertEqual('abc', obj.bar)
        self.assertEqual(set(['foo', 'bar']), obj.obj_what_changed())
class TestObject(_LocalTest, _TestObject):
    """Run the _TestObject suite with local (non-remoted) objects."""
    pass
class TestRemoteObject(_RemoteTest, _TestObject):
    """Run the _TestObject suite remoted, plus version-negotiation cases."""

    def test_major_version_mismatch(self):
        MyObj2.VERSION = '2.0'
        self.assertRaises(exception.IncompatibleObjectVersion,
                          MyObj2.query, self.context)

    def test_minor_version_greater(self):
        MyObj2.VERSION = '1.6'
        self.assertRaises(exception.IncompatibleObjectVersion,
                          MyObj2.query, self.context)

    def test_minor_version_less(self):
        MyObj2.VERSION = '1.2'
        obj = MyObj2.query(self.context)
        self.assertEqual(obj.bar, 'bar')
        self.assertRemotes()

    def test_compat(self):
        # MyObj.obj_make_compatible() rewrites 'bar' for version 1.1.
        MyObj2.VERSION = '1.1'
        obj = MyObj2.query(self.context)
        self.assertEqual('oldbar', obj.bar)
class TestObjectListBase(test.TestCase):
    """Tests for ObjectListBase list semantics and child version coverage."""

    def test_list_like_operations(self):
        class MyElement(base.NovaObject):
            fields = {'foo': fields.IntegerField()}

            def __init__(self, foo):
                super(MyElement, self).__init__()
                self.foo = foo

        class Foo(base.ObjectListBase, base.NovaObject):
            fields = {'objects': fields.ListOfObjectsField('MyElement')}

        objlist = Foo(context='foo',
                      objects=[MyElement(1), MyElement(2), MyElement(3)])
        self.assertEqual(list(objlist), objlist.objects)
        self.assertEqual(len(objlist), 3)
        self.assertIn(objlist.objects[0], objlist)
        self.assertEqual(list(objlist[:1]), [objlist.objects[0]])
        # Slicing preserves the list's context.
        self.assertEqual(objlist[:1]._context, 'foo')
        self.assertEqual(objlist[2], objlist.objects[2])
        self.assertEqual(objlist.count(objlist.objects[0]), 1)
        self.assertEqual(objlist.index(objlist.objects[1]), 1)
        objlist.sort(key=lambda x: x.foo, reverse=True)
        self.assertEqual([3, 2, 1],
                         [x.foo for x in objlist])

    def test_serialization(self):
        class Foo(base.ObjectListBase, base.NovaObject):
            fields = {'objects': fields.ListOfObjectsField('Bar')}

        class Bar(base.NovaObject):
            fields = {'foo': fields.Field(fields.String())}

        obj = Foo(objects=[])
        for i in 'abc':
            bar = Bar(foo=i)
            obj.objects.append(bar)

        obj2 = base.NovaObject.obj_from_primitive(obj.obj_to_primitive())
        self.assertFalse(obj is obj2)
        self.assertEqual([x.foo for x in obj],
                         [y.foo for y in obj2])

    def _test_object_list_version_mappings(self, list_obj_class):
        # Figure out what sort of object this list is for
        list_field = list_obj_class.fields['objects']
        item_obj_field = list_field._type._element_type
        item_obj_name = item_obj_field._type._obj_name

        # Look through all object classes of this type and make sure that
        # the versions we find are covered by the parent list class
        for item_class in base.NovaObject._obj_classes[item_obj_name]:
            self.assertIn(
                item_class.VERSION,
                list_obj_class.child_versions.values())

    def test_object_version_mappings(self):
        # Find all object list classes and make sure that they at least handle
        # all the current object versions
        for obj_classes in base.NovaObject._obj_classes.values():
            for obj_class in obj_classes:
                if issubclass(obj_class, base.ObjectListBase):
                    self._test_object_list_version_mappings(obj_class)

    def test_list_changes(self):
        class Foo(base.ObjectListBase, base.NovaObject):
            fields = {'objects': fields.ListOfObjectsField('Bar')}

        class Bar(base.NovaObject):
            fields = {'foo': fields.StringField()}

        obj = Foo(objects=[])
        self.assertEqual(set(['objects']), obj.obj_what_changed())
        obj.objects.append(Bar(foo='test'))
        self.assertEqual(set(['objects']), obj.obj_what_changed())
        obj.obj_reset_changes()
        # This should still look dirty because the child is dirty
        self.assertEqual(set(['objects']), obj.obj_what_changed())
        obj.objects[0].obj_reset_changes()
        # This should now look clean because the child is clean
        self.assertEqual(set(), obj.obj_what_changed())
class TestObjectSerializer(_BaseTestCase):
    """Tests for NovaObjectSerializer (de)serialization and backporting."""

    def test_serialize_entity_primitive(self):
        # Plain primitives pass through unchanged.
        ser = base.NovaObjectSerializer()
        for thing in (1, 'foo', [1, 2], {'foo': 'bar'}):
            self.assertEqual(thing, ser.serialize_entity(None, thing))

    def test_deserialize_entity_primitive(self):
        ser = base.NovaObjectSerializer()
        for thing in (1, 'foo', [1, 2], {'foo': 'bar'}):
            self.assertEqual(thing, ser.deserialize_entity(None, thing))

    def test_deserialize_entity_newer_version(self):
        # Objects newer than we support get backported via the conductor.
        ser = base.NovaObjectSerializer()
        ser._conductor = mock.Mock()
        ser._conductor.object_backport.return_value = 'backported'
        obj = MyObj()
        obj.VERSION = '1.25'
        primitive = obj.obj_to_primitive()
        result = ser.deserialize_entity(self.context, primitive)
        self.assertEqual('backported', result)
        ser._conductor.object_backport.assert_called_with(self.context,
                                                          primitive,
                                                          '1.5')

    def test_object_serialization(self):
        ser = base.NovaObjectSerializer()
        obj = MyObj()
        primitive = ser.serialize_entity(self.context, obj)
        self.assertIn('nova_object.name', primitive)
        obj2 = ser.deserialize_entity(self.context, primitive)
        self.assertIsInstance(obj2, MyObj)
        self.assertEqual(self.context, obj2._context)

    def test_object_serialization_iterables(self):
        # Objects nested inside lists/tuples/sets are handled recursively.
        ser = base.NovaObjectSerializer()
        obj = MyObj()
        for iterable in (list, tuple, set):
            thing = iterable([obj])
            primitive = ser.serialize_entity(self.context, thing)
            self.assertEqual(1, len(primitive))
            for item in primitive:
                self.assertNotIsInstance(item, base.NovaObject)
            thing2 = ser.deserialize_entity(self.context, primitive)
            self.assertEqual(1, len(thing2))
            for item in thing2:
                self.assertIsInstance(item, MyObj)
# NOTE(danms): The hashes in this list should only be changed if
# they come with a corresponding version bump in the affected
# objects

# Maps object name -> 'VERSION-md5(schema)' fingerprint; consumed by
# TestObjectVersions below to detect unversioned schema changes.
object_data = {
    'Aggregate': '1.1-1d96b82d2f0ad66ad1d49313f08eca71',
    'AggregateList': '1.1-dbb5bafde58c263c1fd132c33d68ba77',
    'BlockDeviceMapping': '1.1-d44030deca25ebf8efcb4f3d12429677',
    'BlockDeviceMappingList': '1.2-d0f559a2510ea2beab5478e5118a69f9',
    'ComputeNode': '1.3-da09be1ff8b43f9889f2bb4e43b5686e',
    'ComputeNodeList': '1.2-be44294fa7d0deef6146863836adb1e5',
    'DNSDomain': '1.0-f0467d23e2c8b567469cdcd6a9708615',
    'DNSDomainList': '1.0-47ffa72c29119d19fc8d3854ae49f094',
    'FixedIP': '1.1-e3ceee4d62b52320707db6f730338531',
    'FixedIPList': '1.1-c944566e2e21af32432d7b7c35018831',
    'Flavor': '1.0-4f0c857e5bf5627a40d04ba249f9e31b',
    'FlavorList': '1.0-47ffa72c29119d19fc8d3854ae49f094',
    'FloatingIP': '1.1-ee1245f7df59fcd081e3bffe3411e822',
    'FloatingIPList': '1.1-a5c220af1c55f2aa3d2d14771bbca668',
    'Instance': '1.13-552999d3072d5aa7b31493d3c2ee551e',
    'InstanceAction': '1.1-abef7ec3247d587bdef78bf47744c6ee',
    'InstanceActionEvent': '1.0-3b23bda6f8431fd2ab27489275a150ab',
    'InstanceActionEventList': '1.0-6f8bfe29181b175400069c8a47f6e618',
    'InstanceActionList': '1.0-d0f559a2510ea2beab5478e5118a69f9',
    'InstanceExternalEvent': '1.0-c1b2be346d0ee670ebc0146c65859b1e',
    'InstanceFault': '1.2-c85a5ecc4f4a82a26c9da95d947a719d',
    'InstanceFaultList': '1.1-6e250b18ac45ea63a3478a4b365b009f',
    'InstanceGroup': '1.6-c17ebff3c3453108370362a8f22b8d48',
    'InstanceGroupList': '1.2-176452f4f090408eb1b9d631957f996b',
    'InstanceInfoCache': '1.5-04937dde0e8409eb87bc04f3514736ba',
    'InstanceList': '1.6-086b5de1c23af9e023fa10dd2e8c6a69',
    'KeyPair': '1.1-30e67207cd4d0a3a044b5805f252a60c',
    'KeyPairList': '1.0-ab564b050224c1945febb24ce84c9524',
    'Migration': '1.1-c90e531ec87739decb31026c05100964',
    'MigrationList': '1.1-add1d472f38ee759f9d717b9824e07a4',
    'MyObj': '1.5-2cb1447b872ebaf439eff9c678af8bf4',
    'Network': '1.1-faba26d0290395456f9a040584c4364b',
    'NetworkList': '1.1-eaafb55cf6b571581df685127cd687c1',
    'OtherTestableObject': '1.0-b43ae164bcf53764db6a54270af71b86',
    'PciDevice': '1.1-637f3dddb48197d2a69e41bd1144a3c5',
    'PciDeviceList': '1.0-80491949ec8ac90cbbd1ea153adcb4ef',
    'Quotas': '1.0-759987de0abbb6e4428bba7c6bdf8e9e',
    'QuotasNoOp': '1.0-c25493f36b5df1d1f0a1077a610495cd',
    'SecurityGroup': '1.1-0a71e19e0b5bd790e6bf882afcb71d4c',
    'SecurityGroupList': '1.0-ae82c19e66b17d506e25f8d49576db1f',
    'SecurityGroupRule': '1.0-96cdebd0294fd834e3e4249238c76eb9',
    'SecurityGroupRuleList': '1.0-790df2265ff6d41794f60decdf9dd080',
    'Service': '1.2-16a7d0f0d41e423deefb804ca2aeb51d',
    'ServiceList': '1.0-35c5e3a116de08c1655d5fc3ecbe6549',
    'TestableObject': '1.0-b43ae164bcf53764db6a54270af71b86',
    'TestSubclassedObject': '1.5-f5f524f005954e2e351f20bd58cb74fb',
    'VirtualInterface': '1.0-022c3e84a172f8302a0f8c4407bc92a2',
    'VirtualInterfaceList': '1.0-59568968ee1ac0e796c7ebbf8354d65d',
    'VolumeMapping': '1.0-b97464d4e338688d04a46d5c1740423d',
    }
class TestObjectVersions(test.TestCase):
    """Guards against unversioned changes to registered Nova objects.

    Each registered NovaObject is reduced to a fingerprint of its fields and
    remotable method signatures; any mechanical change must come with a
    version bump and an update to the expected hash table above.
    """

    def _get_fingerprint(self, obj_class):
        # Collect the argspec of every remotable method; a signature change
        # alters the fingerprint and therefore requires a version bump.
        remotable_specs = {}
        for attr_name in dir(obj_class):
            attr = getattr(obj_class, attr_name)
            if inspect.ismethod(attr) and hasattr(attr, 'remotable'):
                remotable_specs[attr_name] = inspect.getargspec(attr)
        # NOTE(danms): Things that need a version bump are any fields
        # and their types, or the signatures of any remotable methods.
        # Of course, these are just the mechanical changes we can detect,
        # but many other things may require a version bump (method behavior
        # and return value changes, for example).
        relevant_data = {'fields': obj_class.fields.items(),
                         'methods': remotable_specs,
                         }
        return '%s-%s' % (obj_class.VERSION,
                          hashlib.md5(str(relevant_data)).hexdigest())

    def _test_versions_cls(self, obj_name):
        obj_class = base.NovaObject._obj_classes[obj_name][0]
        expected = object_data.get(obj_name, 'unknown')
        actual = self._get_fingerprint(obj_class)
        self.assertEqual(
            expected, actual,
            ('%s object has changed; please make sure the version '
             'has been bumped, and then update this hash') % obj_name)

    def test_versions(self):
        # Every registered object must have a matching fingerprint above.
        for registered_name in base.NovaObject._obj_classes:
            self._test_versions_cls(registered_name)
| afrolov1/nova | nova/tests/objects/test_objects.py | Python | apache-2.0 | 36,028 |
import abc
import pprint
import six
def _decode_plain_type(value_type, buf):
if value_type == 'int8':
return buf.getInt8()
elif value_type == 'int16':
return buf.getInt16()
elif value_type == 'int32':
return buf.getInt32()
elif value_type == 'int64':
return buf.getInt64()
elif value_type == 'string':
val_len = buf.getInt16()
return None if val_len == -1 else buf.get(val_len).decode("utf-8")
elif value_type == 'bytes':
val_len = buf.getInt32()
return None if val_len == -1 else buf.get(val_len)
elif value_type == 'boolean':
return buf.getInt8() == 1
else:
raise NotImplementedError("Reference to non-implemented type in schema: {0}".format(value_type))
def _decode_array(array_schema, buf):
    """Decode a length-prefixed array from *buf*.

    A count of -1 denotes a null array. When *array_schema* is a plain type
    name each element is a primitive; otherwise it is a nested sequence
    schema and each element is decoded as a sub-sequence.
    """
    count = buf.getInt32()
    if count == -1:
        return None
    if isinstance(array_schema, six.string_types):
        return [_decode_plain_type(array_schema, buf) for _ in range(count)]
    return [_decode_sequence(array_schema, buf) for _ in range(count)]
def _decode_sequence(sequence_schema, buf):
    """Decode an ordered sequence of schema fields from *buf* into a dict.

    Each entry in *sequence_schema* names a field; 'array' typed fields
    delegate to :func:`_decode_array` with their ``item_type``, everything
    else is decoded as a primitive.
    """
    decoded = {}
    for field in sequence_schema:
        field_type = field['type'].lower()
        if field_type == 'array':
            decoded[field['name']] = _decode_array(field['item_type'], buf)
        else:
            decoded[field['name']] = _decode_plain_type(field_type, buf)
    return decoded
@six.add_metaclass(abc.ABCMeta)
class BaseResponse():  # pragma: no cover
    """Base class for decoded Kafka protocol responses.

    Subclasses supply a ``schema`` describing the wire format. The decoded
    payload is exposed through a read-only mapping interface; mutation
    attempts raise NotImplementedError.
    """

    @abc.abstractproperty
    def schema(self):
        """Sequence schema used to decode the raw response bytes."""
        pass

    @classmethod
    def from_bytebuffer(cls, correlation_id, buf):
        """Decode *buf* per ``cls.schema`` and return a response instance.

        The correlation_id of the originating request is attached so
        callers can match replies to requests.
        """
        seq_obj = _decode_sequence(cls.schema, buf)
        rv = cls(seq_obj)
        rv.correlation_id = correlation_id
        return rv

    def __init__(self, sequence_obj):
        # Decoded payload (nested dicts/lists) backing the mapping interface.
        self._response = sequence_obj

    def __hash__(self):
        # Identity hash: responses are only ever compared by identity.
        return id(self)

    def __str__(self):
        pp = pprint.PrettyPrinter(indent=4)
        return pp.pformat(self._response)

    def __len__(self):
        return len(self._response)

    def __contains__(self, k):
        return k in self._response

    def __getitem__(self, k):
        return self._response[k]

    def __setitem__(self, k, v):
        # BUG FIX: item assignment invokes __setitem__(key, value); the old
        # one-argument signature made `resp[k] = v` raise TypeError instead
        # of the intended NotImplementedError. Responses stay read-only.
        raise NotImplementedError

    def __delitem__(self, k):
        raise NotImplementedError
| toddpalino/kafka-tools | kafka/tools/protocol/responses/__init__.py | Python | apache-2.0 | 2,328 |
"""Purge old data helper."""
from datetime import timedelta
import logging
import homeassistant.util.dt as dt_util
from .util import session_scope
_LOGGER = logging.getLogger(__name__)
def purge_old_data(instance, purge_days, repack):
    """Purge events and states older than purge_days ago."""
    from .models import States, Events
    from sqlalchemy.exc import SQLAlchemyError

    purge_before = dt_util.utcnow() - timedelta(days=purge_days)
    _LOGGER.debug("Purging events before %s", purge_before)

    try:
        with session_scope(session=instance.get_session()) as session:
            # States first, then events, each in a single bulk DELETE.
            state_count = session.query(States) \
                .filter(States.last_updated < purge_before) \
                .delete(synchronize_session=False)
            _LOGGER.debug("Deleted %s states", state_count)

            event_count = session.query(Events) \
                .filter(Events.time_fired < purge_before) \
                .delete(synchronize_session=False)
            _LOGGER.debug("Deleted %s events", event_count)

            # Execute sqlite vacuum command to free up space on disk
            if repack and instance.engine.driver == "pysqlite":
                _LOGGER.debug("Vacuuming SQLite to free space")
                instance.engine.execute("VACUUM")
    except SQLAlchemyError as err:
        _LOGGER.warning("Error purging history: %s.", err)
| fbradyirl/home-assistant | homeassistant/components/recorder/purge.py | Python | apache-2.0 | 1,430 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
# Re-export the package's public API from its submodules at top level.
from .envconf import *
from .path import *
# Package version; keep in sync with the release metadata in setup.py.
__version__ = '0.3.5'
| achedeuzot/django-envconf | envconf/__init__.py | Python | mit | 131 |
#!/usr/bin/env python
#
# Copyright (C) 2017 - Massachusetts Institute of Technology (MIT)
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
This Code tests module import from SEAS
"""
import os
import sys
DIR = os.path.abspath(os.path.dirname(__file__))
sys.path.insert(0, os.path.join(DIR, '../..'))
#plotting
import SEAS_Utils.common_utils.data_plotter as plt
#timer
from SEAS_Utils.common_utils.timer import simple_timer
#dbm
import SEAS_Utils.common_utils.db_management2 as dbm
#config
import SEAS_Utils.common_utils.configurable as config
#DIR
from SEAS_Utils.common_utils.DIRs import Simulation_DB
#constants
from SEAS_Utils.common_utils.constants import *
if __name__ == "__main__":
pass
| azariven/BioSig_SEAS | bin/test/test_SEAS_import.py | Python | gpl-3.0 | 1,357 |
from server import app

if __name__ == "__main__":
    # BUG FIX: app.run() previously executed at import time, so merely
    # importing this module (e.g. from a WSGI container or a test) started
    # the development server. Only run when executed as a script.
    app.run()
| billyoverton/demerit-manager | run_server.py | Python | gpl-2.0 | 33 |
# Copyright 2012-2017, Damian Johnson and The Tor Project
# See LICENSE for licensing information
import io
import re
import time
import stem
import stem.control
import stem.descriptor.router_status_entry
import stem.response
import stem.version
from stem.util import str_type, int_type, connection, log, str_tools, tor_tools
# Matches keyword=value arguments. This can't be a simple "(.*)=(.*)" pattern
# because some positional arguments, like circuit paths, can have an equal
# sign.
#
# BUG FIX: these patterns are now raw strings. '\S' in a plain string literal
# is an invalid escape sequence, which raises a DeprecationWarning (and later
# a SyntaxWarning/SyntaxError) on modern Python. The compiled patterns are
# byte-for-byte identical.

KW_ARG = re.compile(r'^(.*) ([A-Za-z0-9_]+)=(\S*)$')
QUOTED_KW_ARG = re.compile(r'^(.*) ([A-Za-z0-9_]+)="(.*)"$')
CELL_TYPE = re.compile(r'^[a-z0-9_]+$')
class Event(stem.response.ControlMessage):
  """
  Base for events we receive asynchronously, as described in section 4.1 of the
  `control-spec
  <https://gitweb.torproject.org/torspec.git/tree/control-spec.txt>`_.

  :var str type: event type
  :var int arrived_at: unix timestamp for when the message arrived
  :var list positional_args: positional arguments of the event
  :var dict keyword_args: key/value arguments of the event
  """
  _POSITIONAL_ARGS = ()  # attribute names for recognized positional arguments
  _KEYWORD_ARGS = {}  # map of 'keyword => attribute' for recognized attributes
  _QUOTED = ()  # positional arguments that are quoted
  _OPTIONALLY_QUOTED = ()  # positional arguments that may or may not be quoted
  _SKIP_PARSING = False  # skip parsing contents into our positional_args and keyword_args
  _VERSION_ADDED = stem.version.Version('0.1.1.1-alpha')  # minimum version with control-spec V1 event support
  def _parse_message(self, arrived_at = None):
    """
    Parses our content into attributes, translating this instance into the
    :class:`~stem.response.events.Event` subclass matching its type.

    :param int arrived_at: unix timestamp for when the message arrived,
      defaulting to the time of parsing

    :raises: :class:`stem.ProtocolError` if the event is blank
    """
    if arrived_at is None:
      arrived_at = int(time.time())
    if not str(self).strip():
      raise stem.ProtocolError('Received a blank tor event. Events must at the very least have a type.')
    # the first space-separated token is always the event type
    self.type = str(self).split()[0]
    self.arrived_at = arrived_at
    # if we're a recognized event type then translate ourselves into that subclass
    if self.type in EVENT_TYPE_TO_CLASS:
      self.__class__ = EVENT_TYPE_TO_CLASS[self.type]
    self.positional_args = []
    self.keyword_args = {}
    if not self._SKIP_PARSING:
      self._parse_standard_attr()
    self._parse()
  def _parse_standard_attr(self):
    """
    Most events are of the form...

      650 *( positional_args ) *( key "=" value )

    This parses this standard format, populating our **positional_args** and
    **keyword_args** attributes and creating attributes if it's in our event's
    **_POSITIONAL_ARGS** and **_KEYWORD_ARGS**.
    """
    # Tor events contain some number of positional arguments followed by
    # key/value mappings. Parsing keyword arguments from the end until we hit
    # something that isn't a key/value mapping. The rest are positional.
    content = str(self)
    while True:
      match = QUOTED_KW_ARG.match(content)
      if not match:
        match = KW_ARG.match(content)
      if match:
        content, keyword, value = match.groups()
        self.keyword_args[keyword] = value
      else:
        break
    # Setting attributes for the fields that we recognize.
    self.positional_args = content.split()[1:]
    positional = list(self.positional_args)
    for attr_name in self._POSITIONAL_ARGS:
      attr_value = None
      if positional:
        if attr_name in self._QUOTED or (attr_name in self._OPTIONALLY_QUOTED and positional[0].startswith('"')):
          attr_values = [positional.pop(0)]
          if not attr_values[0].startswith('"'):
            raise stem.ProtocolError("The %s value should be quoted, but didn't have a starting quote: %s" % (attr_name, self))
          # quoted values may contain spaces, so keep consuming
          # whitespace-separated tokens until we reach the closing quote
          while True:
            if not positional:
              raise stem.ProtocolError("The %s value should be quoted, but didn't have an ending quote: %s" % (attr_name, self))
            attr_values.append(positional.pop(0))
            if attr_values[-1].endswith('"'):
              break
          # rejoin the tokens and drop the surrounding quotes
          attr_value = ' '.join(attr_values)[1:-1]
        else:
          attr_value = positional.pop(0)
      setattr(self, attr_name, attr_value)
    # unrecognized keywords remain available via self.keyword_args
    for controller_attr_name, attr_name in self._KEYWORD_ARGS.items():
      setattr(self, attr_name, self.keyword_args.get(controller_attr_name))
  def _iso_timestamp(self, timestamp):
    """
    Parses an iso timestamp (ISOTime2Frac in the control-spec).

    :param str timestamp: timestamp to parse

    :returns: **datetime** with the parsed timestamp

    :raises: :class:`stem.ProtocolError` if timestamp is malformed
    """
    if timestamp is None:
      return None
    try:
      return str_tools._parse_iso_timestamp(timestamp)
    except ValueError as exc:
      raise stem.ProtocolError('Unable to parse timestamp (%s): %s' % (exc, self))
  # method overwritten by our subclasses for special handling that they do
  def _parse(self):
    pass
  def _log_if_unrecognized(self, attr, attr_enum):
    """
    Checks if an attribute exists in a given enumeration, logging a message if
    it isn't. Attributes can either be for a string or collection of strings

    :param str attr: name of the attribute to check
    :param stem.util.enum.Enum enum: enumeration to check against
    """
    attr_values = getattr(self, attr)
    if attr_values:
      # normalize a single string into a one-element list
      if isinstance(attr_values, (bytes, str_type)):
        attr_values = [attr_values]
      for value in attr_values:
        if value not in attr_enum:
          # log once per unrecognized value rather than failing; tor may be
          # newer than this stem release
          log_id = 'event.%s.unknown_%s.%s' % (self.type.lower(), attr, value)
          unrecognized_msg = "%s event had an unrecognized %s (%s). Maybe a new addition to the control protocol? Full Event: '%s'" % (self.type, attr, value, self)
          log.log_once(log_id, log.INFO, unrecognized_msg)
class AddrMapEvent(Event):
  """
  Event that indicates a new address mapping.

  The ADDRMAP event was one of the first Control Protocol V1 events and was
  introduced in tor version 0.1.1.1-alpha.

  .. versionchanged:: 1.1.0
    Added the cached attribute.

  :var str hostname: address being resolved
  :var str destination: destination of the resolution, this is usually an ip,
    but could be a hostname if TrackHostExits is enabled or **NONE** if the
    resolution failed
  :var datetime expiry: expiration time of the resolution in local time
  :var str error: error code if the resolution failed
  :var datetime utc_expiry: expiration time of the resolution in UTC
  :var bool cached: **True** if the resolution will be kept until it expires,
    **False** otherwise or **None** if undefined
  """
  _POSITIONAL_ARGS = ('hostname', 'destination', 'expiry')
  _KEYWORD_ARGS = {
    'error': 'error',
    'EXPIRES': 'utc_expiry',
    'CACHED': 'cached',
  }
  # BUG FIX: this was previously the *string* ('expiry') rather than a tuple.
  # It only worked because "'expiry' in 'expiry'" is an accidental substring
  # match; any attribute whose name is a substring (e.g. 'exp') would have
  # matched too. A one-element tuple makes the membership test exact.
  _OPTIONALLY_QUOTED = ('expiry',)
  def _parse(self):
    # '<error>' is tor's placeholder for a failed resolution
    if self.destination == '<error>':
      self.destination = None
    if self.expiry is not None:
      if self.expiry == 'NEVER':
        self.expiry = None
      else:
        try:
          self.expiry = stem.util.str_tools._parse_timestamp(self.expiry)
        except ValueError:
          raise stem.ProtocolError('Unable to parse date in ADDRMAP event: %s' % self)
    if self.utc_expiry is not None:
      self.utc_expiry = stem.util.str_tools._parse_timestamp(self.utc_expiry)
    if self.cached is not None:
      if self.cached == 'YES':
        self.cached = True
      elif self.cached == 'NO':
        self.cached = False
      else:
        raise stem.ProtocolError("An ADDRMAP event's CACHED mapping can only be 'YES' or 'NO': %s" % self)
class AuthDirNewDescEvent(Event):
  """
  Event specific to directory authorities, indicating that we just received new
  descriptors. The descriptor type contained within this event is unspecified
  so the descriptor contents are left unparsed.

  The AUTHDIR_NEWDESCS event was introduced in tor version 0.1.1.10-alpha and
  removed in 0.3.2.1-alpha. (:spec:`6e887ba`)

  .. deprecated:: 1.6.0
    Tor dropped this event as of version 0.3.2.1. (:spec:`6e887ba`)

  :var stem.AuthDescriptorAction action: what is being done with the descriptor
  :var str message: explanation of why we chose this action
  :var str descriptor: content of the descriptor
  """
  _SKIP_PARSING = True
  _VERSION_ADDED = stem.version.Requirement.EVENT_AUTHDIR_NEWDESCS
  def _parse(self):
    lines = str(self).split('\n')
    # minimum of five lines: type, action, message, descriptor, and 'OK'
    if len(lines) < 5:
      raise stem.ProtocolError("AUTHDIR_NEWDESCS events must contain lines for at least the type, action, message, descriptor, and terminating 'OK'")
    elif lines[-1] != 'OK':
      raise stem.ProtocolError("AUTHDIR_NEWDESCS doesn't end with an 'OK'")
    # TODO: For stem 2.0.0 we should consider changing 'descriptor' to a
    # ServerDescriptor instance.
    # line 0 is the event type; the descriptor body spans everything between
    # the message line and the terminating 'OK'
    self.action = lines[1]
    self.message = lines[2]
    self.descriptor = '\n'.join(lines[3:-1])
class BandwidthEvent(Event):
  """
  Event emitted every second with the bytes sent and received by tor.

  The BW event was one of the first Control Protocol V1 events and was
  introduced in tor version 0.1.1.1-alpha.

  :var long read: bytes received by tor that second
  :var long written: bytes sent by tor that second
  """

  _POSITIONAL_ARGS = ('read', 'written')

  def _parse(self):
    # both counters are mandatory...
    for attr in ('read', 'written'):
      if not getattr(self, attr):
        raise stem.ProtocolError('BW event is missing its %s value' % attr)

    # ... and must be non-negative integers
    if not (self.read.isdigit() and self.written.isdigit()):
      raise stem.ProtocolError("A BW event's bytes sent and received should be a positive numeric value, received: %s" % self)

    self.read = int_type(self.read)
    self.written = int_type(self.written)
class BuildTimeoutSetEvent(Event):
  """
  Event indicating that the timeout value for a circuit has changed. This was
  first added in tor version 0.2.2.7.

  The BUILDTIMEOUT_SET event was introduced in tor version 0.2.2.7-alpha.

  :var stem.TimeoutSetType set_type: way in which the timeout is changing
  :var int total_times: circuit build times tor used to determine the timeout
  :var int timeout: circuit timeout value in milliseconds
  :var int xm: Pareto parameter Xm in milliseconds
  :var float alpha: Pareto parameter alpha
  :var float quantile: CDF quantile cutoff point
  :var float timeout_rate: ratio of circuits that have time out
  :var int close_timeout: duration to keep measurement circuits in milliseconds
  :var float close_rate: ratio of measurement circuits that are closed
  """

  _POSITIONAL_ARGS = ('set_type',)
  _KEYWORD_ARGS = {
    'TOTAL_TIMES': 'total_times',
    'TIMEOUT_MS': 'timeout',
    'XM': 'xm',
    'ALPHA': 'alpha',
    'CUTOFF_QUANTILE': 'quantile',
    'TIMEOUT_RATE': 'timeout_rate',
    'CLOSE_MS': 'close_timeout',
    'CLOSE_RATE': 'close_rate',
  }

  _VERSION_ADDED = stem.version.Requirement.EVENT_BUILDTIMEOUT_SET

  def _parse(self):
    # Convert our numeric parameters in place, raising if tor handed us
    # something malformed. Unset (None) parameters are left as-is.
    conversions = (
      ('total_times', int, 'an integer'),
      ('timeout', int, 'an integer'),
      ('xm', int, 'an integer'),
      ('close_timeout', int, 'an integer'),
      ('alpha', float, 'a float'),
      ('quantile', float, 'a float'),
      ('timeout_rate', float, 'a float'),
      ('close_rate', float, 'a float'),
    )

    for param, cast, description in conversions:
      raw_value = getattr(self, param)

      if raw_value is None:
        continue

      try:
        setattr(self, param, cast(raw_value))
      except ValueError:
        raise stem.ProtocolError('The %s of a BUILDTIMEOUT_SET should be %s: %s' % (param, description, self))

    self._log_if_unrecognized('set_type', stem.TimeoutSetType)
class CircuitEvent(Event):
  """
  Event that indicates that a circuit has changed.

  The fingerprint or nickname values in our 'path' may be **None** if the
  VERBOSE_NAMES feature isn't enabled. The option was first introduced in tor
  version 0.1.2.2, and on by default after 0.2.2.1.

  The CIRC event was one of the first Control Protocol V1 events and was
  introduced in tor version 0.1.1.1-alpha.

  .. versionchanged:: 1.4.0
    Added the socks_username and socks_password attributes which is used for
    `stream isolation
    <https://gitweb.torproject.org/torspec.git/tree/proposals/171-separate-streams.txt>`_.

  :var str id: circuit identifier
  :var stem.CircStatus status: reported status for the circuit
  :var tuple path: relays involved in the circuit, these are
    **(fingerprint, nickname)** tuples
  :var tuple build_flags: :data:`~stem.CircBuildFlag` attributes
    governing how the circuit is built
  :var stem.CircPurpose purpose: purpose that the circuit is intended for
  :var stem.HiddenServiceState hs_state: status if this is a hidden service circuit
  :var str rend_query: circuit's rendezvous-point if this is hidden service related
  :var datetime created: time when the circuit was created or cannibalized
  :var stem.CircClosureReason reason: reason for the circuit to be closed
  :var stem.CircClosureReason remote_reason: remote side's reason for the circuit to be closed
  :var str socks_username: username for using this circuit
  :var str socks_password: password for using this circuit
  """
  _POSITIONAL_ARGS = ('id', 'status', 'path')
  _KEYWORD_ARGS = {
    'BUILD_FLAGS': 'build_flags',
    'PURPOSE': 'purpose',
    'HS_STATE': 'hs_state',
    'REND_QUERY': 'rend_query',
    'TIME_CREATED': 'created',
    'REASON': 'reason',
    'REMOTE_REASON': 'remote_reason',
    'SOCKS_USERNAME': 'socks_username',
    'SOCKS_PASSWORD': 'socks_password',
  }
  def _parse(self):
    self.path = tuple(stem.control._parse_circ_path(self.path))
    self.created = self._iso_timestamp(self.created)
    if self.build_flags is not None:
      self.build_flags = tuple(self.build_flags.split(','))
    if not tor_tools.is_valid_circuit_id(self.id):
      raise stem.ProtocolError("Circuit IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))
    # log, rather than fail, on enum values we don't yet recognize since tor
    # may be newer than this stem release
    self._log_if_unrecognized('status', stem.CircStatus)
    self._log_if_unrecognized('build_flags', stem.CircBuildFlag)
    self._log_if_unrecognized('purpose', stem.CircPurpose)
    self._log_if_unrecognized('hs_state', stem.HiddenServiceState)
    self._log_if_unrecognized('reason', stem.CircClosureReason)
    self._log_if_unrecognized('remote_reason', stem.CircClosureReason)
  def _compare(self, other, method):
    # Attribute-by-attribute comparison helper backing our rich comparators:
    # applies *method* to the first attribute pair that differs, returning
    # True when every attribute matches.
    if not isinstance(other, CircuitEvent):
      return False
    for attr in ('id', 'status', 'path', 'build_flags', 'purpose', 'hs_state', 'rend_query', 'created', 'reason', 'remote_reason', 'socks_username', 'socks_password'):
      my_attr = getattr(self, attr)
      other_attr = getattr(other, attr)
      # Our id attribute is technically a string, but Tor conventionally uses
      # ints. Attempt to handle as ints if that's the case so we get numeric
      # ordering.
      if attr == 'id' and my_attr and other_attr:
        if my_attr.isdigit() and other_attr.isdigit():
          my_attr = int(my_attr)
          other_attr = int(other_attr)
      # treat unset attributes as empty strings so comparisons are well defined
      if my_attr is None:
        my_attr = ''
      if other_attr is None:
        other_attr = ''
      if my_attr != other_attr:
        return method(my_attr, other_attr)
    return True
  def __hash__(self):
    return hash(str(self).strip())
  def __eq__(self, other):
    return self._compare(other, lambda s, o: s == o)
  def __ne__(self, other):
    return not self == other
  def __gt__(self, other):
    return self._compare(other, lambda s, o: s > o)
  def __ge__(self, other):
    return self._compare(other, lambda s, o: s >= o)
class CircMinorEvent(Event):
  """
  Event providing information about minor changes in our circuits. This was
  first added in tor version 0.2.3.11.

  The CIRC_MINOR event was introduced in tor version 0.2.3.11-alpha.

  :var str id: circuit identifier
  :var stem.CircEvent event: type of change in the circuit
  :var tuple path: relays involved in the circuit, these are
    **(fingerprint, nickname)** tuples
  :var tuple build_flags: :data:`~stem.CircBuildFlag` attributes
    governing how the circuit is built
  :var stem.CircPurpose purpose: purpose that the circuit is intended for
  :var stem.HiddenServiceState hs_state: status if this is a hidden service circuit
  :var str rend_query: circuit's rendezvous-point if this is hidden service related
  :var datetime created: time when the circuit was created or cannibalized
  :var stem.CircPurpose old_purpose: prior purpose for the circuit
  :var stem.HiddenServiceState old_hs_state: prior status as a hidden service circuit
  """
  _POSITIONAL_ARGS = ('id', 'event', 'path')
  _KEYWORD_ARGS = {
    'BUILD_FLAGS': 'build_flags',
    'PURPOSE': 'purpose',
    'HS_STATE': 'hs_state',
    'REND_QUERY': 'rend_query',
    'TIME_CREATED': 'created',
    'OLD_PURPOSE': 'old_purpose',
    'OLD_HS_STATE': 'old_hs_state',
  }
  _VERSION_ADDED = stem.version.Requirement.EVENT_CIRC_MINOR
  def _parse(self):
    self.path = tuple(stem.control._parse_circ_path(self.path))
    self.created = self._iso_timestamp(self.created)
    if self.build_flags is not None:
      self.build_flags = tuple(self.build_flags.split(','))
    if not tor_tools.is_valid_circuit_id(self.id):
      raise stem.ProtocolError("Circuit IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))
    # log, rather than fail, on enum values we don't yet recognize since tor
    # may be newer than this stem release
    self._log_if_unrecognized('event', stem.CircEvent)
    self._log_if_unrecognized('build_flags', stem.CircBuildFlag)
    self._log_if_unrecognized('purpose', stem.CircPurpose)
    self._log_if_unrecognized('hs_state', stem.HiddenServiceState)
    self._log_if_unrecognized('old_purpose', stem.CircPurpose)
    self._log_if_unrecognized('old_hs_state', stem.HiddenServiceState)
class ClientsSeenEvent(Event):
  """
  Periodic event on bridge relays that provides a summary of our users.

  The CLIENTS_SEEN event was introduced in tor version 0.2.1.10-alpha.

  :var datetime start_time: time in UTC that we started collecting these stats
  :var dict locales: mapping of country codes to a rounded count for the number of users
  :var dict ip_versions: mapping of ip protocols to a rounded count for the number of users
  """
  _KEYWORD_ARGS = {
    'TimeStarted': 'start_time',
    'CountrySummary': 'locales',
    'IPVersions': 'ip_versions',
  }
  _VERSION_ADDED = stem.version.Requirement.EVENT_CLIENTS_SEEN
  def _parse(self):
    if self.start_time is not None:
      self.start_time = stem.util.str_tools._parse_timestamp(self.start_time)
    # CountrySummary is a comma separated list like 'us=16,de=8,uk=8'
    if self.locales is not None:
      locale_to_count = {}
      for entry in self.locales.split(','):
        if '=' not in entry:
          raise stem.ProtocolError("The CLIENTS_SEEN's CountrySummary should be a comma separated listing of '<locale>=<count>' mappings: %s" % self)
        locale, count = entry.split('=', 1)
        if len(locale) != 2:
          raise stem.ProtocolError("Locales should be a two character code, got '%s': %s" % (locale, self))
        elif not count.isdigit():
          raise stem.ProtocolError('Locale count was non-numeric (%s): %s' % (count, self))
        elif locale in locale_to_count:
          raise stem.ProtocolError("CountrySummary had multiple mappings for '%s': %s" % (locale, self))
        locale_to_count[locale] = int(count)
      self.locales = locale_to_count
    # IPVersions is a comma separated list like 'v4=16,v6=40'
    if self.ip_versions is not None:
      protocol_to_count = {}
      for entry in self.ip_versions.split(','):
        if '=' not in entry:
          raise stem.ProtocolError("The CLIENTS_SEEN's IPVersions should be a comma separated listing of '<protocol>=<count>' mappings: %s" % self)
        protocol, count = entry.split('=', 1)
        if not count.isdigit():
          raise stem.ProtocolError('IP protocol count was non-numeric (%s): %s' % (count, self))
        protocol_to_count[protocol] = int(count)
      self.ip_versions = protocol_to_count
class ConfChangedEvent(Event):
  """
  Event that indicates that our configuration changed, either in response to a
  SETCONF or RELOAD signal.

  The CONF_CHANGED event was introduced in tor version 0.2.3.3-alpha.

  :var dict config: mapping of configuration options to their new values
    (**None** if the option is being unset)
  """

  _SKIP_PARSING = True
  _VERSION_ADDED = stem.version.Requirement.EVENT_CONF_CHANGED

  def _parse(self):
    # The event body looks like...
    #
    #   650-CONF_CHANGED
    #   650-ExitNodes=caerSidi
    #   650-ExitPolicy
    #   650-MaxCircuitDirtiness=20
    #   650 OK
    #
    # The first line is the header and the last the footer, so only the lines
    # between them carry 'key=value' (or bare 'key') entries.
    self.config = {}

    for line in str(self).splitlines()[1:-1]:
      option, separator, new_value = line.partition('=')
      self.config[option] = new_value if separator else None
class DescChangedEvent(Event):
  """
  Event that indicates that our descriptor has changed.

  The DESCCHANGED event was introduced in tor version 0.1.2.2-alpha.
  """
  # no payload beyond the event type itself, so no _parse() override is needed
  _VERSION_ADDED = stem.version.Requirement.EVENT_DESCCHANGED
class GuardEvent(Event):
  """
  Event that indicates that our guard relays have changed. The 'endpoint' could
  be either a...

  * fingerprint
  * 'fingerprint=nickname' pair

  The derived 'endpoint_*' attributes are generally more useful.

  The GUARD event was introduced in tor version 0.1.2.5-alpha.

  :var stem.GuardType guard_type: purpose the guard relay is for
  :var str endpoint: relay that the event concerns
  :var str endpoint_fingerprint: endpoint's finterprint
  :var str endpoint_nickname: endpoint's nickname if it was provided
  :var stem.GuardStatus status: status of the guard relay
  """

  _VERSION_ADDED = stem.version.Requirement.EVENT_GUARD
  _POSITIONAL_ARGS = ('guard_type', 'endpoint', 'status')

  def _parse(self):
    self.endpoint_fingerprint = None
    self.endpoint_nickname = None

    try:
      parsed_endpoint = stem.control._parse_circ_entry(self.endpoint)
    except stem.ProtocolError:
      raise stem.ProtocolError("GUARD's endpoint doesn't match a ServerSpec: %s" % self)

    self.endpoint_fingerprint, self.endpoint_nickname = parsed_endpoint

    self._log_if_unrecognized('guard_type', stem.GuardType)
    self._log_if_unrecognized('status', stem.GuardStatus)
class HSDescEvent(Event):
  """
  Event triggered when we fetch a hidden service descriptor that currently isn't in our cache.

  The HS_DESC event was introduced in tor version 0.2.5.2-alpha.

  .. versionadded:: 1.2.0

  .. versionchanged:: 1.3.0
    Added the reason attribute.

  .. versionchanged:: 1.5.0
    Added the replica attribute.

  :var stem.HSDescAction action: what is happening with the descriptor
  :var str address: hidden service address
  :var stem.HSAuth authentication: service's authentication method
  :var str directory: hidden service directory servicing the request
  :var str directory_fingerprint: hidden service directory's finterprint
  :var str directory_nickname: hidden service directory's nickname if it was provided
  :var str descriptor_id: descriptor identifier
  :var stem.HSDescReason reason: reason the descriptor failed to be fetched
  :var int replica: replica number the descriptor involves
  """
  _VERSION_ADDED = stem.version.Requirement.EVENT_HS_DESC
  _POSITIONAL_ARGS = ('action', 'address', 'authentication', 'directory', 'descriptor_id')
  _KEYWORD_ARGS = {'REASON': 'reason', 'REPLICA': 'replica'}
  def _parse(self):
    self.directory_fingerprint = None
    self.directory_nickname = None
    # 'UNKNOWN' is sent when tor can't tell us which directory was involved
    if self.directory != 'UNKNOWN':
      try:
        self.directory_fingerprint, self.directory_nickname = \
          stem.control._parse_circ_entry(self.directory)
      except stem.ProtocolError:
        raise stem.ProtocolError("HS_DESC's directory doesn't match a ServerSpec: %s" % self)
    if self.replica is not None:
      if not self.replica.isdigit():
        raise stem.ProtocolError('HS_DESC event got a non-numeric replica count (%s): %s' % (self.replica, self))
      self.replica = int(self.replica)
    self._log_if_unrecognized('action', stem.HSDescAction)
    self._log_if_unrecognized('authentication', stem.HSAuth)
class HSDescContentEvent(Event):
  """
  Provides the content of hidden service descriptors we fetch.

  The HS_DESC_CONTENT event was introduced in tor version 0.2.7.1-alpha.

  .. versionadded:: 1.4.0

  :var str address: hidden service address
  :var str descriptor_id: descriptor identifier
  :var str directory: hidden service directory servicing the request
  :var str directory_fingerprint: hidden service directory's finterprint
  :var str directory_nickname: hidden service directory's nickname if it was provided
  :var stem.descriptor.hidden_service_descriptor.HiddenServiceDescriptor descriptor: descriptor that was retrieved
  """
  _VERSION_ADDED = stem.version.Requirement.EVENT_HS_DESC_CONTENT
  _POSITIONAL_ARGS = ('address', 'descriptor_id', 'directory')
  def _parse(self):
    if self.address == 'UNKNOWN':
      self.address = None
    self.directory_fingerprint = None
    self.directory_nickname = None
    try:
      self.directory_fingerprint, self.directory_nickname = \
        stem.control._parse_circ_entry(self.directory)
    except stem.ProtocolError:
      raise stem.ProtocolError("HS_DESC_CONTENT's directory doesn't match a ServerSpec: %s" % self)
    # skip the first line (our positional arguments) and last ('OK')
    desc_content = str_tools._to_bytes('\n'.join(str(self).splitlines()[1:-1]))
    self.descriptor = None
    # NOTE(review): stem.descriptor.hidden_service_descriptor isn't imported
    # at the top of this module; this presumably relies on it being imported
    # elsewhere before an HS_DESC_CONTENT event arrives -- confirm.
    if desc_content:
      self.descriptor = list(stem.descriptor.hidden_service_descriptor._parse_file(io.BytesIO(desc_content)))[0]
class LogEvent(Event):
  """
  Tor logging event. These are the most visible kind of event since, by
  default, tor logs at the NOTICE :data:`~stem.Runlevel` to stdout.

  The logging events were some of the first Control Protocol V1 events
  and were introduced in tor version 0.1.1.1-alpha.

  :var stem.Runlevel runlevel: runlevel of the logged message
  :var str message: logged message
  """
  _SKIP_PARSING = True
  def _parse(self):
    self.runlevel = self.type
    self._log_if_unrecognized('runlevel', stem.Runlevel)
    # message is our content, minus the runlevel and the terminating "OK"
    # line of a multi-line message
    #
    # BUG FIX: this previously used rstrip('\nOK'), but rstrip() strips a
    # *character set*, so messages that legitimately ended with 'O' or 'K'
    # (or combinations thereof) were silently truncated. Strip the exact
    # '\nOK' suffix, then any trailing newlines.
    content = str(self)[len(self.runlevel) + 1:]
    if content.endswith('\nOK'):
      content = content[:-len('\nOK')]
    self.message = content.rstrip('\n')
class NetworkStatusEvent(Event):
  """
  Event for when our copy of the consensus has changed. This was introduced in
  tor version 0.1.2.3.

  The NS event was introduced in tor version 0.1.2.3-alpha.

  :var list desc: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` for the changed descriptors
  """
  _SKIP_PARSING = True
  _VERSION_ADDED = stem.version.Requirement.EVENT_NS
  def _parse(self):
    # BUG FIX: this previously used lstrip('NS\n') and rstrip('\nOK'), but
    # those strip a *character set* rather than an exact prefix/suffix, so
    # descriptor bytes drawn from those characters at either end could be
    # consumed as well. Remove the exact header and footer instead.
    content = str(self)
    if content.startswith('NS\n'):
      content = content[len('NS\n'):]
    if content.endswith('\nOK'):
      content = content[:-len('\nOK')]
    # TODO: For stem 2.0.0 consider changing 'desc' to 'descriptors' to match
    # our other events.
    self.desc = list(stem.descriptor.router_status_entry._parse_file(
      io.BytesIO(str_tools._to_bytes(content)),
      True,
      entry_class = stem.descriptor.router_status_entry.RouterStatusEntryV3,
    ))
class NetworkLivenessEvent(Event):
  """
  Event for when the network becomes reachable or unreachable.

  The NETWORK_LIVENESS event was introduced in tor version 0.2.7.2-alpha.

  .. versionadded:: 1.5.0

  :var str status: status of the network ('UP', 'DOWN', or possibly other
    statuses in the future)
  """
  _VERSION_ADDED = stem.version.Requirement.EVENT_NETWORK_LIVENESS
  # the single positional 'status' argument is all this event carries, so no
  # _parse() override is needed
  _POSITIONAL_ARGS = ('status',)
class NewConsensusEvent(Event):
  """
  Event for when we have a new consensus. This is similar to
  :class:`~stem.response.events.NetworkStatusEvent`, except that it contains
  the whole consensus so anything not listed is implicitly no longer
  recommended.

  The NEWCONSENSUS event was introduced in tor version 0.2.1.13-alpha.

  :var list desc: :class:`~stem.descriptor.router_status_entry.RouterStatusEntryV3` for the changed descriptors
  """
  _SKIP_PARSING = True
  _VERSION_ADDED = stem.version.Requirement.EVENT_NEWCONSENSUS
  def _parse(self):
    # BUG FIX: this previously used lstrip('NEWCONSENSUS\n') and
    # rstrip('\nOK'), but those strip a *character set* rather than an exact
    # prefix/suffix, so consensus bytes drawn from those characters at either
    # end (such as a trailing 'K' in a base64 digest) could be consumed as
    # well. Remove the exact header and footer instead.
    content = str(self)
    if content.startswith('NEWCONSENSUS\n'):
      content = content[len('NEWCONSENSUS\n'):]
    if content.endswith('\nOK'):
      content = content[:-len('\nOK')]
    # TODO: For stem 2.0.0 consider changing 'desc' to 'descriptors' to match
    # our other events.
    self.desc = list(stem.descriptor.router_status_entry._parse_file(
      io.BytesIO(str_tools._to_bytes(content)),
      True,
      entry_class = stem.descriptor.router_status_entry.RouterStatusEntryV3,
    ))
class NewDescEvent(Event):
  """
  Event that indicates that a new descriptor is available.

  The fingerprint or nickname values in our 'relays' may be **None** if the
  VERBOSE_NAMES feature isn't enabled. The option was first introduced in tor
  version 0.1.2.2, and on by default after 0.2.2.1.

  The NEWDESC event was one of the first Control Protocol V1 events and was
  introduced in tor version 0.1.1.1-alpha.

  :var tuple relays: **(fingerprint, nickname)** tuples for the relays with
    new descriptors
  """

  def _parse(self):
    # Everything after the leading 'NEWDESC' keyword is a relay entry of the
    # same form as circuit path entries.
    entries = str(self).split()[1:]
    self.relays = tuple(stem.control._parse_circ_entry(item) for item in entries)
class ORConnEvent(Event):
  """
  Event that indicates a change in a relay connection. The 'endpoint' could be
  any of several things including a...

  * fingerprint
  * nickname
  * 'fingerprint=nickname' pair
  * address:port

  The derived 'endpoint_*' attributes are generally more useful.

  The ORCONN event was one of the first Control Protocol V1 events and was
  introduced in tor version 0.1.1.1-alpha. Its id attribute was added in
  version 0.2.5.2-alpha.

  .. versionchanged:: 1.2.0
     Added the id attribute.

  :var str id: connection identifier
  :var str endpoint: relay that the event concerns
  :var str endpoint_fingerprint: endpoint's fingerprint if it was provided
  :var str endpoint_nickname: endpoint's nickname if it was provided
  :var str endpoint_address: endpoint's address if it was provided
  :var int endpoint_port: endpoint's port if it was provided
  :var stem.ORStatus status: state of the connection
  :var stem.ORClosureReason reason: reason for the connection to be closed
  :var int circ_count: number of established and pending circuits
  """

  _POSITIONAL_ARGS = ('endpoint', 'status')
  _KEYWORD_ARGS = {
    'REASON': 'reason',
    'NCIRCS': 'circ_count',
    'ID': 'id',
  }

  def _parse(self):
    # Only one family of endpoint_* attributes gets populated, depending on
    # whether 'endpoint' parses as a relay or as an 'address:port'.
    self.endpoint_fingerprint = None
    self.endpoint_nickname = None
    self.endpoint_address = None
    self.endpoint_port = None

    try:
      self.endpoint_fingerprint, self.endpoint_nickname = \
        stem.control._parse_circ_entry(self.endpoint)
    except stem.ProtocolError:
      # Not a relay entry; fall back to treating it as address:port.
      if ':' not in self.endpoint:
        raise stem.ProtocolError("ORCONN endpoint is neither a relay nor 'address:port': %s" % self)

      # rsplit so an IPv6-style address containing colons keeps its colons.
      address, port = self.endpoint.rsplit(':', 1)

      if not connection.is_valid_port(port):
        raise stem.ProtocolError("ORCONN's endpoint location's port is invalid: %s" % self)

      self.endpoint_address = address
      self.endpoint_port = int(port)

    # NCIRCS is optional; validate before converting to an int.
    if self.circ_count is not None:
      if not self.circ_count.isdigit():
        raise stem.ProtocolError('ORCONN event got a non-numeric circuit count (%s): %s' % (self.circ_count, self))

      self.circ_count = int(self.circ_count)

    if self.id and not tor_tools.is_valid_connection_id(self.id):
      raise stem.ProtocolError("Connection IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))

    # Unrecognized enum values are logged rather than rejected so newer tor
    # versions don't break us.
    self._log_if_unrecognized('status', stem.ORStatus)
    self._log_if_unrecognized('reason', stem.ORClosureReason)
class SignalEvent(Event):
  """
  Event that indicates that tor has received and acted upon a signal being sent
  to the process. As of tor version 0.2.4.6 the only signals conveyed by this
  event are...

  * RELOAD
  * DUMP
  * DEBUG
  * NEWNYM
  * CLEARDNSCACHE

  The SIGNAL event was introduced in tor version 0.2.3.1-alpha.

  :var stem.Signal signal: signal that tor received
  """

  _POSITIONAL_ARGS = ('signal',)
  _VERSION_ADDED = stem.version.Requirement.EVENT_SIGNAL

  def _parse(self):
    # Log (rather than fail) if we received a signal outside the set this
    # event is documented to convey.
    self._log_if_unrecognized('signal', (
      stem.Signal.RELOAD,
      stem.Signal.DUMP,
      stem.Signal.DEBUG,
      stem.Signal.NEWNYM,
      stem.Signal.CLEARDNSCACHE,
    ))
class StatusEvent(Event):
  """
  Notification of a change in tor's state. These are generally triggered for
  the same sort of things as log messages of the NOTICE level or higher.
  However, unlike :class:`~stem.response.events.LogEvent` these contain well
  formed data.

  The STATUS_GENERAL, STATUS_CLIENT, STATUS_SERVER events were introduced
  in tor version 0.1.2.3-alpha.

  :var stem.StatusType status_type: category of the status event
  :var stem.Runlevel runlevel: runlevel of the logged message
  :var str action: activity that caused this message
  :var dict arguments: attributes about the event
  """

  _POSITIONAL_ARGS = ('runlevel', 'action')
  _VERSION_ADDED = stem.version.Requirement.EVENT_STATUS

  def _parse(self):
    # Map the event keyword onto its StatusType category.
    status_types = {
      'STATUS_GENERAL': stem.StatusType.GENERAL,
      'STATUS_CLIENT': stem.StatusType.CLIENT,
      'STATUS_SERVER': stem.StatusType.SERVER,
    }

    if self.type not in status_types:
      raise ValueError("BUG: Unrecognized status type (%s), likely an EVENT_TYPE_TO_CLASS addition without revising how 'status_type' is assigned." % self.type)

    self.status_type = status_types[self.type]

    # Just an alias for our parent class' keyword_args since that already
    # parses these for us. Unlike our other event types Tor commonly supplies
    # arbitrary key/value pairs for these, so making an alias here to better
    # draw attention that the StatusEvent will likely have them.

    self.arguments = self.keyword_args

    self._log_if_unrecognized('runlevel', stem.Runlevel)
class StreamEvent(Event):
  """
  Event that indicates that a stream has changed.

  The STREAM event was one of the first Control Protocol V1 events and was
  introduced in tor version 0.1.1.1-alpha.

  :var str id: stream identifier
  :var stem.StreamStatus status: reported status for the stream
  :var str circ_id: circuit that the stream is attached to, this is **None** if
    the stream is unattached
  :var str target: destination of the stream
  :var str target_address: destination address (ip, hostname, or '(Tor_internal)')
  :var int target_port: destination port
  :var stem.StreamClosureReason reason: reason for the stream to be closed
  :var stem.StreamClosureReason remote_reason: remote side's reason for the stream to be closed
  :var stem.StreamSource source: origin of the REMAP request
  :var str source_addr: requester of the connection
  :var str source_address: requester address (ip or hostname)
  :var int source_port: requester port
  :var stem.StreamPurpose purpose: purpose for the stream
  """

  _POSITIONAL_ARGS = ('id', 'status', 'circ_id', 'target')
  _KEYWORD_ARGS = {
    'REASON': 'reason',
    'REMOTE_REASON': 'remote_reason',
    'SOURCE': 'source',
    'SOURCE_ADDR': 'source_addr',
    'PURPOSE': 'purpose',
  }

  def _parse(self):
    # The target is mandatory and must be 'address:port'; split it into the
    # derived target_address / target_port attributes.
    if self.target is None:
      raise stem.ProtocolError("STREAM event didn't have a target: %s" % self)
    else:
      if ':' not in self.target:
        raise stem.ProtocolError("Target location must be of the form 'address:port': %s" % self)

      # rsplit so colon-bearing (e.g. IPv6-style) addresses stay intact
      address, port = self.target.rsplit(':', 1)

      if not connection.is_valid_port(port, allow_zero = True):
        raise stem.ProtocolError("Target location's port is invalid: %s" % self)

      self.target_address = address
      self.target_port = int(port)

    # SOURCE_ADDR is optional; mirror it into source_address / source_port
    # when present.
    if self.source_addr is None:
      self.source_address = None
      self.source_port = None
    else:
      if ':' not in self.source_addr:
        raise stem.ProtocolError("Source location must be of the form 'address:port': %s" % self)

      address, port = self.source_addr.rsplit(':', 1)

      if not connection.is_valid_port(port, allow_zero = True):
        raise stem.ProtocolError("Source location's port is invalid: %s" % self)

      self.source_address = address
      self.source_port = int(port)

    # spec specifies a circ_id of zero if the stream is unattached
    if self.circ_id == '0':
      self.circ_id = None

    # Unrecognized enum values are logged rather than rejected.
    self._log_if_unrecognized('reason', stem.StreamClosureReason)
    self._log_if_unrecognized('remote_reason', stem.StreamClosureReason)
    self._log_if_unrecognized('purpose', stem.StreamPurpose)
class StreamBwEvent(Event):
  """
  Event (emitted approximately every second) with the bytes sent and received
  by the application since the last such event on this stream.

  The STREAM_BW event was introduced in tor version 0.1.2.8-beta.

  .. versionchanged:: 1.6.0
     Added the time attribute.

  :var str id: stream identifier
  :var long written: bytes sent by the application
  :var long read: bytes received by the application
  :var datetime time: time when the measurement was recorded
  """

  _POSITIONAL_ARGS = ('id', 'written', 'read', 'time')
  _VERSION_ADDED = stem.version.Requirement.EVENT_STREAM_BW

  def _parse(self):
    # Validate everything before converting, so a malformed event raises a
    # ProtocolError rather than a ValueError mid-assignment.
    if not tor_tools.is_valid_stream_id(self.id):
      raise stem.ProtocolError("Stream IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))
    elif not self.written:
      raise stem.ProtocolError('STREAM_BW event is missing its written value')
    elif not self.read:
      raise stem.ProtocolError('STREAM_BW event is missing its read value')
    elif not self.read.isdigit() or not self.written.isdigit():
      raise stem.ProtocolError("A STREAM_BW event's bytes sent and received should be a positive numeric value, received: %s" % self)

    # int_type is a module-level alias (not visible in this chunk) --
    # presumably long/int for python 2/3 compatibility; confirm at the top of
    # the file.
    self.read = int_type(self.read)
    self.written = int_type(self.written)
    self.time = self._iso_timestamp(self.time)
class TransportLaunchedEvent(Event):
  """
  Event triggered when a pluggable transport is launched.

  The TRANSPORT_LAUNCHED event was introduced in tor version 0.2.5.0-alpha.

  .. versionadded:: 1.1.0

  :var str type: 'server' or 'client'
  :var str name: name of the pluggable transport
  :var str address: IPv4 or IPv6 address where the transport is listening for
    connections
  :var int port: port where the transport is listening for connections
  """

  _POSITIONAL_ARGS = ('type', 'name', 'address', 'port')
  _VERSION_ADDED = stem.version.Requirement.EVENT_TRANSPORT_LAUNCHED

  def _parse(self):
    # Validate all positional fields before converting the port to an int.
    if self.type not in ('server', 'client'):
      raise stem.ProtocolError("Transport type should either be 'server' or 'client': %s" % self)

    is_ipv4 = connection.is_valid_ipv4_address(self.address)
    is_ipv6 = connection.is_valid_ipv6_address(self.address)

    if not (is_ipv4 or is_ipv6):
      raise stem.ProtocolError("Transport address isn't a valid IPv4 or IPv6 address: %s" % self)

    if not connection.is_valid_port(self.port):
      raise stem.ProtocolError('Transport port is invalid: %s' % self)

    self.port = int(self.port)
class ConnectionBandwidthEvent(Event):
  """
  Event emitted every second with the bytes sent and received by tor on a
  per-connection basis.

  The CONN_BW event was introduced in tor version 0.2.5.2-alpha.

  .. versionadded:: 1.2.0

  .. versionchanged:: 1.6.0
     Renamed 'type' attribute to 'conn_type' so it wouldn't override the
     parent class attribute with the same name.

  :var str id: connection identifier
  :var stem.ConnectionType conn_type: connection type
  :var long read: bytes received by tor that second
  :var long written: bytes sent by tor that second
  """

  _KEYWORD_ARGS = {
    'ID': 'id',
    'TYPE': 'conn_type',
    'READ': 'read',
    'WRITTEN': 'written',
  }
  _VERSION_ADDED = stem.version.Requirement.EVENT_CONN_BW

  def _parse(self):
    # All four keyword fields are required; validate before converting.
    if not self.id:
      raise stem.ProtocolError('CONN_BW event is missing its id')
    elif not self.conn_type:
      raise stem.ProtocolError('CONN_BW event is missing its connection type')
    elif not self.read:
      raise stem.ProtocolError('CONN_BW event is missing its read value')
    elif not self.written:
      raise stem.ProtocolError('CONN_BW event is missing its written value')
    elif not self.read.isdigit() or not self.written.isdigit():
      raise stem.ProtocolError("A CONN_BW event's bytes sent and received should be a positive numeric value, received: %s" % self)
    elif not tor_tools.is_valid_connection_id(self.id):
      raise stem.ProtocolError("Connection IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))

    # int_type: module-level py2/py3 integer alias (defined earlier in file)
    self.read = int_type(self.read)
    self.written = int_type(self.written)

    self._log_if_unrecognized('conn_type', stem.ConnectionType)
class CircuitBandwidthEvent(Event):
  """
  Event emitted every second with the bytes sent and received by tor on a
  per-circuit basis.

  The CIRC_BW event was introduced in tor version 0.2.5.2-alpha.

  .. versionadded:: 1.2.0

  .. versionchanged:: 1.6.0
     Added the time attribute.

  :var str id: circuit identifier
  :var long read: bytes received by tor that second
  :var long written: bytes sent by tor that second
  :var datetime time: time when the measurement was recorded
  """

  _KEYWORD_ARGS = {
    'ID': 'id',
    'READ': 'read',
    'WRITTEN': 'written',
    'TIME': 'time',
  }
  _VERSION_ADDED = stem.version.Requirement.EVENT_CIRC_BW

  def _parse(self):
    # Validate the required fields before any conversion takes place.
    if not self.id:
      raise stem.ProtocolError('CIRC_BW event is missing its id')
    elif not self.read:
      raise stem.ProtocolError('CIRC_BW event is missing its read value')
    elif not self.written:
      raise stem.ProtocolError('CIRC_BW event is missing its written value')
    elif not self.read.isdigit() or not self.written.isdigit():
      raise stem.ProtocolError("A CIRC_BW event's bytes sent and received should be a positive numeric value, received: %s" % self)
    elif not tor_tools.is_valid_circuit_id(self.id):
      raise stem.ProtocolError("Circuit IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))

    # int_type: module-level py2/py3 integer alias (defined earlier in file)
    self.read = int_type(self.read)
    self.written = int_type(self.written)
    self.time = self._iso_timestamp(self.time)
class CellStatsEvent(Event):
  """
  Event emitted every second with a count of the number of cells types broken
  down by the circuit. **These events are only emitted if TestingTorNetwork is
  set.**

  The CELL_STATS event was introduced in tor version 0.2.5.2-alpha.

  .. versionadded:: 1.2.0

  :var str id: circuit identifier
  :var str inbound_queue: inbound queue identifier
  :var str inbound_connection: inbound connection identifier
  :var dict inbound_added: mapping of added inbound cell types to their count
  :var dict inbound_removed: mapping of removed inbound cell types to their count
  :var dict inbound_time: mapping of inbound cell types to the time they took to write in milliseconds
  :var str outbound_queue: outbound queue identifier
  :var str outbound_connection: outbound connection identifier
  :var dict outbound_added: mapping of added outbound cell types to their count
  :var dict outbound_removed: mapping of removed outbound cell types to their count
  :var dict outbound_time: mapping of outbound cell types to the time they took to write in milliseconds
  """

  _KEYWORD_ARGS = {
    'ID': 'id',
    'InboundQueue': 'inbound_queue',
    'InboundConn': 'inbound_connection',
    'InboundAdded': 'inbound_added',
    'InboundRemoved': 'inbound_removed',
    'InboundTime': 'inbound_time',
    'OutboundQueue': 'outbound_queue',
    'OutboundConn': 'outbound_connection',
    'OutboundAdded': 'outbound_added',
    'OutboundRemoved': 'outbound_removed',
    'OutboundTime': 'outbound_time',
  }
  _VERSION_ADDED = stem.version.Requirement.EVENT_CELL_STATS

  def _parse(self):
    # Every identifier is optional, but when present it must be well formed.
    if self.id and not tor_tools.is_valid_circuit_id(self.id):
      raise stem.ProtocolError("Circuit IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))
    elif self.inbound_queue and not tor_tools.is_valid_circuit_id(self.inbound_queue):
      raise stem.ProtocolError("Queue IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.inbound_queue, self))
    elif self.inbound_connection and not tor_tools.is_valid_connection_id(self.inbound_connection):
      raise stem.ProtocolError("Connection IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.inbound_connection, self))
    elif self.outbound_queue and not tor_tools.is_valid_circuit_id(self.outbound_queue):
      raise stem.ProtocolError("Queue IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.outbound_queue, self))
    elif self.outbound_connection and not tor_tools.is_valid_connection_id(self.outbound_connection):
      raise stem.ProtocolError("Connection IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.outbound_connection, self))

    # Convert the raw 'key:count,key:count' strings into dicts (None stays
    # None when the keyword was absent).
    self.inbound_added = _parse_cell_type_mapping(self.inbound_added)
    self.inbound_removed = _parse_cell_type_mapping(self.inbound_removed)
    self.inbound_time = _parse_cell_type_mapping(self.inbound_time)
    self.outbound_added = _parse_cell_type_mapping(self.outbound_added)
    self.outbound_removed = _parse_cell_type_mapping(self.outbound_removed)
    self.outbound_time = _parse_cell_type_mapping(self.outbound_time)
class TokenBucketEmptyEvent(Event):
  """
  Event emitted when refilling an empty token bucket. **These events are only
  emitted if TestingTorNetwork is set.**

  The TB_EMPTY event was introduced in tor version 0.2.5.2-alpha.

  .. versionadded:: 1.2.0

  :var stem.TokenBucket bucket: bucket being refilled
  :var str id: connection identifier
  :var int read: time in milliseconds since the read bucket was last refilled
  :var int written: time in milliseconds since the write bucket was last refilled
  :var int last_refill: time in milliseconds the bucket has been empty since last refilled
  """

  _POSITIONAL_ARGS = ('bucket',)
  _KEYWORD_ARGS = {
    'ID': 'id',
    'READ': 'read',
    'WRITTEN': 'written',
    'LAST': 'last_refill',
  }
  _VERSION_ADDED = stem.version.Requirement.EVENT_TB_EMPTY

  def _parse(self):
    # NOTE(review): READ/WRITTEN/LAST are assumed to always be present -- if
    # a keyword were absent its attribute would be None and .isdigit() would
    # raise AttributeError rather than ProtocolError. Confirm against the
    # base class' keyword handling.
    if self.id and not tor_tools.is_valid_connection_id(self.id):
      raise stem.ProtocolError("Connection IDs must be one to sixteen alphanumeric characters, got '%s': %s" % (self.id, self))
    elif not self.read.isdigit():
      raise stem.ProtocolError("A TB_EMPTY's READ value should be a positive numeric value, received: %s" % self)
    elif not self.written.isdigit():
      raise stem.ProtocolError("A TB_EMPTY's WRITTEN value should be a positive numeric value, received: %s" % self)
    elif not self.last_refill.isdigit():
      raise stem.ProtocolError("A TB_EMPTY's LAST value should be a positive numeric value, received: %s" % self)

    self.read = int(self.read)
    self.written = int(self.written)
    self.last_refill = int(self.last_refill)

    self._log_if_unrecognized('bucket', stem.TokenBucket)
def _parse_cell_type_mapping(mapping):
  """
  Parses a mapping of the form...

    key1:value1,key2:value2...

  ... in which keys are strings and values are integers.

  :param str mapping: value to be parsed

  :returns: dict of **str => int** mappings

  :raises: **stem.ProtocolError** if unable to parse the mapping
  """

  if mapping is None:
    return None

  parsed = {}

  for item in mapping.split(','):
    if ':' not in item:
      raise stem.ProtocolError("Mappings are expected to be of the form 'key:value', got '%s': %s" % (item, mapping))

    key, value = item.rsplit(':', 1)

    if not CELL_TYPE.match(key):
      raise stem.ProtocolError("Key had invalid characters, got '%s': %s" % (key, mapping))
    elif not value.isdigit():
      raise stem.ProtocolError("Values should just be integers, got '%s': %s" % (value, mapping))

    parsed[key] = int(value)

  return parsed
# Maps each tor control-port event keyword onto the Event subclass that
# parses it. Note that all runlevel keywords (DEBUG/INFO/NOTICE/WARN/ERR)
# share the LogEvent parser, and the STATUS_* keywords share StatusEvent.
EVENT_TYPE_TO_CLASS = {
  'ADDRMAP': AddrMapEvent,
  'AUTHDIR_NEWDESCS': AuthDirNewDescEvent,
  'BUILDTIMEOUT_SET': BuildTimeoutSetEvent,
  'BW': BandwidthEvent,
  'CELL_STATS': CellStatsEvent,
  'CIRC': CircuitEvent,
  'CIRC_BW': CircuitBandwidthEvent,
  'CIRC_MINOR': CircMinorEvent,
  'CLIENTS_SEEN': ClientsSeenEvent,
  'CONF_CHANGED': ConfChangedEvent,
  'CONN_BW': ConnectionBandwidthEvent,
  'DEBUG': LogEvent,
  'DESCCHANGED': DescChangedEvent,
  'ERR': LogEvent,
  'GUARD': GuardEvent,
  'HS_DESC': HSDescEvent,
  'HS_DESC_CONTENT': HSDescContentEvent,
  'INFO': LogEvent,
  'NETWORK_LIVENESS': NetworkLivenessEvent,
  'NEWCONSENSUS': NewConsensusEvent,
  'NEWDESC': NewDescEvent,
  'NOTICE': LogEvent,
  'NS': NetworkStatusEvent,
  'ORCONN': ORConnEvent,
  'SIGNAL': SignalEvent,
  'STATUS_CLIENT': StatusEvent,
  'STATUS_GENERAL': StatusEvent,
  'STATUS_SERVER': StatusEvent,
  'STREAM': StreamEvent,
  'STREAM_BW': StreamBwEvent,
  'TB_EMPTY': TokenBucketEmptyEvent,
  'TRANSPORT_LAUNCHED': TransportLaunchedEvent,
  'WARN': LogEvent,

  # accounting for a bug in tor 0.2.0.22 (misspelled 'STATUS_SERVER' -- kept
  # deliberately, do not "fix" this key)
  'STATUS_SEVER': StatusEvent,
}
| ewongbb/stem | stem/response/events.py | Python | lgpl-3.0 | 49,087 |
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six
from heat.engine.resources import signal_responder
from heat.engine.resources import wait_condition as wc_base
from heat.engine import support
class WaitConditionHandle(wc_base.BaseWaitConditionHandle):
    """AWS WaitConditionHandle resource.

    the main point of this class is to :
    have no dependencies (so the instance can reference it)
    generate a unique url (to be returned in the reference)
    then the cfn-signal will use this url to post to and
    WaitCondition will poll it to see if has been written to.
    """

    support_status = support.SupportStatus(version='2014.1')

    METADATA_KEYS = (
        DATA, REASON, STATUS, UNIQUE_ID
    ) = (
        'Data', 'Reason', 'Status', 'UniqueId'
    )

    def get_reference_id(self):
        # Before the handle has a resource id we can only be referenced by
        # name; afterwards hand back the pre-signed URL that cfn-signal
        # posts to.
        if not self.resource_id:
            return six.text_type(self.name)

        signed_url = self._get_ec2_signed_url(
            signal_type=signal_responder.WAITCONDITION)
        return six.text_type(signed_url)

    def metadata_update(self, new_metadata=None):
        """DEPRECATED. Should use handle_signal instead."""
        self.handle_signal(details=new_metadata)

    def handle_signal(self, details=None):
        """Validate and update the resource metadata.

        metadata must use the following format:

        {
            "Status" : "Status (must be SUCCESS or FAILURE)",
            "UniqueId" : "Some ID, should be unique for Count>1",
            "Data" : "Arbitrary Data",
            "Reason" : "Reason String"
        }
        """
        # A signal with no payload is silently ignored.
        if details is not None:
            return super(WaitConditionHandle, self).handle_signal(details)
def resource_mapping():
    """Map CFN resource type names to their implementing classes."""
    return {'AWS::CloudFormation::WaitConditionHandle': WaitConditionHandle}
| dragorosson/heat | heat/engine/resources/aws/cfn/wait_condition_handle.py | Python | apache-2.0 | 2,334 |
# -*- coding: utf-8 -*-
#
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from mycroft.util.lang.format_common import convert_to_mixed_fraction
from math import floor
# Dutch month names, indexed January..December.
months = ['januari', 'februari', 'maart', 'april', 'mei', 'juni',
          'juli', 'augustus', 'september', 'oktober', 'november',
          'december']

# Cardinal number words: 0-20 are irregular, then multiples of ten and 100.
NUM_STRING_NL = {
    0: 'nul',
    1: u'één',
    2: 'twee',
    3: 'drie',
    4: 'vier',
    5: 'vijf',
    6: 'zes',
    7: 'zeven',
    8: 'acht',
    9: 'negen',
    10: 'tien',
    11: 'elf',
    12: 'twaalf',
    13: 'dertien',
    14: 'veertien',
    15: 'vijftien',
    16: 'zestien',
    17: 'zeventien',
    18: 'achttien',  # fixed: was misspelled 'actien'
    19: 'negentien',
    20: 'twintig',
    30: 'dertig',
    40: 'veertig',
    50: 'vijftig',
    60: 'zestig',
    70: 'zeventig',
    80: 'tachtig',
    90: 'negentig',
    100: 'honderd'
}

# Dutch uses "long scale" https://en.wikipedia.org/wiki/Long_and_short_scales
# Currently, numbers are limited to 1000000000000000000000000,
# but NUM_POWERS_OF_TEN can be extended to include additional number words
NUM_POWERS_OF_TEN = [
    '', 'duizend', 'miljoen', 'miljard', 'biljoen', 'biljard', 'triljoen',
    'triljard'
]

# Ordinal denominator words for spoken fractions (2 -> 'half', 3 -> 'derde'...).
FRACTION_STRING_NL = {
    2: 'half',
    3: 'derde',
    4: 'vierde',
    5: 'vijfde',
    6: 'zesde',
    7: 'zevende',
    8: 'achtste',
    9: 'negende',
    10: 'tiende',
    11: 'elfde',
    12: 'twaalfde',
    13: 'dertiende',
    14: 'veertiende',
    15: 'vijftiende',
    16: 'zestiende',
    17: 'zeventiende',
    18: 'achttiende',
    19: 'negentiende',
    20: 'twintigste'
}

# Numbers below 1 million are written in one word in dutch, yielding very
# long words
# In some circumstances it may better to seperate individual words
# Set EXTRA_SPACE=" " for separating numbers below 1 million (
# orthographically incorrect)
# Set EXTRA_SPACE="" for correct spelling, this is standard
# EXTRA_SPACE = " "
EXTRA_SPACE = ""
def nice_number_nl(number, speech, denominators):
    """ Dutch helper for nice_number

    Formats a float into a human-friendly mixed fraction, e.g. 4.5 becomes
    "4 en 1 half" for speech and "4 1/2" for display.

    Args:
        number (int or float): the float to format
        speech (bool): format for speech (True) or display (False)
        denominators (iter of ints): denominators to use, default [1 .. 20]
    Returns:
        (str): The formatted string.
    """
    result = convert_to_mixed_fraction(number, denominators)

    if not result:
        # No matching fraction; fall back to a 3-decimal comma notation.
        return str(round(number, 3)).replace(".", ",")

    whole, num, den = result

    if not speech:
        # TODO: Number grouping? E.g. "1,000,000"
        return str(whole) if num == 0 else '{} {}/{}'.format(whole, num, den)

    if num == 0:
        return str(whole)

    den_str = FRACTION_STRING_NL[den]

    if whole == 0:
        if num == 1:
            return u'één {}'.format(den_str)
        return '{} {}'.format(num, den_str)

    if num == 1:
        return u'{} en één {}'.format(whole, den_str)

    return '{} en {} {}'.format(whole, num, den_str)
def pronounce_number_nl(num, places=2):
    """
    Convert a number to its spoken Dutch equivalent

    For example, 5.2 would return 'vijf komma twee'

    Args:
        num(float or int): the number to pronounce (set limit below)
        places(int): maximum decimal places to speak
    Returns:
        (str): The pronounced number
    """
    def pronounce_triplet_nl(num):
        # Pronounce a group of up to three digits (0..999).
        result = ""
        num = floor(num)
        if num > 99:
            hundreds = floor(num / 100)
            if hundreds > 0:
                result += NUM_STRING_NL[
                    hundreds] + EXTRA_SPACE + 'honderd' + EXTRA_SPACE
                num -= hundreds * 100
        if num == 0:
            result += ''  # do nothing
        elif num <= 20:
            result += NUM_STRING_NL[num]  # + EXTRA_SPACE
        elif num > 20:
            # Dutch reverses tens/ones: 23 -> "drieëntwintig" (ones + 'en' + tens)
            ones = num % 10
            tens = num - ones
            if ones > 0:
                result += NUM_STRING_NL[ones] + EXTRA_SPACE
                if tens > 0:
                    result += 'en' + EXTRA_SPACE
            if tens > 0:
                result += NUM_STRING_NL[tens] + EXTRA_SPACE
        return result

    def pronounce_fractional_nl(num,
                                places):  # fixed number of places even with
        # trailing zeros
        result = ""
        place = 10
        while places > 0:  # doesn't work with 1.0001 and places = 2: int(
            # num*place) % 10 > 0 and places > 0:
            result += " " + NUM_STRING_NL[int(num * place) % 10]
            if int(num * place) % 10 == 1:
                result += ''  # no special casing needed for a trailing "1"
            place *= 10
            places -= 1
        return result

    def pronounce_whole_number_nl(num, scale_level=0):
        # Recursively pronounce an integer, one thousand-triplet per level
        # (scale_level 0 = units, 1 = 'duizend', 2+ = NUM_POWERS_OF_TEN).
        if num == 0:
            return ''

        num = floor(num)
        result = ''
        last_triplet = num % 1000

        if last_triplet == 1:
            # 'één' needs special casing ("één duizend" vs "één miljoen ").
            if scale_level == 0:
                if result != '':
                    result += '' + u'één'
                else:
                    result += u"één"
            elif scale_level == 1:
                result += u'één' + EXTRA_SPACE + 'duizend' + EXTRA_SPACE
            else:
                result += u"één " + NUM_POWERS_OF_TEN[scale_level] + ' '
        elif last_triplet > 1:
            result += pronounce_triplet_nl(last_triplet)
            if scale_level == 1:
                # result += EXTRA_SPACE
                result += 'duizend' + EXTRA_SPACE
            if scale_level >= 2:
                # if EXTRA_SPACE == '':
                #     result += " "
                result += " " + NUM_POWERS_OF_TEN[scale_level] + ' '
            if scale_level >= 2:
                if scale_level % 2 == 0:
                    result += ""  # Miljioen
                result += ""  # Miljard, Miljoen

        # Recurse into the next (higher) triplet and prepend its spoken form.
        num = floor(num / 1000)
        scale_level += 1
        return pronounce_whole_number_nl(num,
                                         scale_level) + result + ''

    result = ""
    if abs(num) >= 1000000000000000000000000:  # cannot do more than this
        return str(num)
    elif num == 0:
        return str(NUM_STRING_NL[0])
    elif num < 0:
        return "min " + pronounce_number_nl(abs(num), places)
    else:
        if num == int(num):
            return pronounce_whole_number_nl(num)
        else:
            whole_number_part = floor(num)
            fractional_part = num - whole_number_part
            result += pronounce_whole_number_nl(whole_number_part)
            if places > 0:
                result += " komma"
                result += pronounce_fractional_nl(fractional_part, places)
            return result
def pronounce_ordinal_nl(num):
    """Return the Dutch ordinal for a whole non-negative number.

    Values below 4 come from a lookup table; above that the cardinal gets
    the regular '-de' suffix, except 8 and 20+ which take '-ste'.
    Non-integral or negative input is returned unchanged.
    """
    ordinals = ["nulste", "eerste", "tweede", "derde", "vierde", "vijfde",
                "zesde", "zevende", "achtste"]

    # only for whole positive numbers including zero
    if num < 0 or num != int(num):
        return num

    if num < 4:
        return ordinals[num]
    if num == 8:
        return pronounce_number_nl(num) + "ste"
    if num < 20:
        return pronounce_number_nl(num) + "de"
    return pronounce_number_nl(num) + "ste"
def nice_time_nl(dt, speech=True, use_24hour=False, use_ampm=False):
    """
    Format a time to a comfortable human format

    For example, generate 'five thirty' for speech or '5:30' for
    text display.

    Args:
        dt (datetime): date to format (assumes already in local timezone)
        speech (bool): format for speech (default/True) or display (False)
        use_24hour (bool): output in 24-hour/military or 12-hour format
        use_ampm (bool): include the am/pm for 12-hour format
    Returns:
        (str): The formatted time string
    """
    if use_24hour:
        # e.g. "03:01" or "14:22"
        string = dt.strftime("%H:%M")
    else:
        if use_ampm:
            # e.g. "3:01 AM" or "2:22 PM"
            string = dt.strftime("%I:%M %p")
        else:
            # e.g. "3:01" or "2:22"
            string = dt.strftime("%I:%M")
        if string[0] == '0':
            string = string[1:]  # strip leading zeros

    if not speech:
        return string

    # Generate a speakable version of the time
    speak = ""
    if use_24hour:
        speak += pronounce_number_nl(dt.hour)
        speak += " uur"
        if not dt.minute == 0:  # zero minutes are not pronounced, 13:00 is
            # "13 uur" not "13 hundred hours"
            speak += " " + pronounce_number_nl(dt.minute)

        return speak  # ampm is ignored when use_24hour is true
    else:
        if dt.hour == 0 and dt.minute == 0:
            return "Middernacht"
        hour = dt.hour % 12
        if dt.minute == 0:
            hour = fix_hour(hour)
            speak += pronounce_number_nl(hour)
            speak += " uur"
        elif dt.minute == 30:
            # Dutch "half X" refers to the *coming* hour: 4:30 -> "half vijf"
            speak += "half "
            hour += 1
            hour = fix_hour(hour)
            speak += pronounce_number_nl(hour)
        elif dt.minute == 15:
            speak += "kwart over "
            hour = fix_hour(hour)
            speak += pronounce_number_nl(hour)
        elif dt.minute == 45:
            speak += "kwart voor "
            hour += 1
            hour = fix_hour(hour)
            speak += pronounce_number_nl(hour)
        elif dt.minute > 30:
            # Past the half hour: count minutes *to* the next hour.
            speak += pronounce_number_nl(60 - dt.minute)
            speak += " voor "
            hour += 1
            hour = fix_hour(hour)
            speak += pronounce_number_nl(hour)
        else:
            speak += pronounce_number_nl(dt.minute)
            speak += " over "
            hour = fix_hour(hour)
            speak += pronounce_number_nl(hour)

        if use_ampm:
            speak += nice_part_of_day_nl(dt)

        return speak
def fix_hour(hour):
    """Map an hour onto the 1..12 clock-face range (0 becomes 12)."""
    return hour % 12 or 12
def nice_part_of_day_nl(dt):
    """Return the Dutch part-of-day suffix for *dt*, e.g. " 's nachts"."""
    for upper_bound, phrase in ((6, " 's nachts"),
                                (12, " 's ochtends"),
                                (18, " 's middags"),
                                (24, " 's avonds")):
        if dt.hour < upper_bound:
            return phrase
    raise Exception('dt.hour is bigger than 24')
def nice_response_nl(text):
    """Post-process a response for speech.

    Applies ordinal declension when a month name is present and replaces
    '^' followed by a number with "tot de macht" (to the power of).
    """
    words = text.split()

    for idx, word in enumerate(words):
        if word.lower() in months:
            text = nice_ordinal_nl(text)

        if word == '^':
            following = words[idx + 1] if idx + 1 < len(words) else ""
            if following.isnumeric():
                words[idx] = "tot de macht"
                text = " ".join(words)

    return text
def nice_ordinal_nl(text):
    # check for months for declension of ordinals before months
    # depending on articles/prepositions
    normalized_text = text
    words = text.split()
    for idx, word in enumerate(words):
        wordNext = words[idx + 1] if idx + 1 < len(words) else ""
        wordPrev = words[idx - 1] if idx > 0 else ""
        # word[:-1].isdecimal() matches a number possibly carrying a one
        # character suffix (e.g. "21e") -- the final character is ignored
        if word[:-1].isdecimal():
            if wordNext.lower() in months:
                # "de 3 maart" -> ordinal, otherwise plain cardinal
                if wordPrev == 'de':
                    word = pronounce_ordinal_nl(int(word))
                else:
                    word = pronounce_number_nl(int(word))
                words[idx] = word
    # NOTE(review): source indentation was ambiguous here; joining after the
    # loop normalizes whitespace even when nothing matched -- confirm against
    # upstream whether this belongs inside the loop body instead.
    normalized_text = " ".join(words)
    return normalized_text
| linuxipho/mycroft-core | mycroft/util/lang/format_nl.py | Python | apache-2.0 | 12,272 |
# -*- coding: utf-8 -*-
r'''
werkzeug.script
~~~~~~~~~~~~~~~
.. admonition:: Deprecated Functionality
``werkzeug.script`` is deprecated without replacement functionality.
Python's command line support improved greatly with :mod:`argparse`
and a bunch of alternative modules.
Most of the time you have recurring tasks while writing an application
such as starting up an interactive python interpreter with some prefilled
imports, starting the development server, initializing the database or
something similar.
For that purpose werkzeug provides the `werkzeug.script` module which
helps you writing such scripts.
Basic Usage
-----------
The following snippet is roughly the same in every werkzeug script::
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from werkzeug import script
# actions go here
if __name__ == '__main__':
script.run()
Starting this script now does nothing because no actions are defined.
An action is a function in the same module starting with ``"action_"``
which takes a number of arguments where every argument has a default. The
type of the default value specifies the type of the argument.
Arguments can then be passed by position or using ``--name=value`` from
the shell.
Because a runserver and shell command is pretty common there are two
factory functions that create such commands::
def make_app():
from yourapplication import YourApplication
return YourApplication(...)
action_runserver = script.make_runserver(make_app, use_reloader=True)
action_shell = script.make_shell(lambda: {'app': make_app()})
Using The Scripts
-----------------
The script from above can be used like this from the shell now:
.. sourcecode:: text
$ ./manage.py --help
$ ./manage.py runserver localhost 8080 --debugger --no-reloader
$ ./manage.py runserver -p 4000
$ ./manage.py shell
As you can see it's possible to pass parameters as positional arguments
or as named parameters, pretty much like Python function calls.
:copyright: (c) 2014 by the Werkzeug Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
'''
from __future__ import print_function
import sys
import inspect
import getopt
from warnings import warn
from os.path import basename
from werkzeug._compat import iteritems
# Maps the supported default-value types of action arguments to the
# type name shown on the command line / help output.
argument_types = {
    bool: 'boolean',
    str: 'string',
    int: 'integer',
    float: 'float'
}
# Inverse direction: converts a raw command-line string into the Python
# value for each declared argument type name above.
converters = {
    'boolean': lambda x: x.lower() in ('1', 'true', 'yes', 'on'),
    'string': str,
    'integer': int,
    'float': float
}
def _deprecated():
warn(DeprecationWarning('werkzeug.script is deprecated and '
'will be removed soon'), stacklevel=2)
def run(namespace=None, action_prefix='action_', args=None):
    """Run the script. Participating actions are looked up in the caller's
    namespace if no namespace is given, otherwise in the dict provided.
    Only items that start with action_prefix are processed as actions. If
    you want to use all items in the namespace provided as actions set
    action_prefix to an empty string.

    :param namespace: An optional dict where the functions are looked up in.
                      By default the local namespace of the caller is used.
    :param action_prefix: The prefix for the functions. Everything else
                          is ignored.
    :param args: the arguments for the function. If not specified
                 :data:`sys.argv` without the first argument is used.
    """
    _deprecated()
    if namespace is None:
        # Default to the caller's local namespace.
        namespace = sys._getframe(1).f_locals
    actions = find_actions(namespace, action_prefix)
    if args is None:
        args = sys.argv[1:]
    if not args or args[0] in ('-h', '--help'):
        return print_usage(actions)
    elif args[0] not in actions:
        fail('Unknown action \'%s\'' % args[0])
    arguments = {}
    types = {}
    key_to_arg = {}
    long_options = []
    formatstring = ''
    func, doc, arg_def = actions[args.pop(0)]
    for idx, (arg, shortcut, default, option_type) in enumerate(arg_def):
        real_arg = arg.replace('-', '_')
        if shortcut:
            formatstring += shortcut
            if not isinstance(default, bool):
                # non-boolean short options take a value
                formatstring += ':'
            key_to_arg['-' + shortcut] = real_arg
        long_options.append(isinstance(default, bool) and arg or arg + '=')
        key_to_arg['--' + arg] = real_arg
        # positional index -> argument name, for positional parameters
        key_to_arg[idx] = real_arg
        types[real_arg] = option_type
        arguments[real_arg] = default
    try:
        optlist, posargs = getopt.gnu_getopt(args, formatstring, long_options)
    except getopt.GetoptError as e:
        fail(str(e))
    specified_arguments = set()
    for key, value in enumerate(posargs):
        try:
            arg = key_to_arg[key]
        except KeyError:
            # key_to_arg is a dict, so an out-of-range positional index
            # raises KeyError, not IndexError.
            fail('Too many parameters')
        specified_arguments.add(arg)
        try:
            arguments[arg] = converters[types[arg]](value)
        except ValueError:
            fail('Invalid value for argument %s (%s): %s' % (key, arg, value))
    for key, value in optlist:
        arg = key_to_arg[key]
        if arg in specified_arguments:
            fail('Argument \'%s\' is specified twice' % arg)
        if types[arg] == 'boolean':
            # presence of the flag toggles it; "--no-x" style flags map
            # back to False via the converter below
            if arg.startswith('no_'):
                value = 'no'
            else:
                value = 'yes'
        try:
            arguments[arg] = converters[types[arg]](value)
        except ValueError:
            fail('Invalid value for \'%s\': %s' % (key, value))
    newargs = {}
    for k, v in iteritems(arguments):
        # strip the "no_" prefix that True-default booleans received in
        # analyse_action() before calling the real function
        newargs[k.startswith('no_') and k[3:] or k] = v
    arguments = newargs
    return func(**arguments)
def fail(message, code=-1):
    """Fail with an error."""
    _deprecated()
    sys.stderr.write('Error: %s\n' % message)
    sys.exit(code)
def find_actions(namespace, action_prefix):
    """Find all the actions in the namespace."""
    _deprecated()
    prefix_len = len(action_prefix)
    return dict(
        (name[prefix_len:], analyse_action(obj))
        for name, obj in iteritems(namespace)
        if name.startswith(action_prefix)
    )
def print_usage(actions):
    """Print the usage information. (Help screen)"""
    _deprecated()
    prog = basename(sys.argv[0])
    print('usage: %s <action> [<options>]' % prog)
    print('       %s --help' % prog)
    print()
    print('actions:')
    for name, (func, doc, arguments) in sorted(iteritems(actions)):
        print('  %s:' % name)
        for line in doc.splitlines():
            print('    %s' % line)
        if arguments:
            print()
        for arg, shortcut, default, argtype in arguments:
            flag = (shortcut and '-%s, ' % shortcut or '') + '--' + arg
            if isinstance(default, bool):
                # boolean flags have no value / type column
                print('    %s' % flag)
            else:
                print('    %-30s%-10s%s' % (flag, argtype, default))
        print()
def analyse_action(func):
    """Analyse a function and return ``(func, description, arguments)``.

    Each argument is described as ``(name, shortcut, default, type_name)``.
    Raises TypeError for ``*args``/``**kwargs`` signatures, missing
    defaults, or argument names starting with an underscore.
    """
    _deprecated()
    description = inspect.getdoc(func) or 'undocumented action'
    arguments = []
    # inspect.getargspec() was deprecated in Python 3 and removed in
    # Python 3.11; use getfullargspec() when available and fall back to
    # getargspec() for Python 2.
    if hasattr(inspect, 'getfullargspec'):
        spec = inspect.getfullargspec(func)
        args, varargs, kwargs, defaults = \
            spec.args, spec.varargs, spec.varkw, spec.defaults
    else:
        args, varargs, kwargs, defaults = inspect.getargspec(func)
    if varargs or kwargs:
        raise TypeError('variable length arguments for action not allowed.')
    if len(args) != len(defaults or ()):
        raise TypeError('not all arguments have proper definitions')
    for idx, (arg, definition) in enumerate(zip(args, defaults or ())):
        if arg.startswith('_'):
            raise TypeError('arguments may not start with an underscore')
        if not isinstance(definition, tuple):
            shortcut = None
            default = definition
        else:
            # a (shortcut, default) tuple declares a short option
            shortcut, default = definition
        argument_type = argument_types[type(default)]
        if isinstance(default, bool) and default is True:
            # True-default booleans are exposed as "--no-<arg>" flags
            arg = 'no-' + arg
        arguments.append((arg.replace('_', '-'), shortcut,
                          default, argument_type))
    return func, description, arguments
def make_shell(init_func=None, banner=None, use_ipython=True):
    """Returns an action callback that spawns a new interactive
    python shell.

    :param init_func: an optional initialization function that is
                      called before the shell is started.  The return
                      value of this function is the initial namespace.
    :param banner: the banner that is displayed before the shell.  If
                   not specified a generic banner is used instead.
    :param use_ipython: if set to `True` ipython is used if available.
    """
    _deprecated()
    if banner is None:
        banner = 'Interactive Werkzeug Shell'
    if init_func is None:
        init_func = dict
    def action(ipython=use_ipython):
        """Start a new interactive python session."""
        namespace = init_func()
        if ipython:
            try:
                try:
                    # IPython >= 0.11 embedding API
                    from IPython.frontend.terminal.embed import InteractiveShellEmbed
                    sh = InteractiveShellEmbed(banner1=banner)
                except ImportError:
                    # older IPython (< 0.11) fallback
                    from IPython.Shell import IPShellEmbed
                    sh = IPShellEmbed(banner=banner)
            except ImportError:
                # IPython not installed at all -- use the plain shell below
                pass
            else:
                sh(global_ns={}, local_ns=namespace)
                return
        # plain stdlib interactive interpreter
        from code import interact
        interact(banner, local=namespace)
    return action
def make_runserver(app_factory, hostname='localhost', port=5000,
                   use_reloader=False, use_debugger=False, use_evalex=True,
                   threaded=False, processes=1, static_files=None,
                   extra_files=None, ssl_context=None):
    """Returns an action callback that spawns a new development server.

    .. versionadded:: 0.5
       `static_files` and `extra_files` was added.

    ..versionadded:: 0.6.1
       `ssl_context` was added.

    :param app_factory: a function that returns a new WSGI application.
    :param hostname: the default hostname the server should listen on.
    :param port: the default port of the server.
    :param use_reloader: the default setting for the reloader.
    :param use_evalex: the default setting for the evalex flag of the debugger.
    :param threaded: the default threading setting.
    :param processes: the default number of processes to start.
    :param static_files: optional dict of static files.
    :param extra_files: optional list of extra files to track for reloading.
    :param ssl_context: optional SSL context for running server in HTTPS mode.
    """
    _deprecated()
    # The ('h', hostname) / ('p', port) tuple defaults declare short
    # options plus defaults for the action-argument parser (see
    # analyse_action()); the remaining defaults become long options.
    def action(hostname=('h', hostname), port=('p', port),
               reloader=use_reloader, debugger=use_debugger,
               evalex=use_evalex, threaded=threaded, processes=processes):
        """Start a new development server."""
        from werkzeug.serving import run_simple
        app = app_factory()
        run_simple(hostname, port, app,
                   use_reloader=reloader, use_debugger=debugger,
                   use_evalex=evalex, extra_files=extra_files,
                   reloader_interval=1, threaded=threaded, processes=processes,
                   static_files=static_files, ssl_context=ssl_context)
    return action
| joshfriend/werkzeug | werkzeug/script.py | Python | bsd-3-clause | 11,671 |
class Lib:
    """Test library exposing keywords for Robot Framework tests."""
    def hello(self):
        # print('...') with a single argument behaves identically under
        # the Python 2 print statement and the Python 3 print function,
        # unlike the original py2-only "print '...'" statement.
        print('Hello from lib1')
    def kw_from_lib1(self):
        # Intentionally empty keyword; only its presence matters.
        pass
| eric-stanley/robotframework | atest/testdata/test_libraries/dir_for_libs/lib1/Lib.py | Python | apache-2.0 | 107 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.5 on 2016-04-03 15:09
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the ``raid`` app (auto-generated, Django 1.9.5).

    Creates Boss, DifficultType, Game, Raid, RaidGroup and the
    RaidGroupAvaliable through table, then adds the remaining
    cross-model foreign keys that could not be declared inline.
    """
    # First migration of this app; depends on no other migrations.
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Boss',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # NOTE(review): upload_to='/boss/' is an absolute path --
                # confirm this is intended; the usual form is a relative
                # 'boss/' under MEDIA_ROOT.  (Do not edit a historical
                # migration without a follow-up migration.)
                ('image', models.ImageField(blank=True, null=True, upload_to='/boss/')),
                ('name', models.CharField(blank=True, max_length=100)),
                ('lvl', models.IntegerField(blank=True, null=True)),
                ('health', models.CharField(blank=True, max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='DifficultType',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Game',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=100)),
            ],
        ),
        migrations.CreateModel(
            name='Raid',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=100)),
                ('lvl', models.IntegerField(blank=True, null=True)),
                ('difficult_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='raid.DifficultType')),
                ('game', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='raid.Game')),
            ],
        ),
        migrations.CreateModel(
            name='RaidGroup',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(blank=True, max_length=100)),
                ('game', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='raid.Game')),
            ],
        ),
        # Through model linking RaidGroup <-> Raid with scheduling state.
        migrations.CreateModel(
            name='RaidGroupAvaliable',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('status', models.BooleanField(default=False)),
                ('execution_date', models.DateField(blank=True, null=True)),
                ('boss', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='raid.Boss')),
                ('difficult_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='raid.DifficultType')),
                ('game', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='raid.Game')),
                ('raid', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='raid.Raid')),
                ('raid_group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='raid.RaidGroup')),
            ],
        ),
        # M2M and FKs added after all referenced models exist.
        migrations.AddField(
            model_name='raidgroup',
            name='raids',
            field=models.ManyToManyField(through='raid.RaidGroupAvaliable', to='raid.Raid'),
        ),
        migrations.AddField(
            model_name='boss',
            name='difficult_type',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='raid.DifficultType'),
        ),
    ]
| valdergallo/raidmanager | raid/migrations/0001_initial.py | Python | mit | 3,751 |
def check_num(e):
    """Convert e to an integer, falling back to 0 when not numeric."""
    try:
        result = int(e)
    except ValueError:
        result = 0
    return result
def sumUpNumbers(inputString):
    """Return the sum of every run of digits in inputString.

    Example: "2 apples and 12 pears" -> 14.  Letter tokens contribute 0,
    matching the original check_num() fallback behavior.
    """
    # Local import: this module never imported `re` at top level, so the
    # original re.findall() call raised NameError at runtime.
    import re
    return sum(int(run) for run in re.findall(r"\d+", inputString))
| emirot/codefights | intro/sumUpNumbers.py | Python | apache-2.0 | 222 |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.mgmt.core.exceptions import ARMErrorFormat
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class ExpressRouteLinksOperations:
    """ExpressRouteLinksOperations async operations.

    You should not instantiate this class directly. Instead, you should create a Client instance that
    instantiates it for you and attaches it as an attribute.

    :ivar models: Alias to model classes used in this operation group.
    :type models: ~azure.mgmt.network.v2019_11_01.models
    :param client: Client for service requests.
    :param config: Configuration of service client.
    :param serializer: An object model serializer.
    :param deserializer: An object model deserializer.
    """
    # NOTE: AutoRest-generated code -- manual edits will be lost on
    # regeneration.
    models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        self._client = client
        self._serialize = serializer
        self._deserialize = deserializer
        self._config = config
    async def get(
        self,
        resource_group_name: str,
        express_route_port_name: str,
        link_name: str,
        **kwargs: Any
    ) -> "_models.ExpressRouteLink":
        """Retrieves the specified ExpressRouteLink resource.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param express_route_port_name: The name of the ExpressRoutePort resource.
        :type express_route_port_name: str
        :param link_name: The name of the ExpressRouteLink resource.
        :type link_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: ExpressRouteLink, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2019_11_01.models.ExpressRouteLink
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteLink"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
            'linkName': self._serialize.url("link_name", link_name, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            raise HttpResponseError(response=response, error_format=ARMErrorFormat)
        deserialized = self._deserialize('ExpressRouteLink', pipeline_response)
        if cls:
            # A caller-supplied response hook replaces the default return.
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}/links/{linkName}'}  # type: ignore
    def list(
        self,
        resource_group_name: str,
        express_route_port_name: str,
        **kwargs: Any
    ) -> AsyncIterable["_models.ExpressRouteLinkListResult"]:
        """Retrieve the ExpressRouteLink sub-resources of the specified ExpressRoutePort resource.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param express_route_port_name: The name of the ExpressRoutePort resource.
        :type express_route_port_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either ExpressRouteLinkListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2019_11_01.models.ExpressRouteLinkListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.ExpressRouteLinkListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2019-11-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # First page: build the full ARM URL; later pages reuse the
            # service-provided next_link verbatim.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'expressRoutePortName': self._serialize.url("express_route_port_name", express_route_port_name, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (next_link, items).
            deserialized = self._deserialize('ExpressRouteLinkListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/ExpressRoutePorts/{expressRoutePortName}/links'}  # type: ignore
| Azure/azure-sdk-for-python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2019_11_01/aio/operations/_express_route_links_operations.py | Python | mit | 8,718 |
# Copyright (c) 2010-2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Object Server for Swift """
import six.moves.cPickle as pickle
import json
import os
import multiprocessing
import time
import traceback
import socket
import math
from swift import gettext_ as _
from hashlib import md5
from eventlet import sleep, wsgi, Timeout
from eventlet.greenthread import spawn
from swift.common.utils import public, get_logger, \
config_true_value, timing_stats, replication, \
normalize_delete_at_timestamp, get_log_line, Timestamp, \
get_expirer_container, parse_mime_headers, \
iter_multipart_mime_documents, extract_swift_bytes
from swift.common.bufferedhttp import http_connect
from swift.common.constraints import check_object_creation, \
valid_timestamp, check_utf8
from swift.common.exceptions import ConnectionTimeout, DiskFileQuarantined, \
DiskFileNotExist, DiskFileCollision, DiskFileNoSpace, DiskFileDeleted, \
DiskFileDeviceUnavailable, DiskFileExpired, ChunkReadTimeout, \
ChunkReadError, DiskFileXattrNotSupported
from swift.obj import ssync_receiver
from swift.common.http import is_success
from swift.common.base_storage_server import BaseStorageServer
from swift.common.header_key_dict import HeaderKeyDict
from swift.common.request_helpers import get_name_and_placement, \
is_user_meta, is_sys_or_user_meta, is_object_transient_sysmeta, \
resolve_etag_is_at_header
from swift.common.swob import HTTPAccepted, HTTPBadRequest, HTTPCreated, \
HTTPInternalServerError, HTTPNoContent, HTTPNotFound, \
HTTPPreconditionFailed, HTTPRequestTimeout, HTTPUnprocessableEntity, \
HTTPClientDisconnect, HTTPMethodNotAllowed, Request, Response, \
HTTPInsufficientStorage, HTTPForbidden, HTTPException, HTTPConflict, \
HTTPServerError
from swift.obj.diskfile import DATAFILE_SYSTEM_META, DiskFileRouter
def iter_mime_headers_and_bodies(wsgi_input, mime_boundary, read_chunk_size):
    """Yield a (headers, body_file) pair for each MIME document on input."""
    for doc_file in iter_multipart_mime_documents(
            wsgi_input, mime_boundary, read_chunk_size):
        yield parse_mime_headers(doc_file), doc_file
def drain(file_like, read_size, timeout):
    """
    Read and discard any bytes from file_like.

    :param file_like: file-like object to read from
    :param read_size: how big a chunk to read at a time
    :param timeout: how long to wait for a read (use None for no timeout)

    :raises ChunkReadTimeout: if no chunk was read in time
    """
    while True:
        with ChunkReadTimeout(timeout):
            data = file_like.read(read_size)
        if not data:
            return
class EventletPlungerString(str):
    """
    A string that lies about its length to flush Eventlet's buffers.

    Eventlet won't send headers until it has accumulated at least
    eventlet.wsgi.MINIMUM_CHUNK_SIZE bytes or the app iter is exhausted.
    If we want to send the response body behind Eventlet's back, perhaps
    with some zero-copy wizardry, we must force the headers out first;
    reporting a length past the minimum chunk size empties out all of
    Eventlet's buffers.
    """
    def __len__(self):
        # one past the threshold guarantees the buffered data is flushed
        return 1 + wsgi.MINIMUM_CHUNK_SIZE
class ObjectController(BaseStorageServer):
"""Implements the WSGI application for the Swift Object Server."""
server_type = 'object-server'
    def __init__(self, conf, logger=None):
        """
        Creates a new WSGI application for the Swift Object Server. An
        example configuration is given at
            <source-dir>/etc/object-server.conf-sample or
            /etc/swift/object-server.conf-sample.

        :param conf: WSGI configuration dict.
        :param logger: optional pre-built logger; one is created from
                       conf when not supplied.
        """
        super(ObjectController, self).__init__(conf)
        self.logger = logger or get_logger(conf, log_route='object-server')
        # Timeouts (seconds) for talking to container servers and clients.
        self.node_timeout = float(conf.get('node_timeout', 3))
        self.container_update_timeout = float(
            conf.get('container_update_timeout', 1))
        self.conn_timeout = float(conf.get('conn_timeout', 0.5))
        self.client_timeout = int(conf.get('client_timeout', 60))
        # Chunk sizes (bytes) for disk and network I/O.
        self.disk_chunk_size = int(conf.get('disk_chunk_size', 65536))
        self.network_chunk_size = int(conf.get('network_chunk_size', 65536))
        self.log_requests = config_true_value(conf.get('log_requests', 'true'))
        self.max_upload_time = int(conf.get('max_upload_time', 86400))
        self.slow = int(conf.get('slow', 0))
        self.keep_cache_private = \
            config_true_value(conf.get('keep_cache_private', 'false'))
        # Object metadata headers clients may set; extendable via the
        # allowed_headers config option (minus reserved system metadata).
        default_allowed_headers = '''
            content-disposition,
            content-encoding,
            x-delete-at,
            x-object-manifest,
            x-static-large-object,
        '''
        extra_allowed_headers = [
            header.strip().lower() for header in conf.get(
                'allowed_headers', default_allowed_headers).split(',')
            if header.strip()
        ]
        self.allowed_headers = set()
        for header in extra_allowed_headers:
            if header not in DATAFILE_SYSTEM_META:
                self.allowed_headers.add(header)
        self.auto_create_account_prefix = \
            conf.get('auto_create_account_prefix') or '.'
        self.expiring_objects_account = self.auto_create_account_prefix + \
            (conf.get('expiring_objects_account_name') or 'expiring_objects')
        self.expiring_objects_container_divisor = \
            int(conf.get('expiring_objects_container_divisor') or 86400)
        # Initialization was successful, so now apply the network chunk size
        # parameter as the default read / write buffer size for the network
        # sockets.
        #
        # NOTE WELL: This is a class setting, so until we get set this on a
        # per-connection basis, this affects reading and writing on ALL
        # sockets, those between the proxy servers and external clients, and
        # those between the proxy servers and the other internal servers.
        #
        # ** Because the primary motivation for this is to optimize how data
        # is written back to the proxy server, we could use the value from the
        # disk_chunk_size parameter. However, it affects all created sockets
        # using this class so we have chosen to tie it to the
        # network_chunk_size parameter value instead.
        socket._fileobject.default_bufsize = self.network_chunk_size
        # Provide further setup specific to an object server implementation.
        self.setup(conf)
def setup(self, conf):
"""
Implementation specific setup. This method is called at the very end
by the constructor to allow a specific implementation to modify
existing attributes or add its own attributes.
:param conf: WSGI configuration parameter
"""
# Common on-disk hierarchy shared across account, container and object
# servers.
self._diskfile_router = DiskFileRouter(conf, self.logger)
# This is populated by global_conf_callback way below as the semaphore
# is shared by all workers.
if 'replication_semaphore' in conf:
# The value was put in a list so it could get past paste
self.replication_semaphore = conf['replication_semaphore'][0]
else:
self.replication_semaphore = None
self.replication_failure_threshold = int(
conf.get('replication_failure_threshold') or 100)
self.replication_failure_ratio = float(
conf.get('replication_failure_ratio') or 1.0)
def get_diskfile(self, device, partition, account, container, obj,
policy, **kwargs):
"""
Utility method for instantiating a DiskFile object supporting a given
REST API.
An implementation of the object server that wants to use a different
DiskFile class would simply over-ride this method to provide that
behavior.
"""
return self._diskfile_router[policy].get_diskfile(
device, partition, account, container, obj, policy, **kwargs)
    def async_update(self, op, account, container, obj, host, partition,
                     contdevice, headers_out, objdevice, policy,
                     logger_thread_locals=None):
        """
        Sends or saves an async update.

        :param op: operation performed (ex: 'PUT', or 'DELETE')
        :param account: account name for the object
        :param container: container name for the object
        :param obj: object name
        :param host: host that the container is on
        :param partition: partition that the container is on
        :param contdevice: device name that the container is on
        :param headers_out: dictionary of headers to send in the container
                            request
        :param objdevice: device name that the object is in
        :param policy: the associated BaseStoragePolicy instance
        :param logger_thread_locals: The thread local values to be set on the
                                     self.logger to retain transaction
                                     logging information.
        """
        if logger_thread_locals:
            self.logger.thread_locals = logger_thread_locals
        headers_out['user-agent'] = 'object-server %s' % os.getpid()
        full_path = '/%s/%s/%s' % (account, container, obj)
        # Only try a synchronous update when the proxy supplied the full
        # set of container coordinates; otherwise fall through to the
        # async pickle below.
        if all([host, partition, contdevice]):
            try:
                with ConnectionTimeout(self.conn_timeout):
                    ip, port = host.rsplit(':', 1)
                    conn = http_connect(ip, port, contdevice, partition, op,
                                        full_path, headers_out)
                with Timeout(self.node_timeout):
                    response = conn.getresponse()
                    response.read()
                    if is_success(response.status):
                        return
                    else:
                        self.logger.error(_(
                            'ERROR Container update failed '
                            '(saving for async update later): %(status)d '
                            'response from %(ip)s:%(port)s/%(dev)s'),
                            {'status': response.status, 'ip': ip, 'port': port,
                             'dev': contdevice})
            except (Exception, Timeout):
                self.logger.exception(_(
                    'ERROR container update failed with '
                    '%(ip)s:%(port)s/%(dev)s (saving for async update later)'),
                    {'ip': ip, 'port': port, 'dev': contdevice})
        # Synchronous update failed (or was impossible): persist the
        # update on local disk for the object-updater to replay later.
        data = {'op': op, 'account': account, 'container': container,
                'obj': obj, 'headers': headers_out}
        timestamp = headers_out.get('x-meta-timestamp',
                                    headers_out.get('x-timestamp'))
        self._diskfile_router[policy].pickle_async_update(
            objdevice, account, container, obj, data, timestamp, policy)
    def container_update(self, op, account, container, obj, request,
                         headers_out, objdevice, policy):
        """
        Update the container when objects are updated.

        :param op: operation performed (ex: 'PUT', or 'DELETE')
        :param account: account name for the object
        :param container: container name for the object
        :param obj: object name
        :param request: the original request object driving the update
        :param headers_out: dictionary of headers to send in the container
                            request(s)
        :param objdevice: device name that the object is in
        :param policy: the BaseStoragePolicy instance
        """
        headers_in = request.headers
        conthosts = [h.strip() for h in
                     headers_in.get('X-Container-Host', '').split(',')]
        contdevices = [d.strip() for d in
                       headers_in.get('X-Container-Device', '').split(',')]
        contpartition = headers_in.get('X-Container-Partition', '')
        if len(conthosts) != len(contdevices):
            # This shouldn't happen unless there's a bug in the proxy,
            # but if there is, we want to know about it.
            self.logger.error(_(
                'ERROR Container update failed: different '
                'numbers of hosts and devices in request: '
                '"%(hosts)s" vs "%(devices)s"') % {
                    'hosts': headers_in.get('X-Container-Host', ''),
                    'devices': headers_in.get('X-Container-Device', '')})
            return
        # One update per (host, device) pair supplied by the proxy; no
        # partition header means no updates at all.
        if contpartition:
            updates = zip(conthosts, contdevices)
        else:
            updates = []
        headers_out['x-trans-id'] = headers_in.get('x-trans-id', '-')
        headers_out['referer'] = request.as_referer()
        headers_out['X-Backend-Storage-Policy-Index'] = int(policy)
        # Fire the updates concurrently; each runs async_update in its
        # own greenthread.
        update_greenthreads = []
        for conthost, contdevice in updates:
            gt = spawn(self.async_update, op, account, container, obj,
                       conthost, contpartition, contdevice, headers_out,
                       objdevice, policy,
                       logger_thread_locals=self.logger.thread_locals)
            update_greenthreads.append(gt)
        # Wait a little bit to see if the container updates are successful.
        # If we immediately return after firing off the greenthread above, then
        # we're more likely to confuse the end-user who does a listing right
        # after getting a successful response to the object create. The
        # `container_update_timeout` bounds the length of time we wait so that
        # one slow container server doesn't make the entire request lag.
        try:
            with Timeout(self.container_update_timeout):
                for gt in update_greenthreads:
                    gt.wait()
        except Timeout:
            # updates didn't go through, log it and return
            self.logger.debug(
                'Container update timeout (%.4fs) waiting for %s',
                self.container_update_timeout, updates)
    def delete_at_update(self, op, delete_at, account, container, obj,
                         request, objdevice, policy):
        """
        Update the expiring objects container when objects are updated.

        Queues a PUT or DELETE against the hidden expiring-objects account so
        that the background expirer knows when (or no longer needs) to delete
        the object.

        :param op: operation performed (ex: 'PUT', or 'DELETE')
        :param delete_at: scheduled delete in UNIX seconds, int
        :param account: account name for the object
        :param container: container name for the object
        :param obj: object name
        :param request: the original request driving the update
        :param objdevice: device name that the object is in
        :param policy: the BaseStoragePolicy instance (used for tmp dir)
        """
        # Replication subrequests (x-backend-replication header) must not
        # cascade into additional expirer-container updates.
        if config_true_value(
                request.headers.get('x-backend-replication', 'f')):
            return
        delete_at = normalize_delete_at_timestamp(delete_at)
        updates = [(None, None)]
        partition = None
        hosts = contdevices = [None]
        headers_in = request.headers
        headers_out = HeaderKeyDict({
            # system accounts are always Policy-0
            'X-Backend-Storage-Policy-Index': 0,
            'x-timestamp': request.timestamp.internal,
            'x-trans-id': headers_in.get('x-trans-id', '-'),
            'referer': request.as_referer()})
        if op != 'DELETE':
            delete_at_container = headers_in.get('X-Delete-At-Container', None)
            if not delete_at_container:
                self.logger.warning(
                    'X-Delete-At-Container header must be specified for '
                    'expiring objects background %s to work properly. Making '
                    'best guess as to the container name for now.' % op)
                # TODO(gholt): In a future release, change the above warning to
                # a raised exception and remove the guess code below.
                delete_at_container = get_expirer_container(
                    delete_at, self.expiring_objects_container_divisor,
                    account, container, obj)
            partition = headers_in.get('X-Delete-At-Partition', None)
            hosts = headers_in.get('X-Delete-At-Host', '')
            contdevices = headers_in.get('X-Delete-At-Device', '')
            # Pair up host/device values from the comma-separated headers;
            # only keep complete pairs, and only if a partition was given.
            updates = [upd for upd in
                       zip((h.strip() for h in hosts.split(',')),
                           (c.strip() for c in contdevices.split(',')))
                       if all(upd) and partition]
            if not updates:
                updates = [(None, None)]
            headers_out['x-size'] = '0'
            headers_out['x-content-type'] = 'text/plain'
            # md5 of the empty string: expirer queue entries are zero-byte.
            headers_out['x-etag'] = 'd41d8cd98f00b204e9800998ecf8427e'
        else:
            # DELETEs of old expiration data have no way of knowing what the
            # old X-Delete-At-Container was at the time of the initial setting
            # of the data, so a best guess is made here.
            # Worst case is a DELETE is issued now for something that doesn't
            # exist there and the original data is left where it is, where
            # it will be ignored when the expirer eventually tries to issue the
            # object DELETE later since the X-Delete-At value won't match up.
            delete_at_container = get_expirer_container(
                delete_at, self.expiring_objects_container_divisor,
                account, container, obj)
        delete_at_container = normalize_delete_at_timestamp(
            delete_at_container)
        # Fire one async container update per (host, device) pair; (None,
        # None) pairs fall back to async_pending handling in async_update.
        for host, contdevice in updates:
            self.async_update(
                op, self.expiring_objects_account, delete_at_container,
                '%s-%s/%s/%s' % (delete_at, account, container, obj),
                host, partition, contdevice, headers_out, objdevice,
                policy)
def _make_timeout_reader(self, file_like):
def timeout_reader():
with ChunkReadTimeout(self.client_timeout):
return file_like.read(self.network_chunk_size)
return timeout_reader
    def _read_put_commit_message(self, mime_documents_iter):
        """
        Read the second-phase commit MIME document of a multiphase PUT.

        :param mime_documents_iter: iterator yielding (headers, body_iter)
                                    pairs for each MIME document in the
                                    request body
        :returns: True if a document marked ``X-Document: put commit`` was
                  received, False otherwise
        :raises HTTPClientDisconnect: if the client hung up mid-read
        :raises HTTPRequestTimeout: if the client was too slow
        :raises HTTPBadRequest: if no commit document was found at all
        """
        rcvd_commit = False
        try:
            with ChunkReadTimeout(self.client_timeout):
                commit_hdrs, commit_iter = next(mime_documents_iter)
                if commit_hdrs.get('X-Document', None) == "put commit":
                    rcvd_commit = True
            # Consume the commit document body so the socket is left clean.
            drain(commit_iter, self.network_chunk_size, self.client_timeout)
        except ChunkReadError:
            raise HTTPClientDisconnect()
        except ChunkReadTimeout:
            raise HTTPRequestTimeout()
        except StopIteration:
            raise HTTPBadRequest(body="couldn't find PUT commit MIME doc")
        return rcvd_commit
    def _read_metadata_footer(self, mime_documents_iter):
        """
        Read and validate the metadata-footer MIME document of a PUT.

        The footer body is JSON and must carry a Content-MD5 header that
        matches the body's actual MD5.

        :param mime_documents_iter: iterator yielding (headers, body_iter)
                                    pairs for each MIME document in the
                                    request body
        :returns: a HeaderKeyDict of the parsed footer metadata
        :raises HTTPClientDisconnect: if the client hung up mid-read
        :raises HTTPRequestTimeout: if the client was too slow
        :raises HTTPBadRequest: if the footer doc is missing, lacks
                                Content-MD5, or is not valid JSON
        :raises HTTPUnprocessableEntity: if the footer MD5 does not match
        """
        try:
            with ChunkReadTimeout(self.client_timeout):
                footer_hdrs, footer_iter = next(mime_documents_iter)
        except ChunkReadError:
            raise HTTPClientDisconnect()
        except ChunkReadTimeout:
            raise HTTPRequestTimeout()
        except StopIteration:
            raise HTTPBadRequest(body="couldn't find footer MIME doc")
        # Read the whole footer body in timeout-guarded chunks.
        timeout_reader = self._make_timeout_reader(footer_iter)
        try:
            footer_body = ''.join(iter(timeout_reader, ''))
        except ChunkReadError:
            raise HTTPClientDisconnect()
        except ChunkReadTimeout:
            raise HTTPRequestTimeout()
        footer_md5 = footer_hdrs.get('Content-MD5')
        if not footer_md5:
            raise HTTPBadRequest(body="no Content-MD5 in footer")
        if footer_md5 != md5(footer_body).hexdigest():
            raise HTTPUnprocessableEntity(body="footer MD5 mismatch")
        try:
            return HeaderKeyDict(json.loads(footer_body))
        except ValueError:
            raise HTTPBadRequest("invalid JSON for footer doc")
def _check_container_override(self, update_headers, metadata,
footers=None):
"""
Applies any overrides to the container update headers.
Overrides may be in the x-object-sysmeta-container-update- namespace or
the x-backend-container-update-override- namespace. The former is
preferred and is used by proxy middlewares. The latter is historical
but is still used with EC policy PUT requests; for backwards
compatibility the header names used with EC policy requests have not
been changed to the sysmeta namespace - that way the EC PUT path of a
newer proxy will remain compatible with an object server that pre-dates
the introduction of the x-object-sysmeta-container-update- namespace
and vice-versa.
:param update_headers: a dict of headers used in the container update
:param metadata: a dict that may container override items
:param footers: another dict that may container override items, at a
higher priority than metadata
"""
footers = footers or {}
# the order of this list is significant:
# x-object-sysmeta-container-update-override-* headers take precedence
# over x-backend-container-update-override-* headers
override_prefixes = ['x-backend-container-update-override-',
'x-object-sysmeta-container-update-override-']
for override_prefix in override_prefixes:
for key, val in metadata.items():
if key.lower().startswith(override_prefix):
override = key.lower().replace(override_prefix, 'x-')
update_headers[override] = val
# apply x-backend-container-update-override* from footers *before*
# x-object-sysmeta-container-update-override-* from headers
for key, val in footers.items():
if key.lower().startswith(override_prefix):
override = key.lower().replace(override_prefix, 'x-')
update_headers[override] = val
def _preserve_slo_manifest(self, update_metadata, orig_metadata):
if 'X-Static-Large-Object' in orig_metadata:
update_metadata['X-Static-Large-Object'] = \
orig_metadata['X-Static-Large-Object']
    @public
    @timing_stats()
    def POST(self, request):
        """Handle HTTP POST requests for the Swift Object Server.

        Depending on the relative timestamps, a POST may rewrite the user
        metadata, update only the content-type, or both, and then mirrors
        the change to the container layer.
        """
        device, partition, account, container, obj, policy = \
            get_name_and_placement(request, 5, 5, True)
        req_timestamp = valid_timestamp(request)
        new_delete_at = int(request.headers.get('X-Delete-At') or 0)
        if new_delete_at and new_delete_at < time.time():
            return HTTPBadRequest(body='X-Delete-At in past', request=request,
                                  content_type='text/plain')
        try:
            disk_file = self.get_diskfile(
                device, partition, account, container, obj,
                policy=policy)
        except DiskFileDeviceUnavailable:
            return HTTPInsufficientStorage(drive=device, request=request)
        try:
            orig_metadata = disk_file.read_metadata()
        except DiskFileXattrNotSupported:
            return HTTPInsufficientStorage(drive=device, request=request)
        except (DiskFileNotExist, DiskFileQuarantined):
            return HTTPNotFound(request=request)
        orig_timestamp = Timestamp(orig_metadata.get('X-Timestamp', 0))
        orig_ctype_timestamp = disk_file.content_type_timestamp
        req_ctype_time = '0'
        req_ctype = request.headers.get('Content-Type')
        if req_ctype:
            # A request-supplied Content-Type carries its own timestamp,
            # defaulting to the request timestamp when none is given.
            req_ctype_time = request.headers.get('Content-Type-Timestamp',
                                                 req_timestamp.internal)
        req_ctype_timestamp = Timestamp(req_ctype_time)
        # Conflict unless the request is newer than the stored metadata or
        # newer than the stored content-type.
        if orig_timestamp >= req_timestamp \
                and orig_ctype_timestamp >= req_ctype_timestamp:
            return HTTPConflict(
                request=request,
                headers={'X-Backend-Timestamp': orig_timestamp.internal})
        if req_timestamp > orig_timestamp:
            # Newer metadata: rebuild the metadata dict from the request.
            metadata = {'X-Timestamp': req_timestamp.internal}
            self._preserve_slo_manifest(metadata, orig_metadata)
            metadata.update(val for val in request.headers.items()
                            if (is_user_meta('object', val[0]) or
                                is_object_transient_sysmeta(val[0])))
            headers_to_copy = (
                request.headers.get(
                    'X-Backend-Replication-Headers', '').split() +
                list(self.allowed_headers))
            for header_key in headers_to_copy:
                if header_key in request.headers:
                    header_caps = header_key.title()
                    metadata[header_caps] = request.headers[header_key]
            orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
            if orig_delete_at != new_delete_at:
                # Keep the expirer queue in sync with the changed
                # X-Delete-At value.
                if new_delete_at:
                    self.delete_at_update(
                        'PUT', new_delete_at, account, container, obj, request,
                        device, policy)
                if orig_delete_at:
                    self.delete_at_update('DELETE', orig_delete_at, account,
                                          container, obj, request, device,
                                          policy)
        else:
            # preserve existing metadata, only content-type may be updated
            metadata = dict(disk_file.get_metafile_metadata())
        if req_ctype_timestamp > orig_ctype_timestamp:
            # we have a new content-type, add to metadata and container update
            content_type_headers = {
                'Content-Type': request.headers['Content-Type'],
                'Content-Type-Timestamp': req_ctype_timestamp.internal
            }
            metadata.update(content_type_headers)
        else:
            # send existing content-type with container update
            content_type_headers = {
                'Content-Type': disk_file.content_type,
                'Content-Type-Timestamp': orig_ctype_timestamp.internal
            }
            if orig_ctype_timestamp != disk_file.data_timestamp:
                # only add to metadata if it's not the datafile content-type
                metadata.update(content_type_headers)
        try:
            disk_file.write_metadata(metadata)
        except (DiskFileXattrNotSupported, DiskFileNoSpace):
            return HTTPInsufficientStorage(drive=device, request=request)
        if (content_type_headers['Content-Type-Timestamp']
                != disk_file.data_timestamp):
            # Current content-type is not from the datafile, but the datafile
            # content-type may have a swift_bytes param that was appended by
            # SLO and we must continue to send that with the container update.
            # Do this (rather than use a separate header) for backwards
            # compatibility because there may be 'legacy' container updates in
            # async pending that have content-types with swift_bytes params, so
            # we have to be able to handle those in container server anyway.
            _, swift_bytes = extract_swift_bytes(
                disk_file.get_datafile_metadata()['Content-Type'])
            if swift_bytes:
                content_type_headers['Content-Type'] += (';swift_bytes=%s'
                                                         % swift_bytes)
        update_headers = HeaderKeyDict({
            'x-size': orig_metadata['Content-Length'],
            'x-content-type': content_type_headers['Content-Type'],
            'x-timestamp': disk_file.data_timestamp.internal,
            'x-content-type-timestamp':
                content_type_headers['Content-Type-Timestamp'],
            'x-meta-timestamp': metadata['X-Timestamp'],
            'x-etag': orig_metadata['ETag']})
        # Special cases for backwards compatibility.
        # For EC policy, send X-Object-Sysmeta-Ec-Etag which is same as the
        # X-Backend-Container-Update-Override-Etag value sent with the original
        # PUT. Similarly send X-Object-Sysmeta-Ec-Content-Length which is the
        # same as the X-Backend-Container-Update-Override-Size value. We have
        # to send Etag and size with a POST container update because the
        # original PUT container update may have failed or be in async_pending.
        if 'X-Object-Sysmeta-Ec-Etag' in orig_metadata:
            update_headers['X-Etag'] = orig_metadata[
                'X-Object-Sysmeta-Ec-Etag']
        if 'X-Object-Sysmeta-Ec-Content-Length' in orig_metadata:
            update_headers['X-Size'] = orig_metadata[
                'X-Object-Sysmeta-Ec-Content-Length']
        self._check_container_override(update_headers, orig_metadata)
        # object POST updates are PUT to the container server
        self.container_update(
            'PUT', account, container, obj, request, update_headers,
            device, policy)
        return HTTPAccepted(request=request)
    @public
    @timing_stats()
    def PUT(self, request):
        """Handle HTTP PUT requests for the Swift Object Server.

        Streams the object body to disk (optionally as a multipart MIME
        exchange with metadata footers and/or a two-phase commit), then
        updates the expirer queue and the container layer.
        """
        device, partition, account, container, obj, policy = \
            get_name_and_placement(request, 5, 5, True)
        req_timestamp = valid_timestamp(request)
        error_response = check_object_creation(request, obj)
        if error_response:
            return error_response
        new_delete_at = int(request.headers.get('X-Delete-At') or 0)
        if new_delete_at and new_delete_at < time.time():
            return HTTPBadRequest(body='X-Delete-At in past', request=request,
                                  content_type='text/plain')
        try:
            fsize = request.message_length()
        except ValueError as e:
            return HTTPBadRequest(body=str(e), request=request,
                                  content_type='text/plain')
        # In case of multipart-MIME put, the proxy sends a chunked request,
        # but may let us know the real content length so we can verify that
        # we have enough disk space to hold the object.
        if fsize is None:
            fsize = request.headers.get('X-Backend-Obj-Content-Length')
            if fsize is not None:
                try:
                    fsize = int(fsize)
                except ValueError as e:
                    return HTTPBadRequest(body=str(e), request=request,
                                          content_type='text/plain')
        # SSYNC will include Frag-Index header for subrequests to primary
        # nodes; handoff nodes should 409 subrequests to over-write an
        # existing data fragment until they offloaded the existing fragment
        frag_index = request.headers.get('X-Backend-Ssync-Frag-Index')
        try:
            disk_file = self.get_diskfile(
                device, partition, account, container, obj,
                policy=policy, frag_index=frag_index)
        except DiskFileDeviceUnavailable:
            return HTTPInsufficientStorage(drive=device, request=request)
        try:
            orig_metadata = disk_file.read_metadata()
            orig_timestamp = disk_file.data_timestamp
        except DiskFileXattrNotSupported:
            return HTTPInsufficientStorage(drive=device, request=request)
        except DiskFileDeleted as e:
            # A tombstone still carries a timestamp we must compare against.
            orig_metadata = e.metadata
            orig_timestamp = e.timestamp
        except (DiskFileNotExist, DiskFileQuarantined):
            orig_metadata = {}
            orig_timestamp = Timestamp(0)
        # Checks for If-None-Match
        if request.if_none_match is not None and orig_metadata:
            if '*' in request.if_none_match:
                # File exists already so return 412
                return HTTPPreconditionFailed(request=request)
            if orig_metadata.get('ETag') in request.if_none_match:
                # The current ETag matches, so return 412
                return HTTPPreconditionFailed(request=request)
        if orig_timestamp >= req_timestamp:
            return HTTPConflict(
                request=request,
                headers={'X-Backend-Timestamp': orig_timestamp.internal})
        orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
        upload_expiration = time.time() + self.max_upload_time
        etag = md5()
        elapsed_time = 0
        try:
            with disk_file.create(size=fsize) as writer:
                upload_size = 0
                # If the proxy wants to send us object metadata after the
                # object body, it sets some headers. We have to tell the
                # proxy, in the 100 Continue response, that we're able to
                # parse a multipart MIME document and extract the object and
                # metadata from it. If we don't, then the proxy won't
                # actually send the footer metadata.
                have_metadata_footer = False
                use_multiphase_commit = False
                mime_documents_iter = iter([])
                obj_input = request.environ['wsgi.input']
                hundred_continue_headers = []
                if config_true_value(
                        request.headers.get(
                            'X-Backend-Obj-Multiphase-Commit')):
                    use_multiphase_commit = True
                    hundred_continue_headers.append(
                        ('X-Obj-Multiphase-Commit', 'yes'))
                if config_true_value(
                        request.headers.get('X-Backend-Obj-Metadata-Footer')):
                    have_metadata_footer = True
                    hundred_continue_headers.append(
                        ('X-Obj-Metadata-Footer', 'yes'))
                if have_metadata_footer or use_multiphase_commit:
                    obj_input.set_hundred_continue_response_headers(
                        hundred_continue_headers)
                    mime_boundary = request.headers.get(
                        'X-Backend-Obj-Multipart-Mime-Boundary')
                    if not mime_boundary:
                        return HTTPBadRequest("no MIME boundary")
                    try:
                        with ChunkReadTimeout(self.client_timeout):
                            mime_documents_iter = iter_mime_headers_and_bodies(
                                request.environ['wsgi.input'],
                                mime_boundary, self.network_chunk_size)
                            # First MIME doc is the object body itself.
                            _junk_hdrs, obj_input = next(mime_documents_iter)
                    except ChunkReadError:
                        return HTTPClientDisconnect(request=request)
                    except ChunkReadTimeout:
                        return HTTPRequestTimeout(request=request)
                timeout_reader = self._make_timeout_reader(obj_input)
                try:
                    for chunk in iter(timeout_reader, ''):
                        start_time = time.time()
                        if start_time > upload_expiration:
                            self.logger.increment('PUT.timeouts')
                            return HTTPRequestTimeout(request=request)
                        etag.update(chunk)
                        upload_size = writer.write(chunk)
                        elapsed_time += time.time() - start_time
                except ChunkReadError:
                    return HTTPClientDisconnect(request=request)
                except ChunkReadTimeout:
                    return HTTPRequestTimeout(request=request)
                if upload_size:
                    self.logger.transfer_rate(
                        'PUT.' + device + '.timing', elapsed_time,
                        upload_size)
                # Short upload relative to the announced size means the
                # client went away.
                if fsize is not None and fsize != upload_size:
                    return HTTPClientDisconnect(request=request)
                footer_meta = {}
                if have_metadata_footer:
                    footer_meta = self._read_metadata_footer(
                        mime_documents_iter)
                # Footer etag (if any) takes precedence over the header etag.
                request_etag = (footer_meta.get('etag') or
                                request.headers.get('etag', '')).lower()
                etag = etag.hexdigest()
                if request_etag and request_etag != etag:
                    return HTTPUnprocessableEntity(request=request)
                metadata = {
                    'X-Timestamp': request.timestamp.internal,
                    'Content-Type': request.headers['content-type'],
                    'ETag': etag,
                    'Content-Length': str(upload_size),
                }
                metadata.update(val for val in request.headers.items()
                                if (is_sys_or_user_meta('object', val[0]) or
                                    is_object_transient_sysmeta(val[0])))
                metadata.update(val for val in footer_meta.items()
                                if (is_sys_or_user_meta('object', val[0]) or
                                    is_object_transient_sysmeta(val[0])))
                headers_to_copy = (
                    request.headers.get(
                        'X-Backend-Replication-Headers', '').split() +
                    list(self.allowed_headers))
                for header_key in headers_to_copy:
                    if header_key in request.headers:
                        header_caps = header_key.title()
                        metadata[header_caps] = request.headers[header_key]
                writer.put(metadata)
                # if the PUT requires a two-phase commit (a data and a commit
                # phase) send the proxy server another 100-continue response
                # to indicate that we are finished writing object data
                if use_multiphase_commit:
                    request.environ['wsgi.input'].\
                        send_hundred_continue_response()
                    if not self._read_put_commit_message(mime_documents_iter):
                        return HTTPServerError(request=request)
                # got 2nd phase confirmation, write a timestamp.durable
                # state file to indicate a successful PUT
                writer.commit(request.timestamp)
                # Drain any remaining MIME docs from the socket. There
                # shouldn't be any, but we must read the whole request body.
                try:
                    while True:
                        with ChunkReadTimeout(self.client_timeout):
                            _junk_hdrs, _junk_body = next(mime_documents_iter)
                        drain(_junk_body, self.network_chunk_size,
                              self.client_timeout)
                except ChunkReadError:
                    raise HTTPClientDisconnect()
                except ChunkReadTimeout:
                    raise HTTPRequestTimeout()
                except StopIteration:
                    pass
        except (DiskFileXattrNotSupported, DiskFileNoSpace):
            return HTTPInsufficientStorage(drive=device, request=request)
        if orig_delete_at != new_delete_at:
            # Keep the expirer queue in sync with the changed X-Delete-At.
            if new_delete_at:
                self.delete_at_update(
                    'PUT', new_delete_at, account, container, obj, request,
                    device, policy)
            if orig_delete_at:
                self.delete_at_update(
                    'DELETE', orig_delete_at, account, container, obj,
                    request, device, policy)
        update_headers = HeaderKeyDict({
            'x-size': metadata['Content-Length'],
            'x-content-type': metadata['Content-Type'],
            'x-timestamp': metadata['X-Timestamp'],
            'x-etag': metadata['ETag']})
        # apply any container update header overrides sent with request
        self._check_container_override(update_headers, request.headers,
                                       footer_meta)
        self.container_update(
            'PUT', account, container, obj, request,
            update_headers,
            device, policy)
        return HTTPCreated(request=request, etag=etag)
    @public
    @timing_stats()
    def GET(self, request):
        """Handle HTTP GET requests for the Swift Object Server."""
        device, partition, account, container, obj, policy = \
            get_name_and_placement(request, 5, 5, True)
        try:
            disk_file = self.get_diskfile(
                device, partition, account, container, obj,
                policy=policy)
        except DiskFileDeviceUnavailable:
            return HTTPInsufficientStorage(drive=device, request=request)
        try:
            with disk_file.open():
                metadata = disk_file.get_metadata()
                obj_size = int(metadata['Content-Length'])
                file_x_ts = Timestamp(metadata['X-Timestamp'])
                # Only let the page cache keep the object if the request is
                # unauthenticated, unless keep_cache_private is configured.
                keep_cache = (self.keep_cache_private or
                              ('X-Auth-Token' not in request.headers and
                               'X-Storage-Token' not in request.headers))
                conditional_etag = resolve_etag_is_at_header(request, metadata)
                response = Response(
                    app_iter=disk_file.reader(keep_cache=keep_cache),
                    request=request, conditional_response=True,
                    conditional_etag=conditional_etag)
                response.headers['Content-Type'] = metadata.get(
                    'Content-Type', 'application/octet-stream')
                # Expose user/system metadata and any whitelisted headers.
                for key, value in metadata.items():
                    if (is_sys_or_user_meta('object', key) or
                            is_object_transient_sysmeta(key) or
                            key.lower() in self.allowed_headers):
                        response.headers[key] = value
                response.etag = metadata['ETag']
                response.last_modified = math.ceil(float(file_x_ts))
                response.content_length = obj_size
                try:
                    response.content_encoding = metadata[
                        'Content-Encoding']
                except KeyError:
                    pass
                response.headers['X-Timestamp'] = file_x_ts.normal
                response.headers['X-Backend-Timestamp'] = file_x_ts.internal
                resp = request.get_response(response)
        except DiskFileXattrNotSupported:
            return HTTPInsufficientStorage(drive=device, request=request)
        except (DiskFileNotExist, DiskFileQuarantined) as e:
            headers = {}
            if hasattr(e, 'timestamp'):
                headers['X-Backend-Timestamp'] = e.timestamp.internal
            resp = HTTPNotFound(request=request, headers=headers,
                                conditional_response=True)
        return resp
    @public
    @timing_stats(sample_rate=0.8)
    def HEAD(self, request):
        """Handle HTTP HEAD requests for the Swift Object Server."""
        device, partition, account, container, obj, policy = \
            get_name_and_placement(request, 5, 5, True)
        try:
            disk_file = self.get_diskfile(
                device, partition, account, container, obj,
                policy=policy)
        except DiskFileDeviceUnavailable:
            return HTTPInsufficientStorage(drive=device, request=request)
        try:
            metadata = disk_file.read_metadata()
        except DiskFileXattrNotSupported:
            return HTTPInsufficientStorage(drive=device, request=request)
        except (DiskFileNotExist, DiskFileQuarantined) as e:
            headers = {}
            if hasattr(e, 'timestamp'):
                headers['X-Backend-Timestamp'] = e.timestamp.internal
            return HTTPNotFound(request=request, headers=headers,
                                conditional_response=True)
        conditional_etag = resolve_etag_is_at_header(request, metadata)
        response = Response(request=request, conditional_response=True,
                            conditional_etag=conditional_etag)
        response.headers['Content-Type'] = metadata.get(
            'Content-Type', 'application/octet-stream')
        # Expose user/system metadata and any whitelisted headers.
        for key, value in metadata.items():
            if (is_sys_or_user_meta('object', key) or
                    is_object_transient_sysmeta(key) or
                    key.lower() in self.allowed_headers):
                response.headers[key] = value
        response.etag = metadata['ETag']
        ts = Timestamp(metadata['X-Timestamp'])
        response.last_modified = math.ceil(float(ts))
        # Needed for container sync feature
        response.headers['X-Timestamp'] = ts.normal
        response.headers['X-Backend-Timestamp'] = ts.internal
        response.content_length = int(metadata['Content-Length'])
        try:
            response.content_encoding = metadata['Content-Encoding']
        except KeyError:
            pass
        return response
    @public
    @timing_stats()
    def DELETE(self, request):
        """Handle HTTP DELETE requests for the Swift Object Server.

        Writes a tombstone when the request is newer than the stored data
        and mirrors the deletion to the container layer; honors the
        X-If-Delete-At conditional used by the object expirer.
        """
        device, partition, account, container, obj, policy = \
            get_name_and_placement(request, 5, 5, True)
        req_timestamp = valid_timestamp(request)
        try:
            disk_file = self.get_diskfile(
                device, partition, account, container, obj,
                policy=policy)
        except DiskFileDeviceUnavailable:
            return HTTPInsufficientStorage(drive=device, request=request)
        try:
            orig_metadata = disk_file.read_metadata()
        except DiskFileXattrNotSupported:
            return HTTPInsufficientStorage(drive=device, request=request)
        except DiskFileExpired as e:
            orig_timestamp = e.timestamp
            orig_metadata = e.metadata
            response_class = HTTPNotFound
        except DiskFileDeleted as e:
            orig_timestamp = e.timestamp
            orig_metadata = {}
            response_class = HTTPNotFound
        except (DiskFileNotExist, DiskFileQuarantined):
            orig_timestamp = 0
            orig_metadata = {}
            response_class = HTTPNotFound
        else:
            # Live object: success only if the request is newer.
            orig_timestamp = disk_file.data_timestamp
            if orig_timestamp < req_timestamp:
                response_class = HTTPNoContent
            else:
                response_class = HTTPConflict
        response_timestamp = max(orig_timestamp, req_timestamp)
        orig_delete_at = int(orig_metadata.get('X-Delete-At') or 0)
        try:
            req_if_delete_at_val = request.headers['x-if-delete-at']
            req_if_delete_at = int(req_if_delete_at_val)
        except KeyError:
            pass
        except ValueError:
            return HTTPBadRequest(
                request=request,
                body='Bad X-If-Delete-At header value')
        else:
            # request includes x-if-delete-at; we must not place a tombstone
            # if we can not verify the x-if-delete-at time
            if not orig_timestamp:
                # no object found at all
                return HTTPNotFound()
            if orig_delete_at != req_if_delete_at:
                return HTTPPreconditionFailed(
                    request=request,
                    body='X-If-Delete-At and X-Delete-At do not match')
            else:
                # differentiate success from no object at all
                response_class = HTTPNoContent
        if orig_delete_at:
            # Remove the old entry from the expirer queue.
            self.delete_at_update('DELETE', orig_delete_at, account,
                                  container, obj, request, device,
                                  policy)
        if orig_timestamp < req_timestamp:
            try:
                disk_file.delete(req_timestamp)
            except DiskFileNoSpace:
                return HTTPInsufficientStorage(drive=device, request=request)
            self.container_update(
                'DELETE', account, container, obj, request,
                HeaderKeyDict({'x-timestamp': req_timestamp.internal}),
                device, policy)
        return response_class(
            request=request,
            headers={'X-Backend-Timestamp': response_timestamp.internal})
@public
@replication
@timing_stats(sample_rate=0.1)
def REPLICATE(self, request):
"""
Handle REPLICATE requests for the Swift Object Server. This is used
by the object replicator to get hashes for directories.
Note that the name REPLICATE is preserved for historical reasons as
this verb really just returns the hashes information for the specified
parameters and is used, for example, by both replication and EC.
"""
device, partition, suffix_parts, policy = \
get_name_and_placement(request, 2, 3, True)
suffixes = suffix_parts.split('-') if suffix_parts else []
try:
hashes = self._diskfile_router[policy].get_hashes(
device, partition, suffixes, policy)
except DiskFileDeviceUnavailable:
resp = HTTPInsufficientStorage(drive=device, request=request)
else:
resp = Response(body=pickle.dumps(hashes))
return resp
@public
@replication
@timing_stats(sample_rate=0.1)
def SSYNC(self, request):
return Response(app_iter=ssync_receiver.Receiver(self, request)())
    def __call__(self, env, start_response):
        """WSGI Application entry point for the Swift Object Server.

        Dispatches the request to the matching public verb method, logs the
        request, throttles PUT/DELETE if configured, and takes the zero-copy
        send path for whole-object GET responses when supported.
        """
        start_time = time.time()
        req = Request(env)
        self.logger.txn_id = req.headers.get('x-trans-id', None)
        if not check_utf8(req.path_info):
            res = HTTPPreconditionFailed(body='Invalid UTF8 or contains NULL')
        else:
            try:
                # disallow methods which have not been marked 'public'
                if req.method not in self.allowed_methods:
                    res = HTTPMethodNotAllowed()
                else:
                    res = getattr(self, req.method)(req)
            except DiskFileCollision:
                res = HTTPForbidden(request=req)
            except HTTPException as error_response:
                res = error_response
            except (Exception, Timeout):
                self.logger.exception(_(
                    'ERROR __call__ error with %(method)s'
                    ' %(path)s '), {'method': req.method, 'path': req.path})
                res = HTTPInternalServerError(body=traceback.format_exc())
        trans_time = time.time() - start_time
        if self.log_requests:
            log_line = get_log_line(req, res, trans_time, '')
            # Replication traffic is logged at debug to keep info logs clean.
            if req.method in ('REPLICATE', 'SSYNC') or \
                    'X-Backend-Replication' in req.headers:
                self.logger.debug(log_line)
            else:
                self.logger.info(log_line)
        if req.method in ('PUT', 'DELETE'):
            # Optionally pad fast PUT/DELETEs out to at least `slow` seconds.
            slow = self.slow - trans_time
            if slow > 0:
                sleep(slow)
        # To be able to zero-copy send the object, we need a few things.
        # First, we have to be responding successfully to a GET, or else we're
        # not sending the object. Second, we have to be able to extract the
        # socket file descriptor from the WSGI input object. Third, the
        # diskfile has to support zero-copy send.
        #
        # There's a good chance that this could work for 206 responses too,
        # but the common case is sending the whole object, so we'll start
        # there.
        if req.method == 'GET' and res.status_int == 200 and \
           isinstance(env['wsgi.input'], wsgi.Input):
            app_iter = getattr(res, 'app_iter', None)
            checker = getattr(app_iter, 'can_zero_copy_send', None)
            if checker and checker():
                # For any kind of zero-copy thing like sendfile or splice, we
                # need the file descriptor. Eventlet doesn't provide a clean
                # way of getting that, so we resort to this.
                wsock = env['wsgi.input'].get_socket()
                wsockfd = wsock.fileno()

                # Don't call zero_copy_send() until after we force the HTTP
                # headers out of Eventlet and into the socket.
                def zero_copy_iter():
                    # If possible, set TCP_CORK so that headers don't
                    # immediately go on the wire, but instead, wait for some
                    # response body to make the TCP frames as large as
                    # possible (and hence as few packets as possible).
                    #
                    # On non-Linux systems, we might consider TCP_NODELAY, but
                    # since the only known zero-copy-capable diskfile uses
                    # Linux-specific syscalls, we'll defer that work until
                    # someone needs it.
                    if hasattr(socket, 'TCP_CORK'):
                        wsock.setsockopt(socket.IPPROTO_TCP,
                                         socket.TCP_CORK, 1)
                    yield EventletPlungerString()
                    try:
                        app_iter.zero_copy_send(wsockfd)
                    except Exception:
                        self.logger.exception("zero_copy_send() blew up")
                        raise
                    yield ''

                # Get headers ready to go out
                res(env, start_response)
                return zero_copy_iter()
            else:
                return res(env, start_response)
        else:
            return res(env, start_response)
def global_conf_callback(preloaded_app_conf, global_conf):
    """
    Callback for swift.common.wsgi.run_wsgi during the global_conf
    creation so that we can add our replication_semaphore, used to
    limit the number of concurrent SSYNC_REQUESTS across all
    workers.

    :param preloaded_app_conf: The preloaded conf for the WSGI app.
                               This conf instance will go away, so
                               just read from it, don't write.
    :param global_conf: The global conf that will eventually be
                        passed to the app_factory function later.
                        This conf is created before the worker
                        subprocesses are forked, so can be useful to
                        set up semaphores, shared memory, etc.
    """
    raw_value = preloaded_app_conf.get('replication_concurrency')
    concurrency = int(raw_value or 4)
    if not concurrency:
        return
    # Wrap the semaphore in a list so it survives paste's conf handling.
    semaphore = multiprocessing.BoundedSemaphore(concurrency)
    global_conf['replication_semaphore'] = [semaphore]
def app_factory(global_conf, **local_conf):
    """paste.deploy app factory for creating WSGI object server apps"""
    # Local (per-section) settings take precedence over the global conf.
    merged_conf = dict(global_conf)
    merged_conf.update(local_conf)
    return ObjectController(merged_conf)
| larsbutler/swift | swift/obj/server.py | Python | apache-2.0 | 55,588 |
# Lossy image-compression demo: 2-D Haar transform each RGB channel, keep
# only the strongest 0.25% of coefficients, invert, and save the result.
# All transform helpers (load, haar_2d, keep_ratio, ihaar_2d, to_float,
# from_float, save) come from the local ``harr`` module via the wildcard
# import below -- TODO confirm their exact contracts against harr.py.
from harr import *
import Image
import numpy as np
# ``load`` presumably returns per-channel data indexed 0/1/2 for R/G/B,
# given the usage below -- verify against harr.load.
img = load('sky1024px.jpg')
# Forward 2-D Haar transform of each channel (as floats).
rcoeffs = haar_2d(to_float(img[0]))
gcoeffs = haar_2d(to_float(img[1]))
bcoeffs = haar_2d(to_float(img[2]))
# Keep only a 0.0025 fraction of coefficients per channel (the lossy step).
rstrong_coeffs = keep_ratio(rcoeffs, .0025)
gstrong_coeffs = keep_ratio(gcoeffs, .0025)
bstrong_coeffs = keep_ratio(bcoeffs, .0025)
# Inverse transform reconstructs approximate channel data.
rlossy = ihaar_2d(rstrong_coeffs)
glossy = ihaar_2d(gstrong_coeffs)
blossy = ihaar_2d(bstrong_coeffs)
# NOTE(review): output name says 'cat' but the input is 'sky1024px.jpg'.
save('cat-output.png', from_float(rlossy), from_float(glossy), from_float(blossy))
| nbingham1/reconstruction | old/harr_test.py | Python | gpl-3.0 | 507 |
# -*- coding: utf-8 -*-
# #START_LICENSE###########################################################
#
#
# This file is part of the Environment for Tree Exploration program
# (ETE). http://ete.cgenomics.org
#
# ETE is free software: you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ETE is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details.
#
# You should have received a copy of the GNU General Public License
# along with ETE. If not, see <http://www.gnu.org/licenses/>.
#
#
# ABOUT THE ETE PACKAGE
# =====================
#
# ETE is distributed under the GPL copyleft license (2008-2011).
#
# If you make use of ETE in published work, please cite:
#
# Jaime Huerta-Cepas, Joaquin Dopazo and Toni Gabaldon.
# ETE: a python Environment for Tree Exploration. Jaime BMC
# Bioinformatics 2010,:24doi:10.1186/1471-2105-11-24
#
# Note that extra references to the specific methods implemented in
# the toolkit are available in the documentation.
#
# More info at http://ete.cgenomics.org
#
#
# #END_LICENSE#############################################################
__VERSION__="ete2-2.2rev1056"
from PyQt4 import QtCore, QtGui
from main import _leaf
class RectPartition(QtGui.QGraphicsRectItem):
    """Graphics item holding the layout regions of one tree-node partition."""
    def __init__(self, *args):
        QtGui.QGraphicsRectItem.__init__(self, *args)
        # Background drawing stays off until a layout pass enables it.
        self.drawbg = False
        # Rectangles for the node itself, its attached faces, and the
        # whole area occupied by the node plus its descendants.
        self.nodeRegion = QtCore.QRectF()
        self.facesRegion = QtCore.QRectF()
        self.fullRegion = QtCore.QRectF()
    def paint(self, painter, option, index):
        # Restrict painting to the exposed area before delegating to Qt.
        if self.drawbg:
            painter.setClipRect(option.exposedRect)
        return QtGui.QGraphicsRectItem.paint(self, painter, option, index)
def get_partition_center(n, n2i, n2f):
    """Return the vertical center, within node *n*'s full region, at which
    its branch line should be drawn, clamped so top/bottom faces fit."""
    item = n2i[n]
    bottom_h = n2f[n]["branch-bottom"].h
    top_h = n2f[n]["branch-top"].h
    #right_h = max(n2f[n]["branch-right"].h, n.img_style["size"]) /2
    half_node_h = item.nodeRegion.height() / 2
    # Neither face may be squeezed below half the node region's height.
    top_h = max(half_node_h, top_h)
    bottom_h = max(half_node_h, bottom_h)
    full_rect = item.fullRegion
    if _leaf(n):
        center = full_rect.height() / 2
    else:
        # Internal nodes center the branch between the first and last child.
        first_child = n2i[n.children[0]]
        last_child = n2i[n.children[-1]]
        c1 = first_child.start_y + first_child.center
        c2 = last_child.start_y + last_child.center
        center = c1 + ((c2 - c1) / 2)
    # Push the center inward if a face would overflow the region.
    if top_h > center:
        center = top_h
    elif bottom_h > full_rect.height() - center:
        center = full_rect.height() - bottom_h
    return center
def init_rect_leaf_item(node, n2i, n2f):
    """Compute and store the branch center for a leaf node's partition."""
    n2i[node].center = get_partition_center(node, n2i, n2f)
def init_rect_node_item(node, n2i, n2f):
    """Size an internal node's partition from its children and position every
    child partition inside it (relative coordinates)."""
    item = n2i[node]
    child_items = [n2i[c] for c in node.children]
    total_child_h = sum(ci.fullRegion.height() for ci in child_items)
    widest_child = max(ci.fullRegion.width() for ci in child_items)
    if total_child_h > item.fullRegion.height():
        item.fullRegion.setHeight(total_child_h)
    item.fullRegion.setWidth(widest_child + item.nodeRegion.width())
    offset_y = 0
    offset_x = item.nodeRegion.width()
    if item.nodeRegion.height() > total_child_h:
        # Center the stacked children vertically under a taller node region.
        offset_y += ((item.fullRegion.height() - total_child_h)) / 2
    for child_item in child_items:
        # Sets x and y position of child within parent
        # partition (relative positions)
        child_item.setParentItem(item)
        child_item.setPos(offset_x, offset_y)
        child_item.start_y = offset_y
        offset_y += child_item.fullRegion.height()
    item.center = get_partition_center(node, n2i, n2f)
| csc8630Spring2014/Clusterizer | ete2/treeview/qt4_rect_render.py | Python | mit | 3,941 |
from energyPATHWAYS.database import CsvDatabase
from energyPATHWAYS.schema import load_data_objects
if __name__ == '__main__':
    # Smoke test: open the CSV database for a hard-coded dataset and load
    # its data objects for a placeholder scenario name.
    dbname = '171112_US'
    testPostgres = False  # NOTE(review): unused here — presumably a leftover toggle
    db = CsvDatabase.get_database(pathname=dbname)
    scenario = 'foo' # TBD: not yet implemented
    load_data_objects(scenario)
    print("Done.")
| energyPATHWAYS/energyPATHWAYS | model_building_tools/gen_classes/test_classes.py | Python | mit | 334 |
import os
from selenium import webdriver
from django.test import LiveServerTestCase
from dcbase.tests import BaseTestCase
from dcbase.tests.browser.pages.loginPage import LoginPage
class BrowserTestCase(LiveServerTestCase, BaseTestCase):
    """Base class for Selenium browser tests against the live Django server.

    Subclasses set ``_pageClass`` (the page-object under test) and may set
    ``_requiresLogin`` or the window dimensions; ``setUp`` then opens the
    page under test, logging a user in first when required.
    """
    _pageClass = None            # page-object class driven by this test case
    _browser = None              # WebDriver shared across the test class
    _windowWidth = 1024
    _windowHeight = 768
    _requiresLogin = False       # when True, setUp logs a user in first
    _loggedInBrowserUser = None
    def __init__(self, methodName):
        BaseTestCase.__init__(self)
        super().__init__(methodName)
        self._urlFields = {}
    @classmethod
    def setUpClass(cls):
        super().setUpClass()
        # The BROWSER env var selects the WebDriver backend (default Chrome).
        webDriverClassName = os.environ.get('BROWSER', 'Chrome')
        webDriverClass = getattr(webdriver, webDriverClassName)
        cls._browser = webDriverClass()
        cls._browser.set_window_size(cls._windowWidth, cls._windowHeight)
    @classmethod
    def tearDownClass(cls):
        cls._browser.quit()
        super().tearDownClass()
    def setUp(self):
        super().setUp()
        if self._requiresLogin and not self._loggedInBrowserUser:
            user = self.createUser()
            self.logInAs(user)
            # NOTE(review): assigning on self only caches per test instance;
            # unittest builds a fresh instance per test, so every test logs
            # in again.  Use type(self) if class-wide caching is intended.
            self._loggedInBrowserUser = user
        self.browseToPageUnderTest()
    @property
    def pageClass(self):
        return self._pageClass
    @property
    def browser(self):
        return self._browser
    def getWindowWidth(self):
        return self._windowWidth
    def setWindowWidth(self, width):
        # Fix: the setter previously accepted no value argument, so any
        # assignment to ``windowWidth`` raised TypeError and the requested
        # width was never stored before resizing.
        self._windowWidth = width
        self.browser.set_window_size(self.windowWidth, self.windowHeight)
    windowWidth = property(getWindowWidth, setWindowWidth)
    def getWindowHeight(self):
        return self._windowHeight
    def setWindowHeight(self, height):
        # Same fix as setWindowWidth: store the new height, then resize.
        self._windowHeight = height
        self.browser.set_window_size(self.windowWidth, self.windowHeight)
    windowHeight = property(getWindowHeight, setWindowHeight)
    def browseToPage(self, cls, **urlFields):
        # Page objects construct themselves from the driver + live server URL.
        self.page = cls.get(self._browser, self.live_server_url, **urlFields)
    def browseToPageUnderTest(self):
        self.browseToPage(self._pageClass, **self._urlFields)
    def logInAs(self, user, *, password=None):
        self.browseToPage(LoginPage)
        self.page.enterCredentials(user.username, self.getPasswordForUser(user, password))
        self.page = self.page.submit()
    def logOut(self):
        # Placeholder: logout flow not implemented yet.
        pass
| tctimmeh/dc-django-base | dcbase/tests/browser/browserTestCase.py | Python | mit | 2,291 |
import logging
import threading
import sys
from libs import Leap
from core.listeners import LeapListener
class LeapMotionListenerThread(threading.Thread):
    """Thread that attaches a LeapListener to a Leap controller, blocks until
    a line arrives on stdin, then detaches the listener again."""
    def __init__(self):
        threading.Thread.__init__(self)
        # set this thread as a daemon to terminate when the
        # main program terminates.
        # self.daemon = True
    def run(self):
        # Fix: pre-bind both names so the ``finally`` clause cannot raise
        # NameError/UnboundLocalError when the exception occurred before
        # the listener or controller was created.
        listener = None
        controller = None
        try:
            listener = LeapListener()
            controller = Leap.Controller()
            controller.set_policy_flags(Leap.Controller.POLICY_BACKGROUND_FRAMES)
            controller.add_listener(listener)
            # Block until the main program sends a line on stdin.
            sys.stdin.readline()
        except Exception as e:
            logging.error(e)
        finally:
            # Only detach when both objects were actually created.
            if controller is not None and listener is not None:
                controller.remove_listener(listener)
| jerechua/LeapBoard | core/threads.py | Python | mit | 753 |
from six import PY3
import sys
import unittest
from six.moves import StringIO
from io import BytesIO
from robot.result import ExecutionResult
from robot.reporting.outputwriter import OutputWriter
from robot.utils import XmlWriter
from robot.utils.asserts import assert_equals
from robot.utils import ET, ETSource
from test_resultbuilder import GOLDEN_XML, GOLDEN_XML_TWICE
class StreamXmlWriter(XmlWriter):
    """XmlWriter variant that writes into an already-open stream.

    The stream is passed through unchanged instead of being opened from a
    path, and ``close`` is a no-op so the caller keeps ownership of the
    stream and can inspect its contents afterwards.
    """
    def _create_output(self, output):
        # ``output`` is the stream itself; nothing needs to be opened.
        return output
    def close(self):
        # Deliberately left open: tests read the stream after writing.
        pass
class TestableOutputWriter(OutputWriter):
    """OutputWriter that serializes into a caller-supplied stream for tests."""
    def _get_writer(self, output, generator):
        # Use the stream-based writer and open the <robot> root element
        # manually; the ``generator`` attribute is not needed in tests.
        writer = StreamXmlWriter(output, encoding='UTF-8')
        writer.start('robot')
        return writer
class TestResultSerializer(unittest.TestCase):
    """Visiting an ExecutionResult with an OutputWriter must reproduce the
    golden output.xml, both for a single result and for combined results."""
    def test_single_result_serialization(self):
        stream = StringIO()
        ExecutionResult(GOLDEN_XML).visit(TestableOutputWriter(stream))
        self._assert_xml_content(self._xml_lines(stream.getvalue()),
                                 self._xml_lines(GOLDEN_XML))
    def test_combining_results(self):
        stream = StringIO()
        ExecutionResult(GOLDEN_XML, GOLDEN_XML).visit(TestableOutputWriter(stream))
        self._assert_xml_content(self._xml_lines(stream.getvalue()),
                                 self._xml_lines(GOLDEN_XML_TWICE))
    def _xml_lines(self, text):
        # Normalize by round-tripping through ElementTree.
        with ETSource(text) as source:
            tree = ET.parse(source)
        buffer = BytesIO()
        tree.write(buffer)
        return buffer.getvalue().splitlines()
    def _assert_xml_content(self, actual, expected):
        assert_equals(len(actual), len(expected))
        # Skip the first two lines (XML declaration / root element).
        pairs = list(zip(actual, expected))[2:]
        for index, (act, exp) in enumerate(pairs):
            assert_equals(act, exp.strip(), 'Different values on line %d' % index)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| userzimmermann/robotframework-python3 | utest/result/test_resultserializer.py | Python | apache-2.0 | 1,897 |
"""This file implements the gym environment of minitaur.
"""
import math
import random
import os, inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
from gym import spaces
import numpy as np
from pybullet_envs.minitaur.envs import minitaur_gym_env
import pybullet_data
# Episode reaches the goal once the observed ball distance drops below this.
GOAL_DISTANCE_THRESHOLD = 0.8
# Bonus added to the reward when the goal state is reached.
GOAL_REWARD = 1000.0
# All rewards are multiplied by this factor before being returned.
REWARD_SCALING = 1e-3
# The ball spawns at a random heading within +/- this angle (radians) ...
INIT_BALL_ANGLE = math.pi / 3
# ... at this distance from the origin (presumably meters — TODO confirm).
INIT_BALL_DISTANCE = 5.0
# Tolerance used when validating that actions stay within bounds.
ACTION_EPS = 0.01
class MinitaurBallGymEnv(minitaur_gym_env.MinitaurGymEnv):
  """The gym environment for the minitaur and a ball.
  It simulates a minitaur (a quadruped robot) and a ball. The state space
  includes the angle and distance of the ball relative to minitaur's base.
  The action space is a steering command. The reward function is based
  on how far the ball is relative to the minitaur's base.
  """
  def __init__(self,
               urdf_root=pybullet_data.getDataPath(),
               self_collision_enabled=True,
               pd_control_enabled=False,
               leg_model_enabled=True,
               on_rack=False,
               render=False):
    """Initialize the minitaur and ball gym environment.
    Args:
      urdf_root: The path to the urdf data folder.
      self_collision_enabled: Whether to enable self collision in the sim.
      pd_control_enabled: Whether to use PD controller for each motor.
      leg_model_enabled: Whether to use a leg motor to reparameterize the action
        space.
      on_rack: Whether to place the minitaur on rack. This is only used to debug
        the walking gait. In this mode, the minitaur's base is hanged midair so
        that its walking gait is clearer to visualize.
      render: Whether to render the simulation.
    """
    super(MinitaurBallGymEnv, self).__init__(urdf_root=urdf_root,
                                             self_collision_enabled=self_collision_enabled,
                                             pd_control_enabled=pd_control_enabled,
                                             leg_model_enabled=leg_model_enabled,
                                             on_rack=on_rack,
                                             render=render)
    # Camera placement used when rendering.
    self._cam_dist = 2.0
    self._cam_yaw = -70
    self._cam_pitch = -30
    # Action: a single steering command in [-1, 1].
    self.action_space = spaces.Box(np.array([-1]), np.array([1]))
    # Observation: [relative ball angle, relative ball distance].
    self.observation_space = spaces.Box(np.array([-math.pi, 0]), np.array([math.pi, 100]))
  def reset(self):
    """Reset the quadruped, then spawn the ball at a random heading within
    +/- INIT_BALL_ANGLE at INIT_BALL_DISTANCE from the origin."""
    # Placeholder id so methods that reference the ball during the base
    # reset see a valid value before the URDF is loaded below.
    self._ball_id = 0
    super(MinitaurBallGymEnv, self).reset()
    self._init_ball_theta = random.uniform(-INIT_BALL_ANGLE, INIT_BALL_ANGLE)
    self._init_ball_distance = INIT_BALL_DISTANCE
    self._ball_pos = [
        self._init_ball_distance * math.cos(self._init_ball_theta),
        self._init_ball_distance * math.sin(self._init_ball_theta), 1
    ]
    self._ball_id = self._pybullet_client.loadURDF(
        "%s/sphere_with_restitution.urdf" % self._urdf_root, self._ball_pos)
    return self._get_observation()
  def _get_observation(self):
    """Return and cache [angle, distance] of the ball in the base frame."""
    world_translation_minitaur, world_rotation_minitaur = (
        self._pybullet_client.getBasePositionAndOrientation(self.minitaur.quadruped))
    world_translation_ball, world_rotation_ball = (
        self._pybullet_client.getBasePositionAndOrientation(self._ball_id))
    # Invert the minitaur pose so the ball can be expressed in its frame.
    minitaur_translation_world, minitaur_rotation_world = (self._pybullet_client.invertTransform(
        world_translation_minitaur, world_rotation_minitaur))
    minitaur_translation_ball, _ = (self._pybullet_client.multiplyTransforms(
        minitaur_translation_world, minitaur_rotation_world, world_translation_ball,
        world_rotation_ball))
    distance = math.sqrt(minitaur_translation_ball[0]**2 + minitaur_translation_ball[1]**2)
    # NOTE(review): atan2 is called with swapped (x, y) arguments and the
    # result shifted by -pi/2 below — presumably to measure the heading from
    # the base's forward axis; confirm against the robot frame convention.
    angle = math.atan2(minitaur_translation_ball[0], minitaur_translation_ball[1])
    self._observation = [angle - math.pi / 2, distance]
    return self._observation
  def _transform_action_to_motor_command(self, action):
    """Validate the steering action and map it to the 8 motor commands."""
    if self._leg_model_enabled:
      for i, action_component in enumerate(action):
        # Reject actions outside [-bound - eps, bound + eps].
        if not (-self._action_bound - ACTION_EPS <= action_component <=
                self._action_bound + ACTION_EPS):
          raise ValueError("{}th action {} out of bounds.".format(i, action_component))
      action = self._apply_steering_to_locomotion(action)
      action = self.minitaur.ConvertFromLegModel(action)
    return action
  def _apply_steering_to_locomotion(self, action):
    # A hardcoded feedforward walking controller based on sine functions.
    # The steering command biases the swing amplitude of the left/right leg
    # pairs in opposite directions, turning the trot into a curve.
    amplitude_swing = 0.5
    amplitude_extension = 0.5
    speed = 200
    steering_amplitude = 0.5 * action[0]
    t = self.minitaur.GetTimeSinceReset()
    a1 = math.sin(t * speed) * (amplitude_swing + steering_amplitude)
    a2 = math.sin(t * speed + math.pi) * (amplitude_swing - steering_amplitude)
    a3 = math.sin(t * speed) * amplitude_extension
    a4 = math.sin(t * speed + math.pi) * amplitude_extension
    action = [a1, a2, a2, a1, a3, a4, a4, a3]
    return action
  def _distance_to_ball(self):
    """XY-plane distance between base and ball in world coordinates.
    Not referenced elsewhere in this class (the reward uses the cached
    observation distance instead)."""
    world_translation_minitaur, _ = (self._pybullet_client.getBasePositionAndOrientation(
        self.minitaur.quadruped))
    world_translation_ball, _ = (self._pybullet_client.getBasePositionAndOrientation(
        self._ball_id))
    distance = math.sqrt((world_translation_ball[0] - world_translation_minitaur[0])**2 +
                         (world_translation_ball[1] - world_translation_minitaur[1])**2)
    return distance
  def _goal_state(self):
    # Goal reached when the observed ball distance is under the threshold.
    return self._observation[1] < GOAL_DISTANCE_THRESHOLD
  def _reward(self):
    # Negative distance plus a large bonus at the goal, scaled down.
    reward = -self._observation[1]
    if self._goal_state():
      reward += GOAL_REWARD
    return reward * REWARD_SCALING
  def _termination(self):
    # Episodes end only when the goal state is reached.
    if self._goal_state():
      return True
    return False
| MadManRises/Madgine | shared/bullet3-2.89/examples/pybullet/gym/pybullet_envs/minitaur/envs/minitaur_ball_gym_env.py | Python | mit | 5,864 |
# Module: UnitTests.tUtils.py
# Version: 0.1
# Author: Aaron Sharp
# Date: 06/25/2015
#
# The purpose of this module is to provide unit tests for Utils
import unittest
from UnitTests.Helper import Mock
import os
from copy import copy
import Utils.CD
import Utils.Resources
import Utils.MacbookProResources
import Utils.FultonResources
import Utils.FultonResourcesLight
import Utils.Workspace
class tCD(unittest.TestCase):
    """Tests for the Utils.CD directory-changing context manager."""
    def setUp(self):
        os.makedirs("tmp")
    def tearDown(self):
        os.rmdir("tmp")
    def test_directory_exists(self):
        start_dir = os.getcwd()
        with Utils.CD.CD("tmp"):
            # Inside the context the process cwd is the target directory.
            self.assertEqual(os.getcwd(), start_dir + "/tmp")
        # Leaving the context restores the previous cwd.
        self.assertNotEqual(os.getcwd(), start_dir + "/tmp")
    def test_directory_does_not_exist(self):
        with self.assertRaises(OSError):
            with Utils.CD.CD("tmp_1"):
                pass
class tResources(unittest.TestCase):
    """Every accessor on the abstract Resources base class must raise."""
    def setUp(self):
        self.obj = Utils.Resources.Resources()
    def test_get_small_memory(self):
        self.assertRaises(Exception, self.obj.getSmallMemory)
    def test_get_medium_memory(self):
        self.assertRaises(Exception, self.obj.getMediumMemory)
    def test_get_large_memory(self):
        self.assertRaises(Exception, self.obj.getLargeMemory)
    def test_get_small_time(self):
        self.assertRaises(Exception, self.obj.getSmallTime)
    def test_get_medium_time(self):
        self.assertRaises(Exception, self.obj.getMediumTime)
    def test_get_large_time(self):
        self.assertRaises(Exception, self.obj.getLargeTime)
    def test_get_small_threads(self):
        self.assertRaises(Exception, self.obj.getSmallThreads)
    def test_get_medium_threads(self):
        self.assertRaises(Exception, self.obj.getMediumThreads)
    def test_get_large_threads(self):
        self.assertRaises(Exception, self.obj.getLargeThreads)
class tMacbookProResources(unittest.TestCase):
    """Resource limits reported by the MacbookPro profile."""
    def setUp(self):
        self.obj = Utils.MacbookProResources.Resources()
    def test_get_small_memory(self):
        self.assertEqual(self.obj.getSmallMemory(), 1)
    def test_get_medium_memory(self):
        self.assertEqual(self.obj.getMediumMemory(), 3)
    def test_get_large_memory(self):
        self.assertEqual(self.obj.getLargeMemory(), 7)
    def test_get_small_time(self):
        self.assertEqual(self.obj.getSmallTime(), 24)
    def test_get_medium_time(self):
        self.assertEqual(self.obj.getMediumTime(), 4392)
    def test_get_large_time(self):
        self.assertEqual(self.obj.getLargeTime(), 8760)
    def test_get_small_threads(self):
        self.assertEqual(self.obj.getSmallThreads(), 1)
    def test_get_medium_threads(self):
        self.assertEqual(self.obj.getMediumThreads(), 1)
    def test_get_large_threads(self):
        self.assertEqual(self.obj.getLargeThreads(), 2)
class tFultonResources(unittest.TestCase):
    """Equality semantics and resource limits of the Fulton profile."""
    def setUp(self):
        self.obj = Utils.FultonResources.Resources()
    def _check_equality(self, other, expect_equal):
        # Verify both __eq__ and __ne__ agree with the expectation.
        self.assertEqual([self.obj == other, self.obj != other],
                         [expect_equal, not expect_equal])
    def test_eq_None(self):
        self._check_equality(None, False)
    def test_eq_diffClass(self):
        self._check_equality(Mock(), False)
    def test_eq_sameClass(self):
        self._check_equality(Utils.FultonResources.Resources(), True)
    def test_get_small_memory(self):
        self.assertEqual(self.obj.getSmallMemory(), 1)
    def test_get_medium_memory(self):
        self.assertEqual(self.obj.getMediumMemory(), 8)
    def test_get_large_memory(self):
        self.assertEqual(self.obj.getLargeMemory(), 24)
    def test_get_small_time(self):
        self.assertEqual(self.obj.getSmallTime(), 1)
    def test_get_medium_time(self):
        self.assertEqual(self.obj.getMediumTime(), 12)
    def test_get_large_time(self):
        self.assertEqual(self.obj.getLargeTime(), 24)
    def test_get_small_threads(self):
        self.assertEqual(self.obj.getSmallThreads(), 2)
    def test_get_medium_threads(self):
        self.assertEqual(self.obj.getMediumThreads(), 6)
    def test_get_large_threads(self):
        self.assertEqual(self.obj.getLargeThreads(), 12)
class tFultonResourcesLight(unittest.TestCase):
    """Resource limits reported by the lightweight Fulton profile."""
    def setUp(self):
        self.obj = Utils.FultonResourcesLight.Resources()
    def test_get_small_memory(self):
        self.assertEqual(self.obj.getSmallMemory(), 1)
    def test_get_medium_memory(self):
        self.assertEqual(self.obj.getMediumMemory(), 1)
    def test_get_large_memory(self):
        self.assertEqual(self.obj.getLargeMemory(), 1)
    def test_get_small_time(self):
        self.assertEqual(self.obj.getSmallTime(), 1)
    def test_get_medium_time(self):
        self.assertEqual(self.obj.getMediumTime(), 1)
    def test_get_large_time(self):
        self.assertEqual(self.obj.getLargeTime(), 1)
    def test_get_small_threads(self):
        self.assertEqual(self.obj.getSmallThreads(), 2)
    def test_get_medium_threads(self):
        self.assertEqual(self.obj.getMediumThreads(), 2)
    def test_get_large_threads(self):
        self.assertEqual(self.obj.getLargeThreads(), 2)
class tWorkspace(unittest.TestCase):
    """Construction, equality and mutation behavior of Utils.Workspace."""
    def test_constructor(self):
        work_dir = "work_dir"
        input_file = "input_file"
        ws = Utils.Workspace.Workspace(work_dir, input_file)
        self.assertEqual(
            [ws.work_dir, ws.input_file, ws.binaries, ws.resources.__class__, ws.errorNotificationEmail],
            [work_dir, input_file, {}, Utils.FultonResources.Resources().__class__, None])
    def test_eq_None(self):
        ws = Utils.Workspace.Workspace("work_dir", "input_file")
        other = None
        self.assertEqual([ws == other, ws != other], [False, True])
    def test_eq_notEq(self):
        ws = Utils.Workspace.Workspace("work_dir", "input_file")
        other = Utils.Workspace.Workspace("diff_dir", "diff_file")
        self.assertEqual([ws == other, ws != other], [False, True])
    def test_eq_eq(self):
        ws = Utils.Workspace.Workspace("work_dir", "input_file")
        other = Utils.Workspace.Workspace("work_dir", "input_file")
        self.assertEqual([ws == other, ws != other], [True, False])
    def test_error_notification_email(self):
        test_email = "address@domain.com"
        ws = Utils.Workspace.Workspace("work_dir", "input_file")
        before_change = ws.errorNotificationEmail
        ws.errorNotificationEmail = test_email
        self.assertEqual([before_change, ws.errorNotificationEmail], [None, test_email])
    def test_add_binary(self):
        ws = Utils.Workspace.Workspace("work_dir", "input_file")
        # Snapshot the binaries dict after each mutation (copies, since the
        # same dict object is mutated in place).
        snapshots = [copy(ws.binaries)]
        ws.addBinary("name1", "path1")
        snapshots.append(copy(ws.binaries))
        ws.addBinary("name2", "path2")
        snapshots.append(copy(ws.binaries))
        ws.addBinary("name1", "path2")
        snapshots.append(copy(ws.binaries))
        self.assertEqual(snapshots, [
            {},
            {"name1": "path1"},
            {"name1": "path1", "name2": "path2"},
            {"name1": "path2", "name2": "path2"}])
| sharpa/OMWare | UnitTests/tUtils.py | Python | gpl-2.0 | 6,805 |
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from tests import IntegrationTestCase
from tests.holodeck import Request
from twilio.base.exceptions import TwilioException
from twilio.http.response import Response
class TokenTestCase(IntegrationTestCase):
    """Integration tests for the Tokens endpoint using the holodeck mock."""
    def test_create_request(self):
        """The client must POST to the Tokens resource URL."""
        # Any response triggers the request; 500 makes the client raise.
        self.holodeck.mock(Response(500, ''))
        with self.assertRaises(TwilioException):
            self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                 .tokens.create()
        self.holodeck.assert_has_request(Request(
            'post',
            'https://api.twilio.com/2010-04-01/Accounts/ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX/Tokens.json',
        ))
    def test_create_response(self):
        """A 201 response body must deserialize into a token instance."""
        self.holodeck.mock(Response(
            201,
            '''
            {
                "account_sid": "ACaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa",
                "date_created": "Fri, 24 Jul 2015 18:43:58 +0000",
                "date_updated": "Fri, 24 Jul 2015 18:43:58 +0000",
                "ice_servers": [
                    {
                        "url": "stun:global.stun:3478?transport=udp",
                        "urls": "stun:global.stun:3478?transport=udp"
                    },
                    {
                        "credential": "5SR2x8mZK1lTFJW3NVgLGw6UM9C0dja4jI/Hdw3xr+w=",
                        "url": "turn:global.turn:3478?transport=udp",
                        "urls": "turn:global.turn:3478?transport=udp",
                        "username": "cda92e5006c7810494639fc466ecc80182cef8183fdf400f84c4126f3b59d0bb"
                    }
                ],
                "password": "5SR2x8mZK1lTFJW3NVgLGw6UM9C0dja4jI/Hdw3xr+w=",
                "ttl": "86400",
                "username": "cda92e5006c7810494639fc466ecc80182cef8183fdf400f84c4126f3b59d0bb"
            }
            '''
        ))
        actual = self.client.api.v2010.accounts(sid="ACXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX") \
                                      .tokens.create()
        self.assertIsNotNone(actual)
| tysonholub/twilio-python | tests/integration/api/v2010/account/test_token.py | Python | mit | 2,152 |
import gc
import upywraptest
def TestRefCount(getObject):
    """Add an object built by *getObject* to a SimpleCollection, then
    repeatedly garbage-collect until the collection holds the only
    reference, reporting whether the object was reclaimed."""
    collection = upywraptest.SimpleCollection()
    collection.Add(getObject())
    for attempt in range(20):
        if collection.Reference(0) == 1:
            print('collect did reclaim object', attempt)
            break
        gc.collect()
    else:
        # 20 collections were not enough to drop the extra references.
        print('collect did not reclaim object')
if __name__ == '__main__':
    # Exercise reference counting with a freshly constructed Simple instance.
    TestRefCount(lambda: upywraptest.Simple(1))
| stinos/micropython-wrap | tests/py/classref.py | Python | mit | 412 |
def print_title():
    """Print the Mentionbot startup banner (scrolls the screen clear with
    newlines first).  Fixes the 'Verison' typo in the version line."""
    print ('\n' * 80 + '*' * 80 + """
 _____ __ .__ __________ __
/ \ ____ _____/ |_|__| ____ ____\______ \ _____/ |_
/ \ / \_/ __ \ / \ __\ |/ _ \ / \| | _// _ \ __\\
/ Y \ ___/| | \ | | ( <_> ) | \ | ( <_> ) |
\____|__ /\___ >___| /__| |__|\____/|___| /______ /\____/|__|
\/ \/ \/ \/ \/
 Version 0.95a
********************************************************************************
 Welcome to Mentionbot! This simple bot scans Reddit for keywords, then
 writes the results to a MySQL database.
 Get the latest version at: http://github.com/MyBagofTricks
 Requirements:
 - python 3.5 (older versions should still work)
 - pyMySQL - https://github.com/PyMySQL/PyMySQL/
 - PRAW - https://praw.readthedocs.org/en/v2.1.20/
 - MySQL compatible database with
 - database named 'mentionbot'
 - table named 'posts'
 - user created with all privileges to the table
********************************************************************************
    """)
def print_add(link, title):
    """Announce a newly added post, truncating its title to 35 characters."""
    message = "[+] POST ADDED! | {} | {}...".format(link, title[0:35])
    print(message)
def print_clear():
    """Announce that the database is being cleared."""
    print("[-] Clearing database...")
def print_not_clear():
    """Announce that the database was left untouched."""
    print("[-] Database was not cleared.")
def print_populating():
    """Announce that existing threads are being loaded from Reddit."""
    print("[-] Populating existing threads.")
def print_loaded(done):
    """Report a successful load with *done* already-populated posts."""
    print("[+] Database successfully loaded. %s post(s) already populated.\n" % done)
def run_msg(c_time, red_sub):
    """Print a scan-status line with the current time and subreddit."""
    print("[-] %s Scanning %s for keyword(s)" % (c_time, red_sub))
def error_gen(e):
    """Print a generic error message for exception *e*."""
    print("[x] ERROR! %s" % e)
def error_nosql(e):
    """Warn that Mentionbot is running without a database connection."""
    message = ("[x] ERROR! %s\n[x] You are currently using Mentionbot without "
               "a database.\n[x] This mode is mostly for testing and not very "
               "useful." % e)
    print(message)
def error_exit():
    """Print the farewell message shown on shutdown."""
    farewell = "\n[*] Exiting Mentionbot.\n[*] Goodbye!"
    print(farewell)
| MyBagofTricks/MentionBot | modules/messages.py | Python | gpl-2.0 | 2,011 |
r"""
This module contains specific inner product matrices for the different bases in
the Chebyshev family of the second kind.
A naming convention is used for the first capital letter for all matrices.
The first letter refers to type of matrix.
- Mass matrices start with `B`
- One derivative start with `C`
- Two derivatives (Laplace) start with `A`
- Four derivatives (Biharmonic) start with `S`
A matrix may consist of different types of test and trialfunctions as long as
they are all in the Chebyshev family, either first or second kind.
The next letters in the matrix name uses the short form for all these
different bases according to
- T = Orthogonal
- CD = CompactDirichlet
- CN = CompactNeumann
- BD = BCDirichlet
- BB = BCBiharmonic
- P1 = Phi1
- P2 = Phi2
So a mass matrix using CompactDirichlet trial and Phi1 test is named
BP1CDmat.
All matrices in this module may be looked up using the 'mat' dictionary,
which takes test and trialfunctions along with the number of derivatives
to be applied to each. As such the mass matrix BTTmat may be looked up
as
>>> from shenfun.chebyshevu.matrices import mat
>>> from shenfun.chebyshevu.bases import Orthogonal as T
>>> B = mat[((T, 0), (T, 0))]
and an instance of the matrix can be created as
>>> B0 = T(10)
>>> BM = B((B0, 0), (B0, 0))
>>> import numpy as np
>>> d = {0: np.pi/2}
>>> [np.all(BM[k] == v) for k, v in d.items()]
[True]
However, this way of creating matrices is not reccommended use. It is far
more elegant to use the TrialFunction/TestFunction interface, and to
generate the matrix as an inner product:
>>> from shenfun import TrialFunction, TestFunction, inner
>>> u = TrialFunction(B0)
>>> v = TestFunction(B0)
>>> BM = inner(u, v)
>>> [np.all(BM[k] == v) for k, v in d.items()]
[True]
To see that this is in fact the BUUmat:
>>> print(BM.__class__)
<class 'shenfun.chebyshevu.matrices.BUUmat'>
"""
#pylint: disable=bad-continuation, redefined-builtin
from __future__ import division
import functools
import numpy as np
import sympy as sp
import scipy.sparse as scp
from shenfun.matrixbase import SpectralMatrix, SparseMatrix
from shenfun.la import TwoDMA
from . import bases
from shenfun.chebyshev import bases as chebbases
# Sympy coordinate symbols used in measure expressions; ``xp`` is declared
# positive so simplifications valid only for x > 0 can be applied.
x = sp.symbols('x', real=True)
xp = sp.symbols('x', real=True, positive=True)
# Short names for instances of bases (see the naming legend in the module
# docstring: U=Orthogonal, CD/CN=compact Dirichlet/Neumann, P1/P2=Phi1/Phi2,
# BCD/BCB=boundary bases, SD/SN=Chebyshev first-kind Shen bases).
U = bases.Orthogonal
CD = bases.CompactDirichlet
CN = bases.CompactNeumann
P1 = bases.Phi1
P2 = bases.Phi2
BCD = bases.BCDirichlet
BCB = bases.BCBiharmonic
SD = chebbases.ShenDirichlet
SN = chebbases.ShenNeumann
class BUUmat(SpectralMatrix):
    r"""Mass matrix :math:`B=(b_{kj}) \in \mathbb{R}^{M \times N}` with

    .. math::

        b_{kj}=(U_j, U_k)_w,

    where both test and trial functions :math:`U_k` belong to
    :class:`.chebyshevu.bases.Orthogonal` and M, N are the dimensions of the
    test and trial spaces.  The matrix is diagonal with constant entry
    :math:`\pi/2`.
    """
    def __init__(self, test, trial, scale=1, measure=1):
        assert isinstance(test[0], U)
        assert isinstance(trial[0], U)
        diagonal = {0: np.pi/2}
        SpectralMatrix.__init__(self, diagonal, test, trial, scale=scale, measure=measure)
class BP1SDmat(SpectralMatrix):
    r"""Mass matrix :math:`B=(b_{kj}) \in \mathbb{R}^{M \times N}`, where

    .. math::

        b_{kj}=(\psi_j, \phi_k)_w,

    where the test function :math:`\phi_k \in` :class:`.chebyshevu.bases.Phi1`, the
    trial :math:`\psi_j \in` :class:`.chebyshev.bases.ShenDirichlet`, and test and
    trial spaces have dimensions of M and N, respectively.
    """
    def __init__(self, test, trial, scale=1, measure=1):
        # Imported lazily; provides the Jacobi recursion helpers
        # (presumably to avoid a circular import at module load — confirm).
        from shenfun.jacobi.recursions import Lmat, half, cn
        assert isinstance(test[0], P1)
        assert isinstance(trial[0], SD)
        N = test[0].N-2
        # Stencil mapping the Dirichlet basis to the orthogonal one, in
        # rectangular (N, N+2) CSR form.
        K = trial[0].stencil_matrix()
        K.shape = (N, N+2)
        K = K.diags('csr')
        B2 = Lmat(2, 0, 2, N, N+2, -half, -half, cn) # B^{(2)_{(2)}}
        if not test[0].is_scaled:
            # Row k is multiplied by (k+2) when the Phi1 basis is not scaled.
            k = np.arange(N+2)
            B2 = SparseMatrix({0: (k[:-2]+2)}, (N, N)).diags('csr')*B2
        M = B2 * K.T
        # The product is banded with diagonals -2, 0, 2 and 4.
        d = {-2: M.diagonal(-2), 0: M.diagonal(0), 2: M.diagonal(2), 4: M.diagonal(4)}
        SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
class AP1SDmat(SpectralMatrix):
    r"""Stiffness matrix :math:`A=(a_{kj}) \in \mathbb{R}^{M \times N}` with

    .. math::

        a_{kj}=(\psi''_j, \phi_k)_w,

    for test functions :math:`\phi_k \in` :class:`.chebyshevu.bases.Phi1` and
    trial functions :math:`\psi_j \in` :class:`.chebyshev.bases.ShenDirichlet`;
    M and N are the test and trial space dimensions.  The matrix is
    upper-bidiagonal (bands 0 and 2) and solved with :class:`.TwoDMA`.
    """
    def __init__(self, test, trial, scale=1, measure=1):
        assert isinstance(test[0], P1)
        assert isinstance(trial[0], SD)
        if test[0].is_scaled:
            d = {0: -1, 2: 1}
        else:
            k = np.arange(test[0].N-2)
            d = {0: -(k+2), 2: k[:-2]+2}
        SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
    def get_solver(self):
        return TwoDMA
class BP1SNmat(SpectralMatrix):
    r"""Mass matrix :math:`B=(b_{kj}) \in \mathbb{R}^{M \times N}`, where

    .. math::

        b_{kj}=(\psi_j, \phi_k)_w,

    where the test function :math:`\phi_k \in` :class:`.chebyshevu.bases.Phi1`, the
    trial function :math:`\psi_j \in` :class:`.chebyshev.bases.ShenNeumann`, and test and
    trial spaces have dimensions of M and N, respectively.
    """
    def __init__(self, test, trial, scale=1, measure=1):
        # Lazily imported Jacobi recursion helpers (same pattern as BP1SDmat).
        from shenfun.jacobi.recursions import Lmat, half, cn
        assert isinstance(test[0], P1)
        assert isinstance(trial[0], SN)
        N = test[0].N-2
        # Stencil mapping the Neumann basis to the orthogonal one.
        K = trial[0].stencil_matrix()
        K.shape = (N, N+2)
        K = K.diags('csr')
        B2 = Lmat(2, 0, 2, N, N+2, -half, -half, cn) # B^{(2)_{(2)}}
        if not test[0].is_scaled:
            # Row k is multiplied by (k+2) when Phi1 is not scaled; only the
            # first N entries of k are used (k has length test[0].N == N+2).
            k = np.arange(test[0].N)
            B2 = SparseMatrix({0: (k[:-2]+2)}, (N, N)).diags('csr')*B2
        M = B2 * K.T
        # The product is banded with diagonals -2, 0, 2 and 4.
        d = {-2: M.diagonal(-2), 0: M.diagonal(0), 2: M.diagonal(2), 4: M.diagonal(4)}
        SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
class AP1SNmat(SpectralMatrix):
    r"""Stiffness matrix :math:`A=(a_{kj}) \in \mathbb{R}^{M \times N}` with

    .. math::

        a_{kj}=(\psi''_j, \phi_k)_w,

    for test functions :math:`\phi_k \in` :class:`.chebyshevu.bases.Phi1` and
    trial functions :math:`\psi_j \in` :class:`.chebyshev.bases.ShenNeumann`;
    M and N are the test and trial space dimensions.  The matrix is
    upper-bidiagonal (bands 0 and 2) and solved with :class:`.TwoDMA`.
    """
    def __init__(self, test, trial, scale=1, measure=1):
        assert isinstance(test[0], P1)
        assert isinstance(trial[0], SN)
        k = np.arange(test[0].N-2)
        if test[0].is_scaled:
            d = {0: -(k/(k+2))**2, 2: 1}
        else:
            d = {0: -k**2/(k+2), 2: k[:-2]+2}
        SpectralMatrix.__init__(self, d, test, trial, scale=scale, measure=measure)
    def get_solver(self):
        return TwoDMA
class _Chebumatrix(SpectralMatrix):
    """Fallback matrix for basis pairs with no predefined implementation.

    The empty dict means entries are generated by the base class
    (Vandermonde-type computations, per the _ChebuMatDict docstring).
    """
    def __init__(self, test, trial, scale=1, measure=1):
        SpectralMatrix.__init__(self, {}, test, trial, scale=scale, measure=measure)
class _ChebuMatDict(dict):
    """Dictionary of inner product matrices

    Matrices that are missing keys are generated from Vandermonde type
    computations.
    """
    def __missing__(self, key):
        # NOTE(review): assumes a long key carries the measure at index 3;
        # a 3-tuple key would raise IndexError here — confirm the key layout
        # used by callers.
        measure = 1 if len(key) == 2 else key[3]
        # Cache the partially-applied fallback class for this key.
        c = functools.partial(_Chebumatrix, measure=measure)
        self[key] = c
        return c
    def __getitem__(self, key):
        matrix = dict.__getitem__(self, key)
        #assert key[0][1] == 0, 'Test cannot be differentiated (weighted space)'
        return matrix
# Define dictionary to hold all predefined matrices
# When looked up, missing matrices will be generated automatically
# Keys are ((test_basis, test_derivatives), (trial_basis, trial_derivatives)).
mat = _ChebuMatDict({
    ((U, 0), (U, 0)): BUUmat,
    ((P1, 0), (SD, 0)): BP1SDmat,
    ((P1, 0), (SN, 0)): BP1SNmat,
    ((P1, 0), (SD, 2)): AP1SDmat,
    ((P1, 0), (SN, 2)): AP1SNmat,
})
| spectralDNS/shenfun | shenfun/chebyshevu/matrices.py | Python | bsd-2-clause | 8,007 |
from addondev import initializer
import os
# Initialize mock kodi environment so the codequick add-on can be imported
# and exercised by tests outside a real Kodi installation.
initializer(os.path.join(os.path.dirname(os.path.dirname(__file__)), "script.module.codequick"))
| willforde/script.module.codequick | tests/__init__.py | Python | gpl-2.0 | 176 |
# Markov Logic Networks - Grounding
#
# (C) 2013 by Daniel Nyga (nyga@cs.tum.edu)
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
from collections import defaultdict
from dnutils import logs
from .fastconj import FastConjunctionGrounding
from ..util import unifyDicts, dict_union
from ..constants import HARD
from ..errors import SatisfiabilityException
from ...utils.undo import Ref, Number, List, ListDict, Boolean
from ...logic.common import Logic
from ...utils.multicore import with_tracing, checkmem
import types
from multiprocessing.pool import Pool
# this readonly global is for multiprocessing to exploit copy-on-write
# on linux systems
global_bpll_grounding = None
logger = logs.getlogger(__name__)
# multiprocessing function
def create_formula_groundings(formula, unsatfailure=True):
    """
    Worker function (executed in a multiprocessing pool) that grounds a
    single formula and collects its BPLL sufficient statistics.

    Reads the module-level ``global_bpll_grounding`` (set in the parent
    process before forking, shared via copy-on-write).

    :param formula: the (unground) formula to process
    :param unsatfailure: if True, raise SatisfiabilityException when a hard
                         constraint is violated by the evidence
    :return: list of ``(formula index, stat)`` pairs, where ``stat`` is a
             list of ``(variable index, value index, truth)`` triples
    """
    checkmem()
    results = []
    # fast path: literal conjunctions can be grounded with tree pruning
    if global_bpll_grounding.mrf.mln.logic.islitconj(formula):
        for res in global_bpll_grounding.itergroundings_fast(formula):
            checkmem()
            results.append(res)
    else:
        # generic path: enumerate every grounding of the formula
        for gf in formula.itergroundings(global_bpll_grounding.mrf, simplify=False):
            checkmem()
            stat = []
            for gndatom in gf.gndatoms():
                world = list(global_bpll_grounding.mrf.evidence)
                var = global_bpll_grounding.mrf.variable(gndatom)
                # evaluate the ground formula under every value of the variable
                for validx, value in var.itervalues():
                    var.setval(value, world)
                    truth = gf(world)
                    if truth != 0:
                        stat.append((var.idx, validx, truth))
                    elif unsatfailure and gf.weight == HARD and gf(global_bpll_grounding.mrf.evidence) != 1:
                        print()
                        gf.print_structure(global_bpll_grounding.mrf.evidence)
                        raise SatisfiabilityException('MLN is unsatisfiable due to hard constraint violation {} (see above)'.format(global_bpll_grounding.mrf.formulas[gf.idx]))
            results.append((gf.idx, stat))
    return results
class BPLLGroundingFactory(FastConjunctionGrounding):
    """
    Grounding factory for efficient grounding of conjunctions for
    pseudo-likelihood learning.
    """

    def __init__(self, mrf, formulas=None, cache=None, **params):
        # simplify/unsatfailure are forced off: BPLL needs statistics for all
        # candidate truth values, not just the evidence-simplified ones
        FastConjunctionGrounding.__init__(self, mrf, simplify=False, unsatfailure=False, formulas=formulas, cache=cache, **params)
        # formula index -> {variable index -> per-value truth counts}
        self._stat = {}
        # variable index -> set of formula indices the variable occurs in
        self._varidx2fidx = defaultdict(set)

    def itergroundings_fast(self, formula):
        """
        Recursively generate the groundings of a conjunction. Prunes the
        generated grounding tree in case that a formula cannot be rendered
        true by subsequent literals.
        """
        # make a copy of the formula to avoid side effects
        formula = formula.ground(self.mrf, {}, partial=True)
        children = [formula] if not hasattr(formula, 'children') else formula.children
        # make equality constraints access their variable domains
        # this is a _really_ dirty hack but it does the job ;-)
        vardoms = formula.vardoms()
        def eqvardoms(self, v=None, c=None):
            if v is None: v = defaultdict(set)
            for a in self.args:
                if self.mln.logic.isvar(a):
                    v[a] = vardoms[a]
            return v
        for child in children:
            if isinstance(child, Logic.Equality):
                # patch the closure above onto each equality instance
                setattr(child, 'vardoms', types.MethodType(eqvardoms, child))
        # process literals in an order that maximizes pruning opportunities
        lits = sorted(children, key=self._conjsort)
        for gf in self._itergroundings_fast(formula, lits, 0, assignment={}, variables=[]):
            yield gf

    def _itergroundings_fast(self, formula, constituents, cidx, assignment, variables, falsevar=None, level=0):
        # Depth-first expansion of the grounding tree; `assignment` holds the
        # variable bindings fixed so far, `variables` the statistics collected
        # along this path, `falsevar` the (single) variable whose evidence
        # value renders a literal false on this path (if any).
        if cidx == len(constituents):
            # no remaining literals to ground. return the ground formula
            # and statistics
            stat = [(varidx, validx, count) for (varidx, validx, count) in variables]
            yield formula.idx, stat
            return
        c = constituents[cidx]
        # go through all remaining groundings of the current constituent
        for varass in c.itervargroundings(self.mrf, partial=assignment):
            gnd = c.ground(self.mrf, dict_union(varass, assignment))
            # check if it violates a hard constraint
            if formula.weight == HARD and gnd(self.mrf.evidence) < 1:
                raise SatisfiabilityException('MLN is unsatisfiable by evidence due to hard constraint violation {} (see above)'.format(global_bpll_grounding.mrf.formulas[formula.idx]))
            if isinstance(gnd, Logic.Equality):
                # if an equality grounding is false in a conjunction, we can
                # stop since the conjunction cannot be rendered true in any
                # grounding that follows
                if gnd.truth(None) == 0: continue
                for gf in self._itergroundings_fast(formula, constituents, cidx + 1, dict_union(assignment, varass),
                                                    variables, falsevar, level + 1):
                    yield gf
            else:
                var = self.mrf.variable(gnd.gndatom)
                world_ = list(self.mrf.evidence)
                stat = []
                skip = False
                falsevar_ = falsevar
                vars_ = list(variables)
                for validx, value in var.itervalues():
                    var.setval(value, world_)
                    truth = gnd(world_)
                    if truth == 0 and value == var.evidence_value():
                        # if the evidence value renders the current
                        # consituent false and there was already a false
                        # literal in the grounding path, we can prune the
                        # tree since no grounding will be true
                        if falsevar is not None and falsevar != var.idx:
                            skip = True
                            break
                        else:
                            # if there was no literal false so far, we
                            # collect statistics only for the current literal
                            # and only if all future literals will be true
                            # by evidence
                            vars_ = []
                            falsevar_ = var.idx
                    if truth > 0 and falsevar is None:
                        stat.append((var.idx, validx, truth))
                if falsevar is not None and falsevar == var.idx:
                    # in case of non-mutual exclusive values take only the
                    # values that render all literals true
                    # example: soft-functional constraint with
                    # !foo(?x) ^ foo(?y), x={X,Y,Z} where the evidence
                    # foo(Z) is true
                    # here the grounding !foo(X) ^ foo(Y) is false:
                    # !foo(X) is true for foo(Z) and foo(Y) and
                    # (!foo(Z) ^ !foox(X) ^ !foo(Y))
                    # foo(Y) is true for foo(Y)
                    # both are only true for foo(Y)
                    # NOTE(review): `stat` is only filled when falsevar is
                    # None, so in this branch it is always empty and skip
                    # always becomes True -- confirm this is intended.
                    stat = set(variables).intersection(stat)
                    skip = not bool(stat)  # skip if no values remain
                if skip: continue
                for gf in self._itergroundings_fast(formula, constituents, cidx + 1, dict_union(assignment, varass), vars_ + stat, falsevar=falsevar_, level=level + 1):
                    yield gf

    def _itergroundings(self, simplify=False, unsatfailure=False):
        # Grounds all formulas (optionally in parallel) and accumulates the
        # BPLL statistics into self._stat / self._varidx2fidx.
        global global_bpll_grounding
        global_bpll_grounding = self
        if self.multicore:
            # maxtasksperchild=1 recycles workers to bound their memory usage
            pool = Pool(maxtasksperchild=1)
            try:
                for gndresult in pool.imap(with_tracing(create_formula_groundings), self.formulas):
                    for fidx, stat in gndresult:
                        for (varidx, validx, val) in stat:
                            self._varidx2fidx[varidx].add(fidx)
                            self._addstat(fidx, varidx, validx, val)
                        checkmem()
                    yield None
            except Exception as e:
                logger.error('Error in child process. Terminating pool...')
                pool.close()
                raise e
            finally:
                pool.terminate()
                pool.join()
        else:
            # single-process fallback: same accumulation, no pool
            for gndresult in map(create_formula_groundings, self.formulas):
                for fidx, stat in gndresult:
                    for (varidx, validx, val) in stat:
                        self._varidx2fidx[varidx].add(fidx)
                        self._addstat(fidx, varidx, validx, val)
                yield None

    def _addstat(self, fidx, varidx, validx, inc=1):
        # Increment the per-value counter of variable `varidx` for formula
        # `fidx`, lazily creating the nested structures.
        if fidx not in self._stat:
            self._stat[fidx] = {}
        d = self._stat[fidx]
        if varidx not in d:
            d[varidx] = [0] * self.mrf.variable(varidx).valuecount()
        d[varidx][validx] += inc
# # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # # #
# here comes some very experimental code. which is currently not in use.
class FormulaGrounding(object):
    """
    Represents a particular (partial) grounding of a formula with respect to
    _one_ predicate and in terms of disjoint sets of variables occurring in
    that formula. A grounding of the formula is represented as a list of
    assignments of the independent variable sets.
    It represents a node in the search tree for weighted SAT solving.
    Additional fields:
    - depth: the depth of this formula grounding (node) in the search tree
             The root node (the formula with no grounded variable has
             depth 0.
    - children: list of formula groundings that have been generated from this
                fg.
    """

    def __init__(self, formula, mrf, parent=None, assignment=None):
        """
        Instantiates the formula grounding for a given
        - formula: the formula grounded in this node
        - mrf: the MRF associated to this problem
        - parent: the formula grounding this fg has been created from
        - assignment: dictionary mapping variables to their values
        """
        self.mrf = mrf
        self.formula = formula
        # undoable wrappers (Ref/Number/Boolean/List/ListDict) support
        # epoch-wise rollback of the search tree
        self.parent = Ref(parent)
        self.trueGroundings = Number(0.)
        self.processed = Boolean(False)
        if parent is None:
            self.depth = 0
        else:
            self.depth = parent.depth + 1
        self.children = List()
        self.assignment = assignment
        self.domains = ListDict()
        if parent is None:
            # root node: take variable domains from the MRF itself
            for var in self.formula.getVariables(self.mrf.mln):
                self.domains.extend(var, list(self.mrf.domains[
                    self.formula.getVarDomain(
                        var, self.mrf.mln)]))
        else:
            # inner node: inherit (a copy of) the parent's remaining domains
            for (v, d) in parent.domains.items():
                self.domains.extend(v, list(d))
        self.domains.epochEndsHere()

    def epochEndsHere(self):
        # mark a rollback point on every undoable member
        for mem in (
                self.parent, self.trueGroundings, self.children, self.domains,
                self.processed):
            mem.epochEndsHere()

    def undoEpoch(self):
        # roll every undoable member back to the previous epoch
        for mem in (
                self.parent, self.trueGroundings, self.children, self.domains,
                self.processed):
            mem.undoEpoch()

    def countGroundings(self):
        """
        Computes the number of ground formulas subsumed by this
        FormulaGrounding based on the domain sizes of the free (unbound)
        variables.
        """
        gf_count = 1
        for var in self.formula.getVariables(self.mrf):
            domain = self.mrf.domains[self.formula.getVarDomain(var, self.mrf)]
            gf_count *= len(domain)
        return gf_count

    def ground(self, assignment=None):
        """
        Takes an assignment of _one_ particular variable and
        returns a new FormulaGrounding with that assignment. If
        the assignment renders the formula true or false, then
        the number of groundings rendered true is returned.
        """
        # calculate the number of ground formulas resulting from
        # the remaining set of free variables
        if assignment is None:
            assignment = {}
        gf_count = 1
        for var in set(self.formula.getVariables(self.mrf.mln)).difference(
                list(assignment.keys())):
            domain = self.domains[var]
            if domain is None: return 0.
            gf_count *= len(domain)
        gf = self.formula.ground(self.mrf, assignment,
                                 allowPartialGroundings=True)
        gf.weight = self.formula.weight
        # extract the single (variable, value) pair from the assignment
        # NOTE(review): if `assignment` is empty, var_name/val are never
        # bound and the next line raises NameError -- callers appear to
        # always pass exactly one binding; confirm.
        for var_name, val in assignment.items(): break
        self.domains.drop(var_name, val)
        # if the simplified gf reduces to a TrueFalse instance, then
        # we return the no of groundings if it's true, or 0 otherwise.
        truth = gf.isTrue(self.mrf.evidence)
        if truth in (True, False):
            if not truth:
                trueGFCounter = 0.0
            else:
                trueGFCounter = gf_count
            self.trueGroundings += trueGFCounter
            return trueGFCounter
        # if the truth value cannot be determined yet, we return
        # a new formula grounding with the given assignment
        else:
            new_grounding = FormulaGrounding(gf, self.mrf, parent=self,
                                             assignment=assignment)
            self.children.append(new_grounding)
            return new_grounding

    def __str__(self):
        return str(self.assignment) + '->' + str(self.formula) + str(
            self.domains)  # str(self.assignment)

    def __repr__(self):
        return str(self)
class SmartGroundingFactory(object):
    """
    Implements a factory for generating the groundings of one formula.
    The groundings are created incrementally with one
    particular ground atom being presented at a time.
    fields:
    - formula: the (ungrounded) formula representing the root of the
               search tree
    - mrf: the respective MRF
    - root: a FormulaGrounding instance representing the root of the
            tree, i.e. an ungrounded formula
    - costs: the costs accumulated so far
    - depth2fgs: mapping from a depth of the search tree to the
                 corresponding list of FormulaGroundings
    - vars_processed: list of variable names that have already been
                      processed so far
    - values_processed: mapping from a variable name to the list of values
                        of that vaiable that
                        have already been assigned so far.
    This class maintains a stack of all its fields in order allow undoing
    groundings that have been performed once.
    """

    def __init__(self, formula, mrf):
        """
        formula might be a formula or a FormulaGrounding instance.
        """
        self.mrf = mrf
        self.costs = .0
        if isinstance(formula, Logic.Formula):
            self.formula = formula
            self.root = FormulaGrounding(formula, mrf)
        elif isinstance(formula, FormulaGrounding):
            self.root = formula
            self.formula = formula.formula
        # undoable containers so a grounding step can be rolled back epoch-wise
        self.values_processed = ListDict()
        self.variable_stack = List(None)
        self.var2fgs = ListDict({None: [self.root]})
        self.gndAtom2fgs = ListDict()
        self.manipulatedFgs = List()

    def epochEndsHere(self):
        # mark a rollback point on the factory and on every fg touched
        # during the last ground() call
        for mem in (self.values_processed, self.variable_stack, self.var2fgs,
                    self.gndAtom2fgs, self.manipulatedFgs):
            mem.epochEndsHere()
        for fg in self.manipulatedFgs:
            fg.epochEndsHere()

    def undoEpoch(self):
        # roll back the touched fgs first, then the factory's own containers
        for fg in self.manipulatedFgs:
            fg.undoEpoch()
        for mem in (self.values_processed, self.variable_stack, self.var2fgs,
                    self.gndAtom2fgs, self.manipulatedFgs):
            mem.undoEpoch()

    def ground(self, gndAtom):
        """
        Expects a ground atom and creates all groundings
        that can be derived by it in terms of FormulaGroundings.

        Returns the cost delta accumulated by this grounding step.
        """
        self.manipulatedFgs.clear()
        # get all variable assignments of matching literals in the formula
        var_assignments = {}
        for lit in self.formula.iterLiterals():
            assignment = self.gndAtom2Assignment(lit, gndAtom)
            if assignment is not None:
                unifyDicts(var_assignments, assignment)
        cost = .0
        # first evaluate formula groundings that contain
        # this gnd atom as an artifact
        min_depth = None
        min_depth_fgs = []
        for fg in self.gndAtom2fgs.get(gndAtom, []):
            if len(self.variable_stack) <= fg.depth:
                continue
            if fg.processed.value:
                continue
            truth = fg.formula.isTrue(self.mrf.evidence)
            if truth is not None:
                # truth is now decided: retire this fg from the tree
                cost -= fg.trueGroundings.value
                if not fg in self.manipulatedFgs:
                    self.manipulatedFgs.append(fg)
                fg.processed.set(True)
                self.var2fgs.drop(self.variable_stack[fg.depth], fg)
                if not fg.parent.obj in self.manipulatedFgs:
                    self.manipulatedFgs.append(fg.parent.obj)
                fg.parent.obj.children.remove(
                    fg)  # this is just for the visualization/ no real functionality
                # track the shallowest retired fgs; they aggregate the costs
                if fg.depth == min_depth or min_depth is None:
                    min_depth_fgs.append(fg)
                    min_depth = fg.depth
                if fg.depth < min_depth:
                    min_depth = fg.depth
                    min_depth_fgs = []
                    min_depth_fgs.append(fg)
        for fg in min_depth_fgs:
            # add the costs which are aggregated by the root of the subtree
            if fg.formula.isTrue(fg.mrf.evidence) == False:
                cost += fg.formula.weight * fg.countGroundings()
            fg.trueGroundings.set(cost)
        # straighten up the variable stack and formula groundings
        # since they might have become empty
        for var in list(self.variable_stack):
            if self.var2fgs[var] is None:
                self.variable_stack.remove(var)
        for var, value in var_assignments.items():
            # skip the variables with values that have already been processed
            if not var in self.variable_stack:
                depth = len(self.variable_stack)
            else:
                depth = self.variable_stack.index(var)
            queue = list(self.var2fgs[self.variable_stack[depth - 1]])
            while len(queue) > 0:
                fg = queue.pop()
                # first hinge the new variable grounding to all possible
                # parents, i.e. all FormulaGroundings with depth - 1...
                if fg.depth < depth:
                    vars_and_values = [{var: value}]
                # ...then hinge all previously seen subtrees to the newly
                # created formula groundings...
                elif fg.depth >= depth and fg.depth < len( self.variable_stack) - 1:
                    vars_and_values = [{self.variable_stack[fg.depth + 1]: v}
                                       for v in self.values_processed[
                                           self.variable_stack[fg.depth + 1]]]
                # ...and finally all variable values that are not part of
                # the subtrees i.e. variables that are currently NOT in the
                # variable_stack (since they have been removed due to falsity
                # of a formula grounding).
                else:
                    vars_and_values = []
                    varNotInTree = None
                    for varNotInTree in [v for v in
                                         list(self.values_processed.keys()) if
                                         v not in self.variable_stack]: break
                    if varNotInTree is None: continue
                    values = self.values_processed[varNotInTree]
                    for v in values:
                        vars_and_values.append({varNotInTree: v})
                for var_value in vars_and_values:
                    # extract the single (variable, value) pair
                    for var_name, val in var_value.items(): break
                    if not fg.domains.contains(var_name, val): continue
                    gnd_result = fg.ground(var_value)
                    if not fg in self.manipulatedFgs:
                        self.manipulatedFgs.append(fg)
                    # if the truth value of a grounding cannot be determined...
                    if isinstance(gnd_result, FormulaGrounding):
                        # collect all ground atoms that have been created as
                        # as artifacts for future evaluation
                        artifactGndAtoms = [a for a in
                                            gnd_result.formula.getGroundAtoms()
                                            if not a == gndAtom]
                        for artGndAtom in artifactGndAtoms:
                            self.gndAtom2fgs.put(artGndAtom, gnd_result)
                        if not var_name in self.variable_stack:
                            self.variable_stack.append(var_name)
                        self.var2fgs.put(self.variable_stack[gnd_result.depth],
                                         gnd_result)
                        queue.append(gnd_result)
                    else:
                        # ...otherwise it's true/false; add its costs and
                        # discard it.
                        if self.formula.isHard and gnd_result > 0.:
                            gnd_result = float('inf')
                        cost += gnd_result
            self.values_processed.put(var, value)
        return cost

    def printTree(self):
        # debug helper: dump the grounding tree, two dashes per depth level
        queue = [self.root]
        print('---')
        while len(queue) > 0:
            n = queue.pop()
            space = ''
            for _ in range(n.depth): space += '--'
            print(space + str(n))
            queue.extend(n.children.list)
        print('---')

    def gndAtom2Assignment(self, lit, atom):
        """
        Returns None if the literal and the atom do not match.
        """
        if type(lit) is Logic.Equality or \
                lit.predName != atom.predName:
            return None
        assignment = {}
        for p1, p2 in zip(lit.params, atom.params):
            if self.mrf.mln.logic.isVar(p1):
                assignment[p1] = p2
            elif p1 != p2:
                return None
        return assignment
| danielnyga/pracmln | python3/pracmln/mln/grounding/bpll.py | Python | bsd-2-clause | 23,873 |
# Do not delete - marks this directory as a python package.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import basic, broker, example, exchange, queue, testlib, tx
| mbroadst/debian-qpid-python | qpid_tests/broker_0_8/__init__.py | Python | apache-2.0 | 913 |
#!/usr/bin/env python
## \file adjoint.py
# \brief python package for running adjoint problems
# \author T. Lukaczyk, F. Palacios
# \version 5.0.0 "Raven"
#
# SU2 Lead Developers: Dr. Francisco Palacios (Francisco.D.Palacios@boeing.com).
# Dr. Thomas D. Economon (economon@stanford.edu).
#
# SU2 Developers: Prof. Juan J. Alonso's group at Stanford University.
# Prof. Piero Colonna's group at Delft University of Technology.
# Prof. Nicolas R. Gauger's group at Kaiserslautern University of Technology.
# Prof. Alberto Guardone's group at Polytechnic University of Milan.
# Prof. Rafael Palacios' group at Imperial College London.
# Prof. Edwin van der Weide's group at the University of Twente.
# Prof. Vincent Terrapon's group at the University of Liege.
#
# Copyright (C) 2012-2017 SU2, the open-source CFD code.
#
# SU2 is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# SU2 is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with SU2. If not, see <http://www.gnu.org/licenses/>.
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
import os, sys, shutil, copy
from .. import io as su2io
from merge import merge as su2merge
from interface import CFD as SU2_CFD
# ----------------------------------------------------------------------
# Adjoint Simulation
# ----------------------------------------------------------------------
def adjoint( config ):
    """ info = SU2.run.adjoint(config)

        Runs an adjoint analysis with:
            SU2.run.decomp()
            SU2.run.CFD()
            SU2.run.merge()

        Assumptions:
            Does not run Gradient Projection
            Does not rename restart filename to solution filename
            Adds 'adjoint' suffix to convergence filename

        Outputs:
            info - SU2 State with keys:
                HISTORY.ADJOINT_NAME
                FILES.ADJOINT_NAME

        Updates:
            config.MATH_PROBLEM

        Executes in:
            ./
    """

    # work on a private copy so the caller's config is not clobbered
    konfig = copy.deepcopy(config)

    # pick the adjoint formulation from the gradient method
    if konfig.get('GRADIENT_METHOD', 'CONTINUOUS_ADJOINT') == 'DISCRETE_ADJOINT':
        konfig['MATH_PROBLEM'] = 'DISCRETE_ADJOINT'
    else:
        konfig['MATH_PROBLEM'] = 'CONTINUOUS_ADJOINT'

    # keep the adjoint convergence history separate from the direct run
    konfig['CONV_FILENAME'] = konfig['CONV_FILENAME'] + '_adjoint'

    # run the adjoint solver
    SU2_CFD(konfig)

    # merge the (possibly decomposed) adjoint solution
    konfig['SOLUTION_ADJ_FILENAME'] = konfig['RESTART_ADJ_FILENAME']
    su2merge(konfig)

    # resolve the convergence history filename
    plot_format = konfig['OUTPUT_FORMAT']
    plot_extension = su2io.get_extension(plot_format)
    history_filename = konfig['CONV_FILENAME'] + plot_extension
    special_cases = su2io.get_specialCases(konfig)

    # read the convergence history of this run
    history = su2io.read_history(history_filename)

    # propagate the chosen settings back to the caller's config
    config.update({'MATH_PROBLEM': konfig['MATH_PROBLEM'],
                   'OBJECTIVE_FUNCTION': konfig['OBJECTIVE_FUNCTION']})

    # name of the suffixed adjoint restart file
    objective = konfig['OBJECTIVE_FUNCTION']
    adj_title = 'ADJOINT_' + objective
    suffix = su2io.get_adjointSuffix(objective)
    restart_name = su2io.add_suffix(konfig['RESTART_FLOW_FILENAME'], suffix)

    # assemble the state object handed back to the caller
    info = su2io.State()
    info.FILES[adj_title] = restart_name
    info.HISTORY[adj_title] = history

    return info
| cspode/SU2 | SU2_PY/SU2/run/adjoint.py | Python | lgpl-2.1 | 4,118 |
import os
import boto3
from chalice import Chalice
from chalicelib import db
# Chalice application object; debug mode surfaces tracebacks in HTTP responses.
app = Chalice(app_name='mytodo')
app.debug = True
# Lazily-initialized module-level handle to the DynamoDB-backed todo store.
_DB = None
def get_app_db():
    """Return the shared todo store, creating it on first use.

    The DynamoDB table name is read from the APP_TABLE_NAME environment
    variable; the constructed store is cached in the module-level _DB.
    """
    global _DB
    if _DB is None:
        table_name = os.environ['APP_TABLE_NAME']
        table = boto3.resource('dynamodb').Table(table_name)
        _DB = db.DynamoDBTodo(table)
    return _DB
@app.route('/todos', methods=['GET'])
def get_todos():
    """List every stored todo item."""
    store = get_app_db()
    return store.list_items()
@app.route('/todos', methods=['POST'])
def add_new_todo():
    """Create a todo from the JSON body and return its uid.

    'description' is required; 'metadata' is optional.
    """
    payload = app.current_request.json_body
    description = payload['description']
    metadata = payload.get('metadata')
    return get_app_db().add_item(description=description, metadata=metadata)
@app.route('/todos/{uid}', methods=['GET'])
def get_todo(uid):
    """Fetch a single todo by its uid."""
    store = get_app_db()
    return store.get_item(uid)
@app.route('/todos/{uid}', methods=['DELETE'])
def delete_todo(uid):
    """Delete the todo with the given uid."""
    store = get_app_db()
    return store.delete_item(uid)
@app.route('/todos/{uid}', methods=['PUT'])
def update_todo(uid):
    """Apply a partial update (description/state/metadata) to a todo."""
    payload = app.current_request.json_body
    get_app_db().update_item(
        uid,
        description=payload.get('description'),
        state=payload.get('state'),
        metadata=payload.get('metadata'))
| aws-samples/chalice-workshop | code/todo-app/part1/04-add-auth/app.py | Python | apache-2.0 | 1,143 |
'''
Created on Feb 4, 2013
@author: atadych
'''
from xml.etree import ElementTree
import sys
import numpy
def isodd(num):
    """Return True if num is odd, False otherwise."""
    # bool() replaces the obsolete `x and True or False` idiom; same result,
    # clearer intent (works for negative numbers too, since -n & 1 is 0 or 1).
    return bool(num & 1)
class CptNodesHolder:
    '''
    Parses xdsl file and holds all the nodes as dictionary
    '''

    def __init__(self, filename=None):
        #dictionary of CptNode objects
        self.nodes = {}
        if filename:
            self.parse(filename)

    def __str__(self):
        # NOTE(review): returns the dict itself rather than a string, so
        # str(holder) raises TypeError -- should probably be
        # str(self.nodes); confirm before relying on it.
        return self.nodes

    def __repr__(self):
        return self.__str__()

    def parse(self,filename):
        '''Parses the file and fills out the dictionary nodes'''
        try:
            tree = ElementTree.parse(filename)
            #smile entry
            root = tree.getroot()
            for entry in root:
                if entry.tag == 'nodes':
                    for elem in entry:
                        # each <cpt id="..."> element becomes one CptNode
                        if elem.tag == 'cpt' and 'id' in elem.attrib:
                            node_id = elem.attrib['id']
                            node = CptNode(node_id=node_id)
                            for child in elem:
                                #add states
                                if child.tag == 'state' and 'id' in child.attrib:
                                    node.states.append(child.attrib['id'])
                                #add parents
                                elif child.tag == 'parents':
                                    node.parents.append(child.text)
                                elif child.tag == 'probabilities':
                                    # first half = negative, second half = positive
                                    vals = self._parse_probabilities(child.text)
                                    node.neg_probabilities = vals[0]
                                    node.pos_probabilities = vals[1]
                            self.nodes[node_id] = node
                            del node
        except IOError:
            print 'Can\'t open the file: %s.\nPlease check if file exists and if it has right permission.' % filename

    def _parse_probabilities(self,prob_str):
        '''
        Splits the prob_str values into 2 lists of float values

        Returns [None, None] for empty input or an odd number of values.
        '''
        def_vals = [None,None]
        if prob_str == None or len(prob_str.strip())==0:
            return def_vals
        vals = prob_str.split()
        strlen = len(vals)
        if isodd(strlen):
            print 'Incorrect # of probabilities for this str: %s' % prob_str
            return def_vals
        #Convert to float
        float_vals = [float(s) for s in vals]
        # integer division (Python 2): split into negative/positive halves
        return [float_vals[0:strlen/2],float_vals[strlen/2:]]

    def get_node(self,node_id):
        ''' Returns CptNode obj '''
        # returns None implicitly for unknown ids
        if node_id in self.nodes:
            return self.nodes[node_id]

    def get_probabilities(self,node_id):
        ''' Returns list of probabilities for given node_id '''
        if node_id in self.nodes:
            return self.nodes[node_id].get_probabilities()

    def get_nodes_ids(self):
        ''' Returns all nodes ids in the current holder '''
        return self.nodes.keys()
class CptNode:
    '''
    A single CPT node: its identifier, parent/state ids and the two
    halves (negative/positive) of its probability table.
    '''

    def __init__(self,node_id=None):
        self.node_id = node_id
        # ids of parent nodes
        self.parents = []
        #list of state ids
        self.states = []
        # probability table, split into positive and negative halves
        self.pos_probabilities = []
        self.neg_probabilities = []

    def __str__(self):
        return '%s, states:%s, pos:%s, neg:%s' % (
            self.node_id, self.states, self.pos_probabilities,
            self.neg_probabilities)

    def __repr__(self):
        return self.__str__()

    def get_probabilities(self):
        '''Return the [negative, positive] probability lists.'''
        return [self.neg_probabilities, self.pos_probabilities]

    def get_logratios(self):
        '''Return log(p_neg / p_pos) per entry, or 0 where either side is 0.'''
        ratios = []
        for pneg, ppos in zip(self.neg_probabilities, self.pos_probabilities):
            if ppos == 0 or pneg == 0:
                ratios.append(0)
            else:
                ratios.append(numpy.log(pneg/ppos))
        return ratios
if __name__ == '__main__':
    from optparse import OptionParser
    usage = "usage: %prog [options]"
    parser = OptionParser(usage, version="%prog dev-unreleased")
    parser.add_option("-i", "--xdsl-file", dest="xdsl", help="XDSL file", metavar="FILE")
    parser.add_option("-o", "--out-file", dest="out", help="Output file", metavar="FILE")
    (options, args) = parser.parse_args()
    if options.xdsl is None:
        sys.stderr.write("--xdsl file is required.\n")
        sys.exit()
    filename = options.xdsl
    nodes = CptNodesHolder(filename=filename)
    if options.out:
        # dump a tab-separated table: one *_neg/*_pos column pair per node,
        # header row (i == -1) followed by up to 7 probability rows
        out_file = open(options.out, 'w')
        # such bad code...
        dsets = nodes.get_nodes_ids()
        for i in xrange(-1,7):
            for dataset in dsets:
                if i==-1:
                    out_file.write(dataset + '_neg' + '\t')
                    out_file.write(dataset + '_pos' + '\t')
                else:
                    node = nodes.get_node(dataset)
                    probs = node.get_probabilities()
                    if i < len(probs[1]):
                        out_file.write(str(probs[0][i]) + '\t')
                        out_file.write(str(probs[1][i]) + '\t')
                    else:
                        # pad nodes with fewer than 7 probability entries
                        out_file.write('NA\tNA\t')
            out_file.write('\n')
        out_file.close()
    # print every node that has at least one positive log-ratio
    # NOTE(review): max(befs) raises ValueError for a node with an empty
    # probability table -- confirm inputs always carry probabilities.
    for s in nodes.get_nodes_ids():
        node = nodes.get_node(s)
        befs = node.get_logratios()
        llsum = 0
        for i in range(len(befs)):
            llsum += (befs[i])
        if max(befs) > 0:
            print s, llsum, [x for x in befs if x > 0]
| FunctionLab/function | flib/core/xdsl.py | Python | gpl-3.0 | 5,740 |
class PermissionDenied(Exception):
    """Raised when the caller is not allowed to perform the requested action."""

    def __init__(self, detail=None):
        # Optional human-readable explanation for API error handlers.
        self.detail = detail
class NotFound(Exception):
    """Raised when a requested resource does not exist."""

    def __init__(self, detail=None):
        # Optional human-readable explanation for API error handlers.
        self.detail = detail
class BadRequest(Exception):
    """Raised when a request is malformed or fails validation."""

    def __init__(self, detail=None):
        # Optional human-readable explanation for API error handlers.
        self.detail = detail
| renalreg/radar | radar/exceptions.py | Python | agpl-3.0 | 293 |
from django.http import HttpResponseRedirect
from satchmo_store.shop.models import Order, OrderStatus
from signals import pending_order_confirmed
def _dumb_success(controller):
    """
    Stripped-down implementation of `ConfirmController._onSuccess()`:
    - removed check for whether order has been paid in full
    - remove code for subscription products
    - changed order status

    Empties the cart, records a "pending payment" status on the order,
    notifies listeners via the `pending_order_confirmed` signal and
    redirects to the checkout success page.
    """
    controller.cart.empty()
    try:
        curr_status = controller.order.orderstatus_set.latest()
    except OrderStatus.DoesNotExist:
        curr_status = None
    if (curr_status is None) or (curr_status.notes and curr_status.status == "New"):
        controller.order.add_status(status='New', notes = "Order pending payment")
    else:
        # otherwise just update and save
        if not curr_status.notes:
            # BUG FIX: the original called the undefined name `_(...)` here
            # (no gettext import in this module), raising NameError whenever
            # this branch ran; use the same plain string as above.
            curr_status.notes = "Order pending payment"
        curr_status.save()
    # notify listeners
    pending_order_confirmed.send(controller, order=controller.order)
    #Redirect to the success page
    url = controller.lookup_url('satchmo_checkout-success')
    return HttpResponseRedirect(url)
| rctay/satchmo-payment-dumb | utils.py | Python | bsd-3-clause | 1,133 |
# #
# Copyright 2012-2014 Ghent University
#
# This file is part of EasyBuild,
# originally created by the HPC team of Ghent University (http://ugent.be/hpc/en),
# with support of Ghent University (http://ugent.be/hpc),
# the Flemish Supercomputer Centre (VSC) (https://vscentrum.be/nl/en),
# the Hercules foundation (http://www.herculesstichting.be/in_English)
# and the Department of Economy, Science and Innovation (EWI) (http://www.ewi-vlaanderen.be/en).
#
# http://github.com/hpcugent/easybuild
#
# EasyBuild is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation v2.
#
# EasyBuild is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with EasyBuild. If not, see <http://www.gnu.org/licenses/>.
# #
"""
Module for doing parallel builds. This uses a PBS-like cluster. You should be able to submit jobs (which can have
dependencies)
Support for PBS is provided via the PbsJob class. If you want you could create other job classes and use them here.
@author: Toon Willems (Ghent University)
@author: Kenneth Hoste (Ghent University)
@author: Stijn De Weirdt (Ghent University)
@author: Ward Poelmans (Ghent University)
"""
import copy
import os
import re
import sys
from datetime import datetime
from time import gmtime, strftime
import easybuild.tools.config as config
from easybuild.framework.easyblock import build_easyconfigs
from easybuild.framework.easyconfig.tools import process_easyconfig, resolve_dependencies
from easybuild.framework.easyconfig.tools import skip_available
from easybuild.tools.build_log import EasyBuildError
from easybuild.tools.config import build_option
from easybuild.tools.filetools import find_easyconfigs, mkdir, read_file
from easybuild.tools.github import create_gist, post_comment_in_issue
from easybuild.tools.jenkins import aggregate_xml_in_dirs
from easybuild.tools.modules import modules_tool
from easybuild.tools.parallelbuild import build_easyconfigs_in_parallel
from easybuild.tools.systemtools import get_system_info
from easybuild.tools.version import FRAMEWORK_VERSION, EASYBLOCKS_VERSION
from vsc.utils import fancylogger
_log = fancylogger.getLogger('testing', fname=False)
def regtest(easyconfig_paths, build_specs=None):
    """
    Run regression test, using easyconfigs available in given path
    @param easyconfig_paths: path of easyconfigs to run regtest on
    @param build_specs: dictionary specifying build specifications (e.g. version, toolchain, ...)
    @return: True on success (parallel mode), or the result of build_easyconfigs (sequential mode)
    """
    cur_dir = os.getcwd()

    # --aggregate-regtest mode: only aggregate existing XML results, then exit
    aggregate_regtest = build_option('aggregate_regtest')
    if aggregate_regtest is not None:
        output_file = os.path.join(aggregate_regtest, "%s-aggregate.xml" % os.path.basename(aggregate_regtest))
        aggregate_xml_in_dirs(aggregate_regtest, output_file)
        _log.info("aggregated xml files inside %s, output written to: %s" % (aggregate_regtest, output_file))
        sys.exit(0)

    # create base directory, which is used to place
    # all log files and the test output as xml
    basename = "easybuild-test-%s" % datetime.now().strftime("%Y%m%d%H%M%S")
    var = config.OLDSTYLE_ENVIRONMENT_VARIABLES['test_output_path']

    # output dir: explicit option > legacy environment variable > default
    regtest_output_dir = build_option('regtest_output_dir')
    if regtest_output_dir is not None:
        output_dir = regtest_output_dir
    elif var in os.environ:
        output_dir = os.path.abspath(os.environ[var])
    else:
        # default: current dir + easybuild-test-[timestamp]
        output_dir = os.path.join(cur_dir, basename)
    mkdir(output_dir, parents=True)

    # find all easyconfigs
    ecfiles = []
    if easyconfig_paths:
        for path in easyconfig_paths:
            ecfiles += find_easyconfigs(path, ignore_dirs=build_option('ignore_dirs'))
    else:
        _log.error("No easyconfig paths specified.")

    test_results = []

    # process all the found easyconfig files
    easyconfigs = []
    for ecfile in ecfiles:
        try:
            easyconfigs.extend(process_easyconfig(ecfile, build_specs=build_specs))
        except EasyBuildError, err:
            # record parse failures as test results rather than aborting
            test_results.append((ecfile, 'parsing_easyconfigs', 'easyconfig file error: %s' % err, _log))

    # skip easyconfigs for which a module is already available, unless forced
    if not build_option('force'):
        _log.debug("Skipping easyconfigs from %s that already have a module available..." % easyconfigs)
        easyconfigs = skip_available(easyconfigs)
        _log.debug("Retained easyconfigs after skipping: %s" % easyconfigs)

    if build_option('sequential'):
        # build everything in this process, one after the other
        return build_easyconfigs(easyconfigs, output_dir, test_results)
    else:
        # submit one job per easyconfig, honoring inter-package dependencies
        resolved = resolve_dependencies(easyconfigs, build_specs=build_specs)

        cmd = "eb %(spec)s --regtest --sequential -ld"
        command = "unset TMPDIR && cd %s && %s; " % (cur_dir, cmd)
        # retry twice in case of failure, to avoid fluke errors
        command += "if [ $? -ne 0 ]; then %(cmd)s --force && %(cmd)s --force; fi" % {'cmd': cmd}

        jobs = build_easyconfigs_in_parallel(command, resolved, output_dir=output_dir)

        print "List of submitted jobs:"
        for job in jobs:
            print "%s: %s" % (job.name, job.jobid)
        print "(%d jobs submitted)" % len(jobs)

        # determine leaf nodes in dependency graph, and report them
        all_deps = set()
        for job in jobs:
            all_deps = all_deps.union(job.deps)

        leaf_nodes = []
        for job in jobs:
            if job.jobid not in all_deps:
                leaf_nodes.append(str(job.jobid).split('.')[0])

        _log.info("Job ids of leaf nodes in dep. graph: %s" % ','.join(leaf_nodes))

    _log.info("Submitted regression test as jobs, results in %s" % output_dir)

    return True # success
def session_state():
    """Return a snapshot of the current session: timestamp, environment dump, system info."""
    state = {}
    state['time'] = gmtime()
    # deep-copy so later changes to os.environ don't alter the snapshot
    state['environment'] = copy.deepcopy(os.environ)
    state['system_info'] = get_system_info()
    return state
def session_module_list():
    """Return the list of currently loaded modules (equivalent of 'module list')."""
    return modules_tool().list()
def create_test_report(msg, ecs_with_res, init_session_state, pr_nr=None, gist_log=False):
    """Create test report for easyconfigs PR, in Markdown format.

    @param msg: summary message included under the test result header
    @param ecs_with_res: list of (easyconfig, result-dict) tuples; each result dict has a
                         'success' key, and optionally 'err' and 'log_file' keys
    @param init_session_state: session state captured before the builds; keys 'time',
                               'easybuild_configuration', 'system_info', 'module_list'
                               and 'environment' are used
    @param pr_nr: easyconfigs PR number the report relates to (if any)
    @param gist_log: upload a (partial) build log as a gist for each failed build
    """
    user = build_option('github_user')
    end_time = gmtime()
    # create a gist with a full test report
    test_report = []
    if pr_nr is not None:
        test_report.extend([
            "Test report for https://github.com/hpcugent/easybuild-easyconfigs/pull/%s" % pr_nr,
            "",
        ])
    test_report.extend([
        "#### Test result",
        "%s" % msg,
        "",
    ])
    build_overview = []
    for (ec, ec_res) in ecs_with_res:
        test_log = ''
        if ec_res['success']:
            test_result = 'SUCCESS'
        else:
            # compose test result string
            test_result = 'FAIL '
            if 'err' in ec_res:
                if isinstance(ec_res['err'], EasyBuildError):
                    test_result += '(build issue)'
                else:
                    test_result += '(unhandled exception: %s)' % ec_res['err'].__class__.__name__
            else:
                test_result += '(unknown cause, not an exception?!)'
            # create gist for log file (if desired and available)
            if gist_log and 'log_file' in ec_res:
                # only include the tail of the log; a full build log can be huge
                logtxt = read_file(ec_res['log_file'])
                partial_log_txt = '\n'.join(logtxt.split('\n')[-500:])
                descr = "(partial) EasyBuild log for failed build of %s" % ec['spec']
                if pr_nr is not None:
                    descr += " (PR #%s)" % pr_nr
                # strip the '.eb' extension off the easyconfig filename
                fn = '%s_partial.log' % os.path.basename(ec['spec'])[:-3]
                gist_url = create_gist(partial_log_txt, fn, descr=descr, github_user=user)
                test_log = "(partial log available at %s)" % gist_url
        build_overview.append(" * **%s** _%s_ %s" % (test_result, os.path.basename(ec['spec']), test_log))
    test_report.extend(["#### Overview of tested easyconfigs (in order)"] + build_overview + [""])
    time_format = "%a, %d %b %Y %H:%M:%S +0000 (UTC)"
    start_time = strftime(time_format, init_session_state['time'])
    end_time = strftime(time_format, end_time)
    test_report.extend(["#### Time info", " * start: %s" % start_time, " * end: %s" % end_time, ""])
    # fix: sorted() already returns a list, no comprehension needed
    eb_config = sorted(init_session_state['easybuild_configuration'])
    test_report.extend([
        "#### EasyBuild info",
        " * easybuild-framework version: %s" % FRAMEWORK_VERSION,
        " * easybuild-easyblocks version: %s" % EASYBLOCKS_VERSION,
        " * command line:",
        "```",
        "eb %s" % ' '.join(sys.argv[1:]),
        "```",
        " * full configuration (includes defaults):",
        "```",
    # fix: close the Markdown code fence with three backticks (was four)
    ] + eb_config + ["```", ""])
    system_info = init_session_state['system_info']
    system_info = [" * _%s:_ %s" % (key.replace('_', ' '), system_info[key]) for key in sorted(system_info.keys())]
    test_report.extend(["#### System info"] + system_info + [""])
    module_list = init_session_state['module_list']
    if module_list:
        module_list = [" * %s" % mod['mod_name'] for mod in module_list]
    else:
        module_list = [" * (none)"]
    test_report.extend(["#### List of loaded modules"] + module_list + [""])
    environ_dump = init_session_state['environment']
    environment = []
    env_filter = build_option('test_report_env_filter')
    for key in sorted(environ_dump.keys()):
        # skip environment variables matching the configured filter (e.g. secrets)
        if env_filter is not None and env_filter.search(key):
            continue
        else:
            environment += ["%s = %s" % (key, environ_dump[key])]
    test_report.extend(["#### Environment", "```"] + environment + ["```"])
    return '\n'.join(test_report)
def upload_test_report_as_gist(test_report, descr=None, fn=None):
    """Upload test report as a gist; return the gist URL.

    @param test_report: full test report text (Markdown)
    @param descr: gist description (default: "EasyBuild test report")
    @param fn: gist filename (default: timestamped 'easybuild_test_report_*.md')
    """
    if descr is None:
        descr = "EasyBuild test report"
    if fn is None:
        # fix: use %m (month) rather than %M (minutes) in the date part of the timestamp
        fn = 'easybuild_test_report_%s.md' % strftime("%Y%m%d-UTC-%H-%M-%S", gmtime())
    user = build_option('github_user')
    gist_url = create_gist(test_report, descr=descr, fn=fn, github_user=user)
    return gist_url
def post_easyconfigs_pr_test_report(pr_nr, test_report, msg, init_session_state, success):
    """Post test report in a gist, and submit comment in easyconfigs PR.

    @param pr_nr: easyconfigs PR number
    @param test_report: full test report text (uploaded as a gist)
    @param msg: summary message included in the PR comment
    @param init_session_state: session state captured before the builds ('system_info' is used)
    @param success: whether all builds were successful (determines FAILED/SUCCESS label)
    @return: confirmation message mentioning the gist URL and PR number
    """
    user = build_option('github_user')
    # create gist with test report
    descr = "EasyBuild test report for easyconfigs PR #%s" % pr_nr
    # fix: use %m (month) rather than %M (minutes) in the date part of the timestamp
    fn = 'easybuild_test_report_easyconfigs_pr%s_%s.md' % (pr_nr, strftime("%Y%m%d-UTC-%H-%M-%S", gmtime()))
    gist_url = upload_test_report_as_gist(test_report, descr=descr, fn=fn)
    # post comment to report test result
    system_info = init_session_state['system_info']
    short_system_info = "%(os_type)s %(os_name)s %(os_version)s, %(cpu_model)s, Python %(pyver)s" % {
        'cpu_model': system_info['cpu_model'],
        'os_name': system_info['os_name'],
        'os_type': system_info['os_type'],
        'os_version': system_info['os_version'],
        # 'python_version' looks like "X.Y.Z (build info...)"; keep only the version number
        'pyver': system_info['python_version'].split(' ')[0],
    }
    comment_lines = [
        "Test report by @%s" % user,
        # index the 2-tuple with the boolean: False -> FAILED, True -> SUCCESS
        ('**FAILED**', '**SUCCESS**')[success],
        msg,
        short_system_info,
        "See %s for a full test report." % gist_url,
    ]
    comment = '\n'.join(comment_lines)
    post_comment_in_issue(pr_nr, comment, github_user=user)
    msg = "Test report uploaded to %s and mentioned in a comment in easyconfigs PR#%s" % (gist_url, pr_nr)
    return msg
| gc3-uzh-ch/easybuild-framework | easybuild/tools/testing.py | Python | gpl-2.0 | 11,926 |
"""
Trees_Words.py
Description
-----------
Concatenates words processed by pennconverter.jar into sentences
Usage:
Concatenate
-----------
Concatenate('Test.txt')
Si Kai Lee 06/03/2015
"""
import re
def Concatenate(filename):
    """Concatenate the words of pennconverter output into sentences.

    Each non-blank input line is expected to carry a word between its
    first two tab characters; blank lines mark sentence boundaries.
    The result is written to '<filename without .txt>_p.txt', one
    sentence per line.  Stray '{', '}', backtick and '' characters are
    stripped from the accumulated sentence as it is built.
    """
    # Fixes vs. the original: the input file is actually closed now
    # (F.close was missing its call parentheses), the '.txt' suffix is
    # matched literally and only at the end of the name, and a final
    # sentence without a trailing blank line is no longer dropped.
    with open(filename, "r") as source:
        lines = source.readlines()
    out_name = re.sub(r'\.txt$', '', filename) + '_p.txt'
    with open(out_name, 'w') as sink:
        sentence = ''
        for line in lines:
            if line == '\n':
                # sentence boundary: flush what we accumulated so far
                sink.write(sentence + line)
                sentence = ''
            else:
                sentence = re.sub('\{|`|\}|\'\'', '', sentence)
                sentence += re.search('\\t(.*?)\\t', line).group(1) + ' '
        if sentence:
            # input did not end with a blank line; flush the last sentence
            sink.write(sentence + '\n')
| atechnicolorskye/PTB-Unconverter | Trees_Words.py | Python | gpl-2.0 | 777 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" Convert schema to sql create table command
"""
import sys
from yaml import safe_load
from sherlock.common.redshift_schema import RedShiftLogSchema
class InvalidAttribute(Exception):
    """Raised when a column's sql_attr contains an unresolved 'UNKNOWN' marker."""
    pass
class NameConflict(Exception):
    """Raised when two columns of the same table resolve to the same column name."""
    pass
def mk_create_table_sql_cmd(name, table):
    """Build a CREATE TABLE statement for a single table definition.

    :param name: table name
    :param table: dict with a 'columns' list (each entry has 'log_key',
        'sql_attr' and optionally 'name') and a 'sortkey_attr' list
    :returns: the CREATE TABLE statement, terminated by ';\\n'
    :raises InvalidAttribute: if a column's sql_attr contains 'UNKNOWN'
    :raises NameConflict: if two columns resolve to the same name
    """
    column_sql = []
    names = set()
    for column in table['columns']:
        # an explicit 'name' wins; fall back to the raw log key
        col_name = column['name'] if column.get('name') else column['log_key']
        if 'UNKNOWN' in column['sql_attr']:
            raise InvalidAttribute(
                "key: '{0}', sql_attr: '{1}'".format(
                    column['log_key'], column['sql_attr']
                )
            )
        if col_name in names:
            raise NameConflict(name, col_name)
        names.add(col_name)
        column_sql.append(' "{0}" {1}'.format(col_name, column['sql_attr']))
    sql_str = "CREATE TABLE {0}(\n".format(name)
    sql_str += ",\n".join(column_sql) + "\n)"
    if len(table['sortkey_attr']) > 0:
        # idiom fix: join the list directly, no comprehension needed
        sql_str += " SORTKEY(" + ", ".join(table['sortkey_attr']) + ")"
    sql_str += ";\n"
    return sql_str
def tables_to_sql(tables):
    """Render CREATE TABLE statements for all tables, in sorted name order."""
    statements = [mk_create_table_sql_cmd(table_name, tables[table_name])
                  for table_name in sorted(tables)]
    return "".join(statements)
def main():
    """Read a YAML log schema from stdin and write CREATE TABLE commands to stdout."""
    schema = RedShiftLogSchema(safe_load(sys.stdin))
    sql_str = tables_to_sql(schema.tables())
    sys.stdout.write(sql_str)
# Script entry point: schema YAML on stdin, SQL DDL on stdout.
if __name__ == "__main__":
    main()
| Yelp/mycroft | mycroft/sherlock/tools/schema2sql.py | Python | mit | 1,553 |
"""
A formulation in original variables of a wedding seating problem
Authors: Stuart Mitchell 2010
"""
import pulp
try:
import path
except ImportError:
pass
try:
import src.dippy as dippy
from src.dippy import DipSolStatOptimal
except ImportError:
import coinor.dippy as dippy
from coinor.dippy import DipSolStatOptimal
# print diagnostic info from the column generator when True
debug_print = False
# problem size: number of tables available and seats per table
max_tables = 5
max_table_size = 4
guests = 'A B C D E F G I J K L M N O P Q R'.split()  # note: 'H' is not in the list
def happiness(guest_a, guest_b):
    """
    Return the happiness (0 is the best) of allocating two
    guests together in the same table
    """
    delta = ord(guest_a) - ord(guest_b)
    return delta if delta >= 0 else -delta
#create the set of possible tables
tables = range(max_tables)
# every feasible (guest, table) assignment
possible_seatings = [(g, t) for g in guests
                     for t in tables]
#create a binary variable to model if a guest sits at a particular table
x = pulp.LpVariable.dicts('possible_seatings', possible_seatings,
                          lowBound = 0,
                          upBound = 1,
                          cat = pulp.LpInteger)
# master problem; display settings control branch-and-bound tree output
seating_model = dippy.DipProblem("Wedding Seating Model (DIP)", pulp.LpMinimize,
                                 display_mode = 'xdot', display_interval = 0)
#specify the maximum number of guests per table
# (added to the per-table relaxation subproblem, not the master problem)
for table in tables:
    seating_model.relaxation[table] += sum([x[(guest, table)]
                                            for guest in guests]) <= \
                                       max_table_size, \
                                       "Maximum_table_size_%s"%table
#A guest must seated at one and only one table
for guest in guests:
    seating_model += (sum([x[(guest, table)] for table in tables]) == 1,
                "Must_seat_%s"%guest)
#create a set of variables to model the objective function
# pairs are ordered alphabetically so each pair appears exactly once
possible_pairs = [(a, b) for a in guests for b in guests if ord(a) < ord(b)]
happy = pulp.LpVariable.dicts('table_happiness', tables,
                              lowBound = 0,
                              upBound = None,
                              cat = pulp.LpContinuous)
# objective: minimize total unhappiness over all tables
seating_model += sum([happy[table] for table in tables])
#create constraints for each possible pair
# forces happy[table] >= happiness(a, b) whenever both a and b sit at
# `table` (the right-hand side is non-positive unless both x vars are 1)
for table in tables:
    for (a, b) in possible_pairs:
        seating_model.relaxation[table] += \
            happy[table] >= (happiness(a, b) * (x[(a, table)] +
                             x[(b, table)] - 1))
def relaxed_solver(prob, table, redCosts, target):
    """
    Generate columns (tables) with negative reduced costs

    Part of the dippy relaxed-solver callback signature; `prob` and
    `target` are unused here.  Returns (DipSolStatOptimal, dvs) where
    dvs is a list of dicts mapping variables to their values in each
    candidate column.
    """
    dvs = []
    # only guests whose assignment variable has a negative reduced cost
    # can improve the master problem
    neg_guests = [g for g in guests
                  if redCosts[x[(g,table)]] < 0.0]
    neg_guests.sort()
    # find all possible tables between two end points
    for pos1, pos2 in [(i, j) for i in range(len(neg_guests))
                       for j in range(len(neg_guests))
                       if j > i]:
        # find the suitable guests that can be included in between the end
        # points
        candidate_guests = [(redCosts[x[(g,table)]], g)
                            for g in neg_guests[pos1+1:pos2]]
        candidate_guests.sort()
        # pick the best guests (ie those with the negative reduced costs)
        possible_table_inner = [g
                                for _, g in candidate_guests[:max_table_size-2]]
        #This is the best table between the end points
        possible_table = [neg_guests[pos1]] + possible_table_inner +\
                         [neg_guests[pos2]]
        # calculate the sum of the reduced costs for each of the guests
        neg_cost = sum(redCosts[x[(g, table)]] for g in possible_table)
        # table happiness is set by the two alphabetically extreme guests
        table_happiness = happiness(possible_table[0], possible_table[-1])
        rc = neg_cost + table_happiness * redCosts[happy[table]]
        var_values = [(x[(g, table)], 1)
                      for g in possible_table]
        var_values.append((happy[table], table_happiness))
        dvs.append(dict(var_values))
        if debug_print:
            print 'Table: ', table, 'Happiness: ', table_happiness, 'RC: ', rc
    return DipSolStatOptimal, dvs
# Optional hooks, kept for reference: install the custom column
# generator and/or dump the master and relaxation LPs to files.
#seating_model.relaxed_solver = relaxed_solver
#seating_model.writeLP('wedding_main.lp')
#for table in tables:
#    seating_model.writeRelaxed(table, 'wedding_relax%s.lp' % table);
# Solve with price-and-cut enabled and CGL cut generation on.
dippy.Solve(seating_model, {
    'doPriceCut' : '1',
    'CutCGL' : '1',
#    'generateInitVars' : '1',
})
# Render the branch-and-bound tree, if a display mode is active.
if seating_model.display_mode != 'off':
    numNodes = len(seating_model.Tree.get_node_list())
    if seating_model.Tree.attr['display'] == 'svg':
        seating_model.Tree.write_as_svg(filename = "facility_node%d" % (numNodes + 1),
                                        prevfile = "facility_node%d" % numNodes)
    seating_model.Tree.display()
# Print the seating plan: one line per table, listing the guests seated
# there followed by the table's happiness value.
for table in tables:
    print table,
    for guest in guests:
        if x[(guest,table)].value() >= 0.99:
            print guest,
    print happy[table].value()
| tkralphs/Dip | Dip/src/dippy/examples/wedding/wedding.py | Python | epl-1.0 | 5,083 |
#!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This module contains various formatters which can help format a chart
object. To use these, add them to your chart's list of formatters. For
example:
chart.formatters.append(InlineLegend)
chart.formatters.append(LabelSeparator(right=8))
Feel free to write your own formatter. Formatters are just callables that
modify the chart in some (hopefully useful) way. For example, the AutoColor
formatter makes sure each DataSeries has a color applied to it. The formatter
should take the chart to format as its only argument.
(The formatters work on a deepcopy of the user's chart, so modifications
shouldn't leak back into the user's original chart)
"""
def AutoLegend(chart):
  """Fill in the chart legend from series labels.

  The legend is shown only when at least one series carries a non-None
  label; unlabeled series contribute empty strings so legend entries
  stay aligned with the data series.
  """
  labels = [series.label if series.label is not None else ''
            for series in chart.data]
  chart._show_legend = any(series.label is not None for series in chart.data)
  if chart._show_legend:
    chart._legend_labels = labels
class AutoColor(object):
  """Assign a default color to every series that lacks one.

  Object attributes:
    colors: list of hex color strings, cycled through in order.  Edit
      this list to change the palette.
  """
  def __init__(self):
    # Palette applied, in order, to series without an explicit color.
    # TODO: Add a few more default colors.
    # TODO: Add default styles too, so unspecified series get a unique
    # color & style combination.
    self.colors = ['0000ff', 'ff0000', '00dd00', '000000']

  def __call__(self, chart):
    """Color each uncolored series, cycling through the palette."""
    uncolored = (series for series in chart.data
                 if series.style.color is None)
    for position, series in enumerate(uncolored):
      series.style.color = self.colors[position % len(self.colors)]
class AutoScale(object):
  """Fill in min/max on the chart's dependent axes from its data.

  Any min/max the user has already set on some dependent axis is
  honored and copied to axes where it is unset; only missing values are
  computed, with a small buffer of extra space around the data range.
  (If different axes carry conflicting user values, the precedence is
  undefined.)
  """
  def __init__(self, buffer=0.05):
    """Create a new AutoScale formatter.

    Args:
      buffer: fraction of the data range added as padding on each side.
    """
    self.buffer = buffer

  def __call__(self, chart):
    """Set min/max on every dependent axis that lacks them."""
    if not chart.data:
      return  # Nothing to do.
    low, high = chart.GetMinMaxValues()
    if low is None or high is None:
      return  # No data. Nothing to do.
    # A user-specified min/max on any dependent axis overrides the
    # computed data range (the last axis with a value wins).
    for axis in chart.GetDependentAxes():
      if axis.min is not None:
        low = axis.min
      if axis.max is not None:
        high = axis.max
    padding = (high - low) * self.buffer  # Stay away from the edge.
    for axis in chart.GetDependentAxes():
      if axis.min is None:
        axis.min = low - padding
      if axis.max is None:
        axis.max = high + padding
class LabelSeparator(object):
  """Adjust the label positions to avoid having them overlap. This happens for
  any axis with minimum_label_spacing set.
  """
  def __init__(self, left=None, right=None, bottom=None):
    # Minimum spacing (in axis data units) for each axis; None disables
    # adjustment for that axis.
    self.left = left
    self.right = right
    self.bottom = bottom
  def __call__(self, chart):
    """Apply the spacing adjustment to the chart's three label axes."""
    self.AdjustLabels(chart.left, self.left)
    self.AdjustLabels(chart.right, self.right)
    self.AdjustLabels(chart.bottom, self.bottom)
  def AdjustLabels(self, axis, minimum_label_spacing):
    """Nudge the labels on `axis` apart until no pair is closer than
    minimum_label_spacing, clamping to axis.min/axis.max when known."""
    if minimum_label_spacing is None:
      return
    if len(axis.labels) <= 1: # Nothing to adjust
      return
    if axis.max is not None and axis.min is not None:
      # Find the spacing required to fit all labels evenly.
      # Don't try to push them farther apart than that.
      maximum_possible_spacing = (axis.max - axis.min) / (len(axis.labels) - 1)
      if minimum_label_spacing > maximum_possible_spacing:
        minimum_label_spacing = maximum_possible_spacing
    # Pair each position with its label so both move together; sort
    # descending by position (top of the axis first).
    labels = [list(x) for x in zip(axis.label_positions, axis.labels)]
    labels = sorted(labels, reverse=True)
    # First pass from the top, moving colliding labels downward
    for i in range(1, len(labels)):
      if labels[i - 1][0] - labels[i][0] < minimum_label_spacing:
        new_position = labels[i - 1][0] - minimum_label_spacing
        if axis.min is not None and new_position < axis.min:
          new_position = axis.min
        labels[i][0] = new_position
    # Second pass from the bottom, moving colliding labels upward
    for i in range(len(labels) - 2, -1, -1):
      if labels[i][0] - labels[i + 1][0] < minimum_label_spacing:
        new_position = labels[i + 1][0] + minimum_label_spacing
        if axis.max is not None and new_position > axis.max:
          new_position = axis.max
        labels[i][0] = new_position
    # Separate positions and labels
    label_positions, labels = zip(*labels)
    axis.labels = labels
    axis.label_positions = label_positions
def InlineLegend(chart):
  """Label each line at its right-hand end instead of using a legend.

  Uses the final data point of every series as the label position on
  the right axis, mirroring the left axis range, and suppresses the
  regular legend.  Does nothing unless at least one series is labeled.
  """
  labels = [s.label if s.label is not None else '' for s in chart.data]
  label_positions = [s.data[-1] for s in chart.data]
  if any(s.label is not None for s in chart.data):
    chart.right.min = chart.left.min
    chart.right.max = chart.left.max
    chart.right.labels = labels
    chart.right.label_positions = label_positions
    chart._show_legend = False  # The inline labels replace the legend.
| m-lab/mlab-ns | server/mapreduce/lib/graphy/formatters.py | Python | apache-2.0 | 6,837 |
__all__ = ['atleast_1d','atleast_2d','atleast_3d','vstack','hstack',
'column_stack','row_stack', 'dstack','array_split','split','hsplit',
'vsplit','dsplit','apply_over_axes','expand_dims',
'apply_along_axis', 'kron', 'tile', 'get_array_wrap']
import numpy.core.numeric as _nx
from numpy.core.numeric import asarray, zeros, newaxis, outer, \
concatenate, isscalar, array, asanyarray
from numpy.core.fromnumeric import product, reshape
def apply_along_axis(func1d,axis,arr,*args):
    """
    Apply function to 1-D slices along the given axis.
    Execute `func1d(a[i],*args)` where `func1d` takes 1-D arrays, `a` is
    the input array, and `i` is an integer that varies in order to apply the
    function along the given axis for each 1-D subarray in `a`.
    Parameters
    ----------
    func1d : function
        This function should be able to take 1-D arrays. It is applied to 1-D
        slices of `a` along the specified axis.
    axis : integer
        Axis along which `func1d` is applied.
    a : ndarray
        Input array.
    args : any
        Additional arguments to `func1d`.
    Returns
    -------
    out : ndarray
        The output array. The shape of `out` is identical to the shape of `a`,
        except along the `axis` dimension, whose length is equal to the size
        of the return value of `func1d`.
    See Also
    --------
    apply_over_axes : Apply a function repeatedly over multiple axes.
    Examples
    --------
    >>> def my_func(a):
    ...     \"\"\"Average first and last element of a 1-D array\"\"\"
    ...     return (a[0] + a[-1]) * 0.5
    >>> b = np.array([[1,2,3], [4,5,6], [7,8,9]])
    >>> np.apply_along_axis(my_func, 0, b)
    array([4., 5., 6.])
    >>> np.apply_along_axis(my_func, 1, b)
    array([2., 5., 8.])
    """
    arr = asarray(arr)
    nd = arr.ndim
    if axis < 0:
        axis += nd
    if (axis >= nd):
        raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d."
            % (axis,nd))
    # `ind` is an odometer-style counter over all axes except `axis`;
    # `i` holds one index expression per dimension (object array) with a
    # full slice at `axis`, so arr[tuple(i)] selects a 1-D slice.
    ind = [0]*(nd-1)
    i = zeros(nd,'O')
    indlist = range(nd)
    indlist.remove(axis)
    i[axis] = slice(None,None)
    outshape = asarray(arr.shape).take(indlist)
    i.put(indlist, ind)
    # probe the first slice to learn the output element type/shape
    res = func1d(arr[tuple(i.tolist())],*args)
    # if res is a number, then we have a smaller output array
    if isscalar(res):
        outarr = zeros(outshape,asarray(res).dtype)
        outarr[tuple(ind)] = res
        Ntot = product(outshape)
        k = 1
        while k < Ntot:
            # increment the index
            ind[-1] += 1
            n = -1
            # carry into the next dimension, odometer-style
            while (ind[n] >= outshape[n]) and (n > (1-nd)):
                ind[n-1] += 1
                ind[n] = 0
                n -= 1
            i.put(indlist,ind)
            res = func1d(arr[tuple(i.tolist())],*args)
            outarr[tuple(ind)] = res
            k += 1
        return outarr
    else:
        # res is an array: the output keeps arr's shape except along
        # `axis`, whose length becomes len(res)
        Ntot = product(outshape)
        holdshape = outshape
        outshape = list(arr.shape)
        outshape[axis] = len(res)
        outarr = zeros(outshape,asarray(res).dtype)
        outarr[tuple(i.tolist())] = res
        k = 1
        while k < Ntot:
            # increment the index
            ind[-1] += 1
            n = -1
            while (ind[n] >= holdshape[n]) and (n > (1-nd)):
                ind[n-1] += 1
                ind[n] = 0
                n -= 1
            i.put(indlist, ind)
            res = func1d(arr[tuple(i.tolist())],*args)
            outarr[tuple(i.tolist())] = res
            k += 1
        return outarr
def apply_over_axes(func, a, axes):
    """
    Apply a function repeatedly over multiple axes.
    `func` is called as `res = func(a, axis)`, where `axis` is the first
    element of `axes`.  The result `res` of the function call must have
    either the same dimensions as `a` or one less dimension. If `res` has one
    less dimension than `a`, a dimension is inserted before `axis`.
    The call to `func` is then repeated for each axis in `axes`,
    with `res` as the first argument.
    Parameters
    ----------
    func : function
        This function must take two arguments, `func(a, axis)`.
    a : ndarray
        Input array.
    axes : array_like
        Axes over which `func` is applied, the elements must be
        integers.
    Returns
    -------
    val : ndarray
        The output array. The number of dimensions is the same as `a`, but
        the shape can be different. This depends on whether `func` changes
        the shape of its output with respect to its input.
    See Also
    --------
    apply_along_axis :
        Apply a function to 1-D slices of an array along the given axis.
    Examples
    --------
    >>> a = np.arange(24).reshape(2,3,4)
    >>> a
    array([[[ 0,  1,  2,  3],
            [ 4,  5,  6,  7],
            [ 8,  9, 10, 11]],
    <BLANKLINE>
           [[12, 13, 14, 15],
            [16, 17, 18, 19],
            [20, 21, 22, 23]]])
    Sum over axes 0 and 2. The result has same number of dimensions
    as the original array:
    >>> np.apply_over_axes(np.sum, a, [0,2])
    array([[[ 60],
            [ 92],
            [124]]])
    """
    val = asarray(a)
    N = a.ndim
    # a single integer axis is wrapped into a 1-element tuple
    if array(axes).ndim == 0:
        axes = (axes,)
    for axis in axes:
        if axis < 0: axis = N + axis
        args = (val, axis)
        res = func(*args)
        if res.ndim == val.ndim:
            val = res
        else:
            # func reduced a dimension: re-insert it at `axis` so the
            # rank matches the input again
            res = expand_dims(res,axis)
            if res.ndim == val.ndim:
                val = res
            else:
                raise ValueError, "function is not returning"\
                        " an array of correct shape"
    return val
def expand_dims(a, axis):
    """
    Expand the shape of an array.

    Insert a new axis of length one at the given position in the
    array's shape.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : int
        Position (amongst axes) where the new axis is placed; negative
        values count from the end of the expanded shape.

    Returns
    -------
    res : ndarray
        Output array with one more dimension than the input.

    See Also
    --------
    doc.indexing, atleast_1d, atleast_2d, atleast_3d

    Examples
    --------
    >>> x = np.array([1,2])
    >>> np.expand_dims(x, axis=0).shape
    (1, 2)
    >>> np.expand_dims(x, axis=1).shape
    (2, 1)
    """
    a = asarray(a)
    new_shape = list(a.shape)
    if axis < 0:
        # normalize against the rank of the *expanded* array
        axis = axis + len(new_shape) + 1
    new_shape.insert(axis, 1)
    return a.reshape(tuple(new_shape))
def atleast_1d(*arys):
    """
    Convert inputs to arrays with at least one dimension.

    Scalar inputs become 1-dimensional arrays; inputs that already have
    one or more dimensions are passed through, with copies made only if
    necessary.

    Parameters
    ----------
    array1, array2, ... : array_like
        One or more input arrays.

    Returns
    -------
    ret : ndarray or list of ndarray
        A single array when one input is given, otherwise a list of
        arrays, each with ``a.ndim >= 1``.

    See Also
    --------
    atleast_2d, atleast_3d
    """
    converted = [array(ary, copy=False, subok=True, ndmin=1) for ary in arys]
    if len(converted) == 1:
        return converted[0]
    return converted
def atleast_2d(*arys):
    """
    View inputs as arrays with at least two dimensions.

    Non-array inputs are converted to arrays; arrays that already have
    two or more dimensions are preserved.  Copies are avoided where
    possible, so views with two or more dimensions are returned.

    Parameters
    ----------
    array1, array2, ... : array_like
        One or more array-like sequences.

    Returns
    -------
    res, res2, ... : ndarray or list of ndarray
        A single array when one input is given, otherwise a list of
        arrays, each with ``a.ndim >= 2``.

    See Also
    --------
    atleast_1d, atleast_3d
    """
    converted = [array(ary, copy=False, subok=True, ndmin=2) for ary in arys]
    if len(converted) == 1:
        return converted[0]
    return converted
def atleast_3d(*arys):
    """
    View inputs as arrays with at least three dimensions.

    Non-array inputs are converted to arrays; arrays that already have
    three or more dimensions are preserved.  Copies are avoided where
    possible: a one-dimensional array of shape ``N`` becomes a view of
    shape ``(1, N, 1)``, and an ``(M, N)`` array a view of shape
    ``(M, N, 1)``.

    Parameters
    ----------
    array1, array2, ... : array_like
        One or more array-like sequences.

    Returns
    -------
    res1, res2, ... : ndarray or list of ndarray
        A single array when one input is given, otherwise a list of
        arrays, each with ``a.ndim >= 3``.

    See Also
    --------
    numpy.atleast_1d, numpy.atleast_2d
    """
    def _promote(ary):
        # Map 0-d -> (1,1,1), (N,) -> (1,N,1), (M,N) -> (M,N,1);
        # higher-rank inputs pass through untouched.
        ary = asarray(ary)
        if ary.ndim == 0:
            return ary.reshape(1, 1, 1)
        if ary.ndim == 1:
            return ary[newaxis, :, newaxis]
        if ary.ndim == 2:
            return ary[:, :, newaxis]
        return ary
    promoted = [_promote(ary) for ary in arys]
    if len(promoted) == 1:
        return promoted[0]
    return promoted
def vstack(tup):
    """
    Stack arrays in sequence vertically (row wise).

    Each input is promoted to at least two dimensions, then all are
    joined along the first axis.  `vstack` can be used to rebuild
    arrays divided by `vsplit`.

    Parameters
    ----------
    tup : sequence of arrays
        Arrays with the same shape along all but the first axis.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays.

    See Also
    --------
    hstack : Stack arrays in sequence horizontally (column wise).
    dstack : Stack arrays in sequence depth wise (along third dimension).
    concatenate : Join arrays together.
    vsplit : Split array into a list of multiple sub-arrays vertically.
    """
    return _nx.concatenate([atleast_2d(part) for part in tup], 0)
def hstack(tup):
    """
    Stack arrays in sequence horizontally (column wise).

    Each input is promoted to at least one dimension, then all are
    joined along the second axis (equivalent to
    ``np.concatenate(tup, axis=1)``).  Rebuilds arrays divided by
    ``hsplit``.

    Parameters
    ----------
    tup : sequence of ndarrays
        Arrays with the same shape along all but the second axis.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays.

    See Also
    --------
    vstack : Stack along first axis.
    dstack : Stack along third axis.
    concatenate : Join arrays.
    hsplit : Split array along second axis.
    """
    return _nx.concatenate([atleast_1d(part) for part in tup], 1)
row_stack = vstack  # alias: stacking rows is vertical stacking
def column_stack(tup):
    """
    Stack 1-D arrays as columns into a 2-D array.

    1-D inputs are turned into columns first; 2-D (or higher) inputs
    are stacked as-is, just like with hstack.

    Parameters
    ----------
    tup : sequence of 1-D or 2-D arrays.
        Arrays to stack. All of them must have the same first dimension.

    Returns
    -------
    stacked : ndarray
        The 2-D array formed by stacking the given arrays column-wise.
    """
    def _as_column(item):
        # Promote 1-D input to a single column; pass 2-D+ through.
        arr = array(item, copy=False, subok=True)
        if arr.ndim >= 2:
            return arr
        return array(arr, copy=False, subok=True, ndmin=2).T
    return _nx.concatenate([_as_column(item) for item in tup], 1)
def dstack(tup):
    """
    Stack arrays in sequence depth wise (along the third axis).

    Each input is promoted to at least three dimensions, then all are
    joined along the third axis (equivalent to
    ``np.concatenate(tup, axis=2)``).  Rebuilds arrays divided by
    ``dsplit``, and is a simple way to stack 2-D arrays (images) into a
    single 3-D array for processing.

    Parameters
    ----------
    tup : sequence of arrays
        Arrays with the same shape along all but the third axis.

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays.

    See Also
    --------
    vstack : Stack along first axis.
    hstack : Stack along second axis.
    concatenate : Join arrays.
    dsplit : Split array along third axis.
    """
    return _nx.concatenate([atleast_3d(part) for part in tup], 2)
def _replace_zero_by_x_arrays(sub_arys):
for i in range(len(sub_arys)):
if len(_nx.shape(sub_arys[i])) == 0:
sub_arys[i] = _nx.array([])
elif _nx.sometrue(_nx.equal(_nx.shape(sub_arys[i]),0)):
sub_arys[i] = _nx.array([])
return sub_arys
def array_split(ary, indices_or_sections, axis=0):
    """
    Split an array into multiple sub-arrays of equal or near-equal size.

    Identical to `numpy.split`, except that an integer
    `indices_or_sections` does not have to divide the axis length
    evenly: when there is a remainder, the leading sub-arrays are one
    element longer.

    Parameters
    ----------
    ary : ndarray
        Array to be divided into sub-arrays.
    indices_or_sections : int or 1-D sequence of ints
        Number of sections, or the split points along `axis`.
    axis : int, optional
        Axis along which to split.  Default is 0.

    Returns
    -------
    sub_arys : list of ndarrays
        The pieces of `ary`, in order.

    See Also
    --------
    numpy.split : Split array into multiple sub-arrays.
    """
    try:
        total = ary.shape[axis]
    except AttributeError:
        total = len(ary)
    try:
        # Sequence case: explicit split points were supplied.
        num_sections = len(indices_or_sections) + 1
        div_points = [0] + list(indices_or_sections) + [total]
    except TypeError:
        # Scalar case: split into that many (near-)equal sections.
        num_sections = int(indices_or_sections)
        if num_sections <= 0:
            raise ValueError('number sections must be larger than 0.')
        quotient, remainder = divmod(total, num_sections)
        section_sizes = ([0] + remainder * [quotient + 1] +
                         (num_sections - remainder) * [quotient])
        div_points = _nx.array(section_sizes).cumsum()
    swapped = _nx.swapaxes(ary, axis, 0)
    sub_arys = []
    for section in range(num_sections):
        start, stop = div_points[section], div_points[section + 1]
        sub_arys.append(_nx.swapaxes(swapped[start:stop], axis, 0))
    # Guard against odd slicing results (e.g. 0-by-N arrays) by
    # replacing them with plain empty arrays.
    return _replace_zero_by_x_arrays(sub_arys)
def split(ary, indices_or_sections, axis=0):
    """
    Split an array into multiple sub-arrays of equal size.

    Parameters
    ----------
    ary : ndarray
        Array to be divided into sub-arrays.
    indices_or_sections : integer or 1D array
        If `indices_or_sections` is an integer, N, the array will be divided
        into N equal arrays along `axis`. If such a split is not possible,
        an error is raised.
        If `indices_or_sections` is a 1D array of sorted integers, the entries
        indicate where along `axis` the array is split. For example,
        ``[2, 3]`` would, for ``axis = 0``, result in

        - ary[:2]
        - ary[2:3]
        - ary[3:]

        If an index exceeds the dimension of the array along `axis`,
        an empty sub-array is returned correspondingly.
    axis : integer, optional
        The axis along which to split. Default is 0.

    Returns
    -------
    sub-arrays : list
        A list of sub-arrays.

    Raises
    ------
    ValueError
        If `indices_or_sections` is given as an integer, but
        a split does not result in equal division.

    See Also
    --------
    array_split : Split an array into multiple sub-arrays of equal or
                  near-equal size. Does not raise an exception if
                  an equal division cannot be made.
    hsplit : Split array into multiple sub-arrays horizontally (column-wise).
    vsplit : Split array into multiple sub-arrays vertically (row wise).
    dsplit : Split array into multiple sub-arrays along the 3rd axis (depth).
    concatenate : Join arrays together.
    hstack : Stack arrays in sequence horizontally (column wise).
    vstack : Stack arrays in sequence vertically (row wise).
    dstack : Stack arrays in sequence depth wise (along third dimension).

    Examples
    --------
    >>> x = np.arange(9.0)
    >>> np.split(x, 3)
    [array([ 0., 1., 2.]), array([ 3., 4., 5.]), array([ 6., 7., 8.])]
    >>> x = np.arange(8.0)
    >>> np.split(x, [3, 5, 6, 10])
    <BLANKLINE>
    [array([ 0., 1., 2.]),
     array([ 3., 4.]),
     array([ 5.]),
     array([ 6., 7.]),
     array([], dtype=float64)]

    """
    try:
        # Sequence of split points: no divisibility requirement.
        len(indices_or_sections)
    except TypeError:
        # Scalar section count: it must divide the axis length exactly.
        sections = indices_or_sections
        N = ary.shape[axis]
        if N % sections:
            # raise-as-call form works on both Python 2 and Python 3,
            # unlike the old "raise ValueError, msg" statement.
            raise ValueError('array split does not result in an equal division')
    res = array_split(ary, indices_or_sections, axis)
    return res
def hsplit(ary, indices_or_sections):
    """
    Split array into multiple sub-arrays horizontally.

    Please refer to the `numpy.split` documentation. `hsplit` is
    equivalent to `numpy.split` with ``axis = 1``.

    See Also
    --------
    split : Split array into multiple sub-arrays.

    Examples
    --------
    >>> x = np.arange(16.0).reshape(4, 4)
    >>> np.hsplit(x, 2)
    <BLANKLINE>
    [array([[ 0., 1.],
            [ 4., 5.],
            [ 8., 9.],
            [ 12., 13.]]),
     array([[ 2., 3.],
            [ 6., 7.],
            [ 10., 11.],
            [ 14., 15.]])]
    >>> np.hsplit(x, array([3, 6]))
    <BLANKLINE>
    [array([[ 0., 1., 2.],
            [ 4., 5., 6.],
            [ 8., 9., 10.],
            [ 12., 13., 14.]]),
     array([[ 3.],
            [ 7.],
            [ 11.],
            [ 15.]]),
     array([], dtype=float64)]

    """
    if len(_nx.shape(ary)) == 0:
        # raise-as-call form works on both Python 2 and Python 3,
        # unlike the old "raise ValueError, msg" statement.
        raise ValueError('hsplit only works on arrays of 1 or more dimensions')
    if len(ary.shape) > 1:
        return split(ary, indices_or_sections, 1)
    else:
        # 1-D arrays have no axis 1; split along the only axis instead.
        return split(ary, indices_or_sections, 0)
def vsplit(ary, indices_or_sections):
    """
    Split array into multiple sub-arrays vertically.

    Please refer to the `numpy.split` documentation.

    See Also
    --------
    numpy.split : The default behaviour of this function implements
                  `vsplit`.

    """
    if len(_nx.shape(ary)) < 2:
        # raise-as-call form works on both Python 2 and Python 3,
        # unlike the old "raise ValueError, msg" statement.
        raise ValueError('vsplit only works on arrays of 2 or more dimensions')
    return split(ary, indices_or_sections, 0)
def dsplit(ary, indices_or_sections):
    """
    Split array into multiple sub-arrays along the 3rd axis (depth).

    Parameters
    ----------
    ary : ndarray
        An array, with at least 3 dimensions, to be divided into sub-arrays
        depth-wise, or along the third axis.
    indices_or_sections : integer or 1D array
        If `indices_or_sections` is an integer, N, the array will be divided
        into N equal arrays along `axis`. If an equal split is not possible,
        a ValueError is raised.
        If `indices_or_sections` is a 1D array of sorted integers representing
        indices along `axis`, the array will be divided such that each index
        marks the start of each sub-array. If an index exceeds the dimension of
        the array along `axis`, an empty sub-array is returned for that index.
    axis : integer, optional
        The axis along which to split. Default is 0.

    Returns
    -------
    sub-arrays : list
        A list of sub-arrays.

    See Also
    --------
    array_split : Split an array into a list of multiple sub-arrays
                  of near-equal size.
    split : Split array into a list of multiple sub-arrays of equal size.
    hsplit : Split array into a list of multiple sub-arrays horizontally
    vsplit : Split array into a list of multiple sub-arrays vertically
    concatenate : Join arrays together.
    hstack : Stack arrays in sequence horizontally (column wise)
    vstack : Stack arrays in sequence vertically (row wise)
    dstack : Stack arrays in sequence depth wise (along third dimension)

    Notes
    -----
    `dsplit` requires that sub-arrays are of equal shape, whereas
    `array_split` allows for sub-arrays to have nearly-equal shape.
    Equivalent to `split` with `axis` = 2.

    Examples
    --------
    >>> x = np.arange(16.0).reshape(2, 2, 4)
    >>> np.dsplit(x, 2)
    <BLANKLINE>
    [array([[[ 0., 1.],
             [ 4., 5.]],
    <BLANKLINE>
            [[ 8., 9.],
             [ 12., 13.]]]),
     array([[[ 2., 3.],
             [ 6., 7.]],
    <BLANKLINE>
            [[ 10., 11.],
             [ 14., 15.]]])]
    <BLANKLINE>
    >>> x = np.arange(16.0).reshape(2, 2, 4)
    >>> np.dsplit(x, array([3, 6]))
    <BLANKLINE>
    [array([[[ 0., 1., 2.],
             [ 4., 5., 6.]],
    <BLANKLINE>
            [[ 8., 9., 10.],
             [ 12., 13., 14.]]]),
     array([[[ 3.],
             [ 7.]],
    <BLANKLINE>
            [[ 11.],
             [ 15.]]]),
     array([], dtype=float64)]

    """
    if len(_nx.shape(ary)) < 3:
        # Bug fix: the message previously said 'vsplit' (copy-paste error).
        # Also use the raise-as-call form, which works on Python 2 and 3.
        raise ValueError('dsplit only works on arrays of 3 or more dimensions')
    return split(ary, indices_or_sections, 2)
def get_array_wrap(*args):
    """Find the wrapper for the array with the highest priority.

    In case of ties, leftmost wins. If no wrapper is found, return None
    """
    # Candidate keys sort first on priority, then on negated position so
    # that, among equal priorities, the leftmost argument compares largest.
    candidates = [(getattr(arg, '__array_priority__', 0), -pos,
                   arg.__array_wrap__)
                  for pos, arg in enumerate(args)
                  if hasattr(arg, '__array_wrap__')]
    if not candidates:
        return None
    # max() over the keys is equivalent to sorting and taking the last
    # element; ties on the full key are impossible since positions differ.
    return max(candidates)[-1]
def kron(a, b):
    """
    Kronecker product of two arrays.

    Computes the Kronecker product, a composite array made of blocks of the
    second array scaled by the first.

    Parameters
    ----------
    a, b : array_like

    Returns
    -------
    out : ndarray

    See Also
    --------
    outer : The outer product

    Notes
    -----
    The function assumes that the number of dimensions of `a` and `b`
    are the same, if necessary prepending the smallest with ones.
    If `a.shape = (r0,r1,..,rN)` and `b.shape = (s0,s1,...,sN)`,
    the Kronecker product has shape `(r0*s0, r1*s1, ..., rN*sN)`.
    The elements are products of elements from `a` and `b`, organized
    explicitly by::

        kron(a,b)[k0,k1,...,kN] = a[i0,i1,...,iN] * b[j0,j1,...,jN]

    where::

        kt = it * st + jt,  t = 0,...,N

    In the common 2-D case (N=1), the block structure can be visualized::

        [[ a[0,0]*b, a[0,1]*b, ... , a[0,-1]*b ],
         [ ...       ...                  ... ],
         [ a[-1,0]*b, a[-1,1]*b, ... , a[-1,-1]*b ]]

    Examples
    --------
    >>> np.kron([1,10,100], [5,6,7])
    array([ 5, 6, 7, 50, 60, 70, 500, 600, 700])
    >>> np.kron([5,6,7], [1,10,100])
    array([ 5, 50, 500, 6, 60, 600, 7, 70, 700])
    >>> np.kron(np.eye(2), np.ones((2,2)))
    array([[ 1., 1., 0., 0.],
           [ 1., 1., 0., 0.],
           [ 0., 0., 1., 1.],
           [ 0., 0., 1., 1.]])
    >>> a = np.arange(100).reshape((2,5,2,5))
    >>> b = np.arange(24).reshape((2,3,4))
    >>> c = np.kron(a,b)
    >>> c.shape
    (2, 10, 6, 20)
    >>> I = (1,3,0,2)
    >>> J = (0,2,1)
    >>> J1 = (0,) + J  # extend to ndim=4
    >>> S1 = (1,) + b.shape
    >>> K = tuple(np.array(I) * np.array(S1) + np.array(J1))
    >>> c[K] == a[I]*b[J]
    True

    """
    # Capture the highest-priority __array_wrap__ before the inputs are
    # converted, so subclass results can be re-wrapped at the end.
    wrapper = get_array_wrap(a, b)
    b = asanyarray(b)
    a = array(a, copy=False, subok=True, ndmin=b.ndim)
    ndb, nda = b.ndim, a.ndim
    if (nda == 0 or ndb == 0):
        # A scalar operand degenerates the Kronecker product to multiply.
        return _nx.multiply(a, b)
    as_ = a.shape
    bs = b.shape
    if not a.flags.contiguous:
        a = reshape(a, as_)
    if not b.flags.contiguous:
        b = reshape(b, bs)
    nd = ndb
    if (ndb != nda):
        # Prepend length-1 axes so both operands have the same rank.
        if (ndb > nda):
            as_ = (1,)*(ndb-nda) + as_
        else:
            bs = (1,)*(nda-ndb) + bs
        nd = nda
    result = outer(a, b).reshape(as_+bs)
    axis = nd-1
    # Collapse each interleaved axis pair; use 'range' rather than the
    # Python-2-only 'xrange' so the code runs on both Python 2 and 3.
    for _ in range(nd):
        result = concatenate(result, axis=axis)
    if wrapper is not None:
        result = wrapper(result)
    return result
def tile(A, reps):
"""
Construct an array by repeating A the number of times given by reps.
Parameters
----------
A : array_like
The input array.
reps : array_like
The number of repetitions of `A` along each axis.
Returns
-------
c : ndarray
The output array.
See Also
--------
repeat
Notes
-----
If `reps` has length d, the result will have dimension of max(d, `A`.ndim).
If `A`.ndim < d, `A` is promoted to be d-dimensional by prepending new
axes. So a shape (3,) array is promoted to (1,3) for 2-D replication,
or shape (1,1,3) for 3-D replication. If this is not the desired behavior,
promote `A` to d-dimensions manually before calling this function.
If `A`.ndim > d, `reps` is promoted to `A`.ndim by pre-pending 1's to it.
Thus for an `A` of shape (2,3,4,5), a `reps` of (2,2) is treated as
(1,1,2,2).
Examples
--------
>>> a = np.array([0, 1, 2])
>>> np.tile(a, 2)
array([0, 1, 2, 0, 1, 2])
>>> np.tile(a, (2, 2))
array([[0, 1, 2, 0, 1, 2],
[0, 1, 2, 0, 1, 2]])
>>> np.tile(a, (2, 1, 2))
array([[[0, 1, 2, 0, 1, 2]],
<BLANKLINE>
[[0, 1, 2, 0, 1, 2]]])
>>> b = np.array([[1, 2], [3, 4]])
>>> np.tile(b, 2)
array([[1, 2, 1, 2],
[3, 4, 3, 4]])
>>> np.tile(b, (2, 1))
array([[1, 2],
[3, 4],
[1, 2],
[3, 4]])
"""
try:
tup = tuple(reps)
except TypeError:
tup = (reps,)
d = len(tup)
c = _nx.array(A,copy=False,subok=True,ndmin=d)
shape = list(c.shape)
n = max(c.size,1)
if (d < c.ndim):
tup = (1,)*(c.ndim-d) + tup
for i, nrep in enumerate(tup):
if nrep!=1:
c = c.reshape(-1,n).repeat(nrep,0)
dim_in = shape[i]
dim_out = dim_in*nrep
shape[i] = dim_out
n /= max(dim_in,1)
return c.reshape(shape)
| houseind/robothon | GlyphProofer/dist/GlyphProofer.app/Contents/Resources/lib/python2.6/numpy/lib/shape_base.py | Python | mit | 28,902 |
#!/usr/bin/env python3
# Build configuration for the libquat quaternion math library.
from build import ninja_common
# Register this directory with the project's ninja-based build system.
build = ninja_common.Build("auv_math/libquat")
# Compile quat.c into a shared C library named 'quat'.
build.build_c_shared('quat', ['quat.c'])
| cuauv/software | auv_math/libquat/configure.py | Python | bsd-3-clause | 144 |
#!/usr/bin/env python3
# Copyright (c) 2014 Pawel Rozlach, Brainly.com sp. z o.o.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not
# use this file except in compliance with the License. You may obtain a copy of
# the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations under
# the License.
import argparse
import re
from inventory_tool.exception import MalformedInputException
import inventory_tool.object.ippool as i
# For Python3 < 3.3, ipaddress module is available as an extra module,
# under a different name:
try:
from ipaddress import ip_address
from ipaddress import ip_network
except ImportError:
from ipaddr import IPAddress as ip_address
from ipaddr import IPNetwork as ip_network
class KeyWordValidator():
    """Registry of keywords whose values need special parsing.

    Keeps per-category keyword lists (IP address, IP network, connection
    type and integer-valued keys) and exposes membership tests plus hooks
    for extending the built-in defaults at runtime.
    """
    _default_ipaddres_keywords = ["ansible_ssh_host", ]
    _ipaddres_keywords = _default_ipaddres_keywords
    _default_ipnetwork_keywords = []
    _ipnetwork_keywords = _default_ipnetwork_keywords
    # These do not change that often:
    _connection_keywords = ["local", "ssh", "paramiko", "smart", ]
    _default_integer_keywords = ["ansible_ssh_port", ]
    _integer_keywords = _default_integer_keywords

    @classmethod
    def set_extra_ipaddress_keywords(cls, keywords):
        """Extend the default list of IP-address valued keywords."""
        cls._ipaddres_keywords = [*cls._default_ipaddres_keywords, *keywords]

    @classmethod
    def is_ipaddress_keyword(cls, keyword):
        """Tell whether *keyword* takes an IP address as its value."""
        return keyword in cls._ipaddres_keywords

    @classmethod
    def get_ipaddress_keywords(cls):
        """Return a copy of the known IP-address keywords."""
        return cls._ipaddres_keywords[:]

    @classmethod
    def set_extra_ipnetwork_keywords(cls, keywords):
        """Extend the default list of IP-network valued keywords."""
        cls._ipnetwork_keywords = [*cls._default_ipnetwork_keywords, *keywords]

    @classmethod
    def is_ipnetwork_keyword(cls, keyword):
        """Tell whether *keyword* takes an IP network as its value."""
        return keyword in cls._ipnetwork_keywords

    @classmethod
    def get_ipnetwork_keywords(cls):
        """Return a copy of the known IP-network keywords."""
        return cls._ipnetwork_keywords[:]

    @classmethod
    def is_connection_keyword(cls, keyword):
        """Tell whether *keyword* is a valid ansible connection type."""
        return keyword in cls._connection_keywords

    @classmethod
    def get_connection_keywords(cls):
        """Return a copy of the supported connection types."""
        return cls._connection_keywords[:]

    @classmethod
    def set_extra_integer_keywords(cls, keywords):
        """Extend the default list of integer-valued keywords."""
        cls._integer_keywords = [*cls._default_integer_keywords, *keywords]

    @classmethod
    def is_integer_keyword(cls, keyword):
        """Tell whether *keyword* takes an integer as its value."""
        return keyword in cls._integer_keywords
class HostnameParser():
    """Normalizes host names relative to a configurable backend domain."""

    # Must be set via set_backend_domain() before normalize_hostname is used.
    _backend_domain = None

    @classmethod
    def normalize_hostname(cls, name):
        """Remove backend domain from hostname

        Args:
            name: hostname to standardize

        Returns:
            Hostname relative to cls._backend_domain. Function returns
            unchanged string if hostname was absolute (but in different
            domain), or already relative.

        Raises:
            MalformedInputException: the backend domain has not been set,
            or *name* contains the backend domain without a trailing dot.
        """
        if cls._backend_domain is None:
            raise MalformedInputException("Backend domain has not been set yet")
        if cls._backend_domain in name and name[-1] != '.':
            msg = "{0} contains default backend domain, append '.' to the end " + \
                  "to force absolute dns names"
            raise MalformedInputException(msg.format(name))
        suffix = '.' + cls._backend_domain + '.'
        # Bug fix: the previous re.search(suffix + '$', name) left the dots
        # in the suffix unescaped, so they matched *any* character and could
        # strip the suffix from unrelated domains (e.g. 'a.exampleXcom.').
        # A literal endswith() test matches exactly.
        if name.endswith(suffix):
            return name[:-len(suffix)]
        else:
            return name

    @classmethod
    def set_backend_domain(cls, domain):
        """Set the backend domain used by normalize_hostname()."""
        cls._backend_domain = domain
def get_ippool(string):
    """Parse network string into IPPool object

    Parses an IPv4/6 network string and creates an IPPool object basing on it.

    Args:
        string: string to parse

    Returns:
        An empty IPPool object (i.e. without any allocated/reserved IP)

    Raises:
        argparse.ArgumentTypeError: string does not represent a valid ipv4/6
            network.
    """
    try:
        tmp = i.IPPool(string)
    except ValueError as e:
        msg = "IPPool network requires proper " + \
              "ipv4/ipv6 network as a value: " + str(e)
        # Chain the original error (consistent with get_fqdn) so the root
        # cause is preserved in tracebacks.
        raise argparse.ArgumentTypeError(msg) from e
    return tmp
def get_ipaddr(string):
    """Parse ip address string into ipaddress.ip_address object

    Parses an IPv4/6 address string and creates an ipaddress.ip_address object
    basing on it.

    Args:
        string: string to parse

    Returns:
        An ipaddress.ip_address object.

    Raises:
        argparse.ArgumentTypeError: string does not represent a valid ipv4/6
            address.
    """
    try:
        tmp = ip_address(string)
    except ValueError as e:
        msg = "A valid ipv4/ipv6 addreess is required: " + str(e)
        # Chain the original error (consistent with get_fqdn) so the root
        # cause is preserved in tracebacks.
        raise argparse.ArgumentTypeError(msg) from e
    return tmp
def get_fqdn(string):
    """Check whether string is valid domain name.

    If string is not a valid domain name, then exceptions are raised,
    otherwise string is returned unchanged.

    Args:
        string: string to parse

    Returns:
        String passed via "string" parameter.

    Raises:
        argparse.ArgumentTypeError: string does not represent a valid domain
            name or is a relative name and contains backend_domain string.
    """
    # Reject anything that is not a plausible (optionally dotted) DNS name.
    if re.match(r'(([a-z0-9]\-*[a-z0-9]*){1,63}\.?){1,255}$', string) is None:
        raise argparse.ArgumentTypeError(
            "{0} is not proper domain name.".format(string))
    # Delegate backend-domain stripping; re-raise its errors in argparse form.
    try:
        return HostnameParser.normalize_hostname(string)
    except MalformedInputException as e:
        raise argparse.ArgumentTypeError(str(e)) from e
def get_name(string):
    """Checks if string is a valid name (group/host/ippool/etc...)

    If string is an invalid name, then exceptions are raised, otherwise
    string is returned unchanged.

    Args:
        string: string to parse

    Returns:
        String passed via "string" parameter.

    Raises:
        argparse.ArgumentTypeError: string does not represent a valid
            name.
    """
    # Names are at least two characters drawn from [A-Za-z0-9_.-].
    if re.match(r'[\w\-\.]{2,}$', string) is None:
        msg = "{0} is not proper name.".format(string)
        raise argparse.ArgumentTypeError(msg)
    return string
def get_keyval(string):
    """Parse a key-value string into object.

    Function validates key-value string depending on the key and
    retrns parsed objects.

    Args:
        string: a string in "key:val" format or just "key" in case of auto-
            generated variables.

    Returns:
        A hash with key "key" being the parsed key and the key "val" being
        the parsed value or None in case of auto-generated vars.

    Raises:
        argparse.ArgumentTypeError: Key and/or val are malformed or the parsed
            val does not conform to key requirements.
    """
    # Key is word-chars/dashes; ':'-separated value is optional (group(2)
    # is None when absent, signalling an auto-generated variable).
    match = re.match(r'([\w\-]{2,})(?::([\w\-\./\\\@]{2,}))?$', string)
    if not match:
        msg = "{0} is not proper key-val argument.".format(string)
        raise argparse.ArgumentTypeError(msg)
    ret = {"key": match.group(1), "val": match.group(2)}
    # Integer type k-vals:
    # NOTE(review): if an integer keyword is given with no value,
    # int(None) raises TypeError (not ValueError) and escapes this
    # handler entirely -- confirm whether that is intended.
    if KeyWordValidator.is_integer_keyword(ret["key"]):
        try:
            ret["val"] = int(ret["val"])
            return ret
        except ValueError:
            msg = "Key param {0}".format(ret["key"])
            msg += " requires integer as a value."
            raise argparse.ArgumentTypeError(msg)
    # ipaddress type k-vals:
    if KeyWordValidator.is_ipaddress_keyword(ret["key"]):
        # Will be auto-assigned later on:
        if ret["val"] is None:
            return ret
        # Delegates validation (and error reporting) to get_ipaddr.
        ret["val"] = get_ipaddr(ret["val"])
        return ret
    # ipnetwork type k-vals:
    if KeyWordValidator.is_ipnetwork_keyword(ret["key"]):
        try:
            ret["val"] = ip_network(ret["val"])
            return ret
        except ValueError as e:
            msg = "Key param {0} requires proper ".format(ret["key"]) + \
                "ipv4/ipv6 network as a value: " + str(e)
            raise argparse.ArgumentTypeError(msg)
    # ansible_connection type k-val:
    if ret["key"] == "ansible_connection":
        if not KeyWordValidator.is_connection_keyword(ret["val"]):
            msg = "Key param {0} requires ".format(ret["key"]) + \
                  "one of following connection types: " + \
                  ','.join(KeyWordValidator.get_connection_keywords()) + \
                  " as a value."
            raise argparse.ArgumentTypeError(msg)
        else:
            return ret
    # ansible_ssh_user and others:
    # Fall-through: unrecognised keys are passed along unvalidated.
    return ret
| vespian/inventory_tool | inventory_tool/validators.py | Python | apache-2.0 | 8,724 |
import base64
def model_instance_id_to_base64(instance):
    """Return the base64-encoded '<ClassName>Node:<id>' identifier of *instance*."""
    node_id = '{0}Node:{1}'.format(type(instance).__name__, instance.id)
    return base64_encode(node_id)
def base64_encode(string):
    """Encode *string* as UTF-8 and return its base64 form as text."""
    raw = string.encode('utf-8')
    return base64.b64encode(raw).decode('utf-8')
| pity7736/olimpiadas | utils/__init__.py | Python | gpl-3.0 | 233 |
__author__ = "Tiago Gomes"
__license__ = "GPL"
__version__ = "0.0.1"
__email__ = "tiago.gomes@ufms.br"
import matplotlib.pyplot as plt
def ler_modelos(arquivo):
    """Read a two-column CSV velocity-model file.

    Args:
        arquivo: path of a CSV file with one "value,value" pair per line.

    Returns:
        Tuple (x, y) of two lists with the first and second column of every
        non-empty line. NOTE(review): values are kept as strings exactly as
        read from the file -- confirm whether callers expect floats.

    Raises:
        ValueError: a non-empty line does not contain exactly one comma.
    """
    x = []
    y = []
    # 'with' guarantees the file is closed even if a line is malformed
    # (the previous version leaked the handle on exceptions).
    with open(arquivo, 'r') as dataset:
        for line in dataset:
            line = line.strip()
            if not line:
                # Tolerate blank/trailing lines instead of crashing.
                continue
            X, Y = line.split(',')
            x.append(X)
            y.append(Y)
    return x, y
# --- Figure setup: white background -----------------------------------------
fig = plt.figure()
rect = fig.patch
rect.set_facecolor('w')
# Load the IASP91 velocity model (velocity, depth columns).
x, y = ler_modelos('iasp91.csv')
#x2, y2 = ler_modelos('newbr.csv')
#x3, y3 = ler_modelos('newbr_modificado.csv')
# NOTE(review): 'axisbg' was removed in Matplotlib 2.0 in favour of
# 'facecolor' -- confirm the Matplotlib version in use.
ax1 = fig.add_subplot(1, 1, 1, axisbg='w')
plt.subplots_adjust(bottom=0.20, right=0.65, top=0.80, left=0.35)
# Plot velocity vs. depth for each model (extra models kept commented out).
ax1.plot(x, y, 'c', linewidth=2.5, linestyle='-', label= 'IASPEI')
#ax1.plot(x2, y2, 'r', linewidth=2.5, linestyle='-', label= 'NewBR')
#ax1.plot(x3, y3, 'b', linewidth=2.5, linestyle='--', label= 'Pantanal')
ax1.legend(loc='lower left')
#ax1.set_title('Modelos de Velocidade IASPEI e NewBR')
# Axis labels (Portuguese): P-wave velocity in km/s vs. depth in km.
ax1.set_xlabel('Velocidade Vp em Km/s')
ax1.set_ylabel('Profundidade h em Km')
ax1.set_ylim(ymin=0, ymax=60)
# NOTE(review): 'scale_x'/'scale_y' are not Matplotlib Axes attributes;
# these two assignments appear to have no effect -- confirm intent.
ax1.scale_x = 1e-9
ax1.scale_y = 1e3
#ax1 = plt.gca()
# Depth increases downward, as is conventional for profile plots.
ax1.invert_yaxis()
ax1.set_xlim(xmin=5)
ax1.grid()
# Add an arrow pointing at the discontinuity
ax1.annotate('Descontinuidade', xy=(7, 30), xycoords='data', xytext=(0.6, 0.6), textcoords='axes fraction',
             arrowprops=dict(facecolor='black', shrink=0.05),
             horizontalalignment='left', verticalalignment='bottom', )
plt.show()
| gomes310/1D_Velocity_Model | 1DVelocityModel.py | Python | gpl-3.0 | 1,468 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.