| code (string, len 2–1.05M) | repo_name (string, len 5–104) | path (string, len 4–251) | language (stringclasses 1) | license (stringclasses 15) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
# Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Milan input generator."""
import functools
import re
from absl import logging
from lingvo import compat as tf
from lingvo.core import base_input_generator
from lingvo.core import py_utils
class MilanInputGenerator(base_input_generator.BaseInputGenerator):
"""Common input generator for Milan.
This class mostly wraps a user-provided `dataset_fn`, which when called
returns a `tf.data.Dataset` of batched examples to use as input. The function
must be callable with a batch_size argument, as
```dataset = p.dataset_fn(batch_size=42, **p.dataset_fn_kwargs)```.
The `preprocessors` param enables features to be transformed through a layer
before being fed to the model. These are configured as a map of feature name
to layer params. For example, setting ::
preprocessors['foo'] = FooPreprocessor.Params()
causes feature `foo` to be replaced with the output of `FooPreprocessor`.
"""
@classmethod
def Params(cls):
"""Returns `Params` object for configuring this input generator.
Callers must set `dataset_fn` before instantiating the input
generator.
"""
p = super().Params()
p.Define(
'dataset_fn', None, 'Function that constructs a tf.data.Dataset '
'of input examples. Must be callable as: '
'dataset_fn(batch_size=42, **dataset_fn_kwargs).')
p.Define(
'dataset_fn_kwargs', {}, 'Dict of kwargs to pass to dataset_fn(), '
'e.g. to override default options. May not contain "batch_size".')
p.Define(
'features_to_read', [], 'Regular expression(s) of feature names. '
'If empty, defaults to all features.')
p.Define('preprocessors', {},
'Dictionary of input_feature_name => layer_params.')
p.Define('preprocess_parallelism', tf.data.experimental.AUTOTUNE,
'Number of batches to preprocess in parallel.')
# Set reasonable defaults.
p.name = 'milan_input_generator'
p.batch_size = 32
return p
def __init__(self, params):
super().__init__(params)
p = self.params
if 'batch_size' in p.dataset_fn_kwargs:
raise ValueError('dataset_fn_kwargs may not contain "batch_size".')
if not isinstance(p.features_to_read, (tuple, list, type(None))):
raise ValueError(
'Expected sequence type for "features_to_read"; got {}'.format(
type(p.features_to_read)))
if p.preprocessors:
self._preprocessor_input_names, preprocessor_layer_params = list(
zip(*list(p.preprocessors.items())))
self.CreateChildren('_preprocessors', list(preprocessor_layer_params))
def GetPreprocessedInputBatch(self):
p = self.params
# Dataset of parsed examples.
dataset = p.dataset_fn(
batch_size=self.InfeedBatchSize(), **p.dataset_fn_kwargs)
dataset = dataset.map(
# Force retracing if self.do_eval changes.
functools.partial(self._PreprocessInputBatch, do_eval=self.do_eval),
num_parallel_calls=p.preprocess_parallelism)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
iterator = dataset.make_one_shot_iterator()
input_batch = iterator.get_next()
return input_batch
def _FilterFeaturesByName(self, features):
p = self.params
if not p.features_to_read:
return features
union_regex = re.compile('({})'.format('|'.join(p.features_to_read)))
return features.FilterKeyVal(lambda k, _: union_regex.match(k))
@tf.function(experimental_relax_shapes=True)
def _PreprocessInputBatch(self, input_batch, do_eval: bool):
del do_eval # Only exists to force separate train/eval mode traces.
input_batch = py_utils.NestedMap(input_batch)
input_batch = self._FilterFeaturesByName(input_batch)
# Apply preprocessors.
if self.params.preprocessors:
for input_name, preprocessor in zip(self._preprocessor_input_names,
self._preprocessors):
input_batch[input_name] = preprocessor(input_batch[input_name])
# Remove any string features if training on TPU.
if py_utils.use_tpu():
input_batch = input_batch.Filter(lambda t: t.dtype != tf.string)
logging.info('Final input batch: %s', input_batch)
return input_batch
| tensorflow/lingvo | lingvo/tasks/milan/input_generator.py | Python | apache-2.0 | 4,936 |
# coding=utf-8
import json
import time
import sys
import urllib
import urllib2
from bs4 import BeautifulSoup
try:
import settings
except ImportError:
print("Rename settings.example to settings.py")
sys.exit(0)
def get_playlist(page):
html = urllib2.urlopen(page).read()
data = BeautifulSoup(html)
tracks = []
for one_track in data.find_all('div', attrs={'class': 'track'}):
author = one_track.contents[1].string
track = one_track.contents[2]
tracks.append("{0} {1}".format(author, track))
return tracks
def save_tracks(tracks=None):
try:
for one_track in tracks:
payload = {
'q': one_track,
'access_token': settings.VK_TOKEN
}
request = urllib2.Request(url="https://api.vk.com/method/audio.search?" + urllib.urlencode(payload))
data = json.loads(urllib2.urlopen(request).read())
if data['response'][0] > 0 and data['response'][1]['url'] is not None:
file_name = "{0} - {1}".format(data['response'][1]['artist'].encode('utf-8'), data['response'][1]['title'].encode('utf-8'))
file_url = data['response'][1]['url']
print("Downloading {0}".format(file_name))
urllib.urlretrieve(file_url, settings.SAVE_PATH + "/" + file_name + ".mp3")
else:
print("NOT FOUND — {0}".format(one_track))
time.sleep(0.4)
except TypeError:
print("Empty tracklist")
if __name__ == "__main__":
if settings.VK_TOKEN == "":
print("Open {0} in browser, get token from url and put to variable VK_LINK".format(settings.VK_LINK))
sys.exit(0)
tracklist = get_playlist(settings.ARISTOCRATS_PlAYLIST)
save_tracks(tracklist)
| Vadimkin/aristocrats-to-mp3 | run.py | Python | apache-2.0 | 1,808 |
# Copyright 2019 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sympy
import cirq
from cirq.interop.quirk.cells.testing import assert_url_to_circuit_returns
def test_frequency_space_gates():
a, b, c = cirq.LineQubit.range(3)
assert_url_to_circuit_returns(
'{"cols":[["QFT3"]]}',
cirq.Circuit(
cirq.qft(a, b, c),
),
)
assert_url_to_circuit_returns(
'{"cols":[["QFT†3"]]}',
cirq.Circuit(
cirq.inverse(cirq.qft(a, b, c)),
),
)
assert_url_to_circuit_returns(
'{"cols":[["PhaseGradient3"]]}',
cirq.Circuit(
cirq.PhaseGradientGate(num_qubits=3, exponent=0.5)(a, b, c),
),
)
assert_url_to_circuit_returns(
'{"cols":[["PhaseUngradient3"]]}',
cirq.Circuit(
cirq.PhaseGradientGate(num_qubits=3, exponent=-0.5)(a, b, c),
),
)
t = sympy.Symbol('t')
assert_url_to_circuit_returns(
'{"cols":[["grad^t2"]]}',
cirq.Circuit(
cirq.PhaseGradientGate(num_qubits=2, exponent=2 * t)(a, b),
),
)
assert_url_to_circuit_returns(
'{"cols":[["grad^t3"]]}',
cirq.Circuit(
cirq.PhaseGradientGate(num_qubits=3, exponent=4 * t)(a, b, c),
),
)
assert_url_to_circuit_returns(
'{"cols":[["grad^-t3"]]}',
cirq.Circuit(
cirq.PhaseGradientGate(num_qubits=3, exponent=-4 * t)(a, b, c),
),
)
| quantumlib/Cirq | cirq-core/cirq/interop/quirk/cells/frequency_space_cells_test.py | Python | apache-2.0 | 2,005 |
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from importlib import import_module
from template_preprocess.util.loader import Loader
from template_preprocess.util.content_type import filename_is_html
def process_sub_template(name, seen_templates):
content = Loader().get_template_content(name)
is_html = filename_is_html(name)
return process_template_content(content,
seen_templates,
subcall=True,
is_html=is_html)
def process_template_content(content,
seen_templates=None,
subcall=False,
is_html=False):
# The basic strategy here is to build the template up to its full
# included/extended size, then minimize or precompute content from there.
# That makes it multi-pass, but it avoids having a dependency order.
# If anything fails, just return the original template. The worst case is
# Django's default behavior.
if seen_templates is None:
seen_templates = {}
original_content = content
processors = get_processors()
for processor in processors:
try:
method = processor["method"]
only_html = processor["html_only"]
if only_html and not is_html:
continue
content = method(content,
seen_templates=seen_templates,
template_processor=process_sub_template,
)
except Exception:
# We want to return the original template content if there are any
# errors. If we're processing an included/extended template, we
# need to kick it back another level
if subcall:
raise
return original_content
return content
def get_default_config():
return [
{"method": "template_preprocess.process.extends.handle_extends"},
{"method": "template_preprocess.process.includes.handle_includes"},
{"method": "template_preprocess.process.compress_statics.process",
"html_only": True
},
{"method": "template_preprocess.process.html_minify.process",
"html_only": True
},
{"method": "template_preprocess.process.static.handle_static_tag",
"html_only": True
},
# minify won't minify content in <script> tags, so this needs
# to be the last thing done
{"method": "template_preprocess.process.handlebars.process"},
]
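# A sketch of overriding the pipeline from Django settings (the paths reuse
# the defaults above; "html_only" is optional and defaults to False):
#
#   TEMPLATE_PREPROCESS_PROCESSORS = [
#       {"method": "template_preprocess.process.extends.handle_extends"},
#       {"method": "template_preprocess.process.html_minify.process",
#        "html_only": True},
#   ]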
def get_processors():
config = getattr(settings,
"TEMPLATE_PREPROCESS_PROCESSORS",
get_default_config())
processors = []
for value in config:
name = value["method"]
module, attr = name.rsplit('.', 1)
try:
mod = import_module(module)
except ImportError as e:
raise ImproperlyConfigured('Error importing module %s: "%s"' %
(module, str(e)))
try:
method = getattr(mod, attr)
except AttributeError:
raise ImproperlyConfigured('Module "%s" does not define a '
'"%s" method' % (module, attr))
processor = {"method": method, "html_only": False}
if "html_only" in value and value["html_only"]:
processor["html_only"] = True
processors.append(processor)
return processors
| vegitron/django-template-preprocess | template_preprocess/processor.py | Python | apache-2.0 | 3,634 |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: skip-file
import sys
import os
import mxnet as mx
import numpy as np
import unittest
from mxnet.test_utils import assert_almost_equal, default_context
curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
sys.path.insert(0, os.path.join(curr_path, '../unittest'))
from common import setup_module, with_seed, teardown
shape = (4, 4)
keys = [5, 7, 11]
str_keys = ['b', 'c', 'd']
def init_kv_with_str(stype='default', kv_type='local'):
"""init kv """
kv = mx.kv.create(kv_type)
# single
kv.init('a', mx.nd.zeros(shape, stype=stype))
# list
kv.init(str_keys, [mx.nd.zeros(shape=shape, stype=stype)] * len(keys))
return kv
# Test seed 89411477 (module seed 1829754103) resulted in a py3-gpu CI runner core dump.
# Not reproducible, so this test is back on random seeds.
@with_seed()
def test_rsp_push_pull():
def check_rsp_push_pull(kv_type, is_push_cpu=True):
kv = init_kv_with_str('row_sparse', kv_type)
kv.init('e', mx.nd.ones(shape).tostype('row_sparse'))
push_ctxs = [mx.cpu(i) if is_push_cpu else mx.gpu(i) for i in range(2)]
kv.push('e', [mx.nd.ones(shape, ctx=context).tostype('row_sparse') for context in push_ctxs])
def check_rsp_pull(kv, count, ctxs, is_same_rowid=False, use_slice=False):
num_rows = shape[0]
row_ids = []
all_row_ids = np.arange(num_rows)
vals = [mx.nd.sparse.zeros(shape=shape, ctx=ctxs[i], stype='row_sparse') for i in range(count)]
if is_same_rowid:
row_id = np.random.randint(num_rows, size=num_rows)
row_ids = [mx.nd.array(row_id)] * count
elif use_slice:
total_row_ids = mx.nd.array(np.random.randint(num_rows, size=count*num_rows))
row_ids = [total_row_ids[i*num_rows : (i+1)*num_rows] for i in range(count)]
else:
for i in range(count):
row_id = np.random.randint(num_rows, size=num_rows)
row_ids.append(mx.nd.array(row_id))
row_ids_to_pull = row_ids[0] if (len(row_ids) == 1 or is_same_rowid) else row_ids
vals_to_pull = vals[0] if len(vals) == 1 else vals
kv.row_sparse_pull('e', out=vals_to_pull, row_ids=row_ids_to_pull)
for val, row_id in zip(vals, row_ids):
retained = val.asnumpy()
excluded_row_ids = np.setdiff1d(all_row_ids, row_id.asnumpy())
for row in range(num_rows):
expected_val = np.zeros_like(retained[row])
expected_val += 0 if row in excluded_row_ids else 2
assert_almost_equal(retained[row], expected_val)
check_rsp_pull(kv, 1, [mx.gpu(0)])
check_rsp_pull(kv, 1, [mx.cpu(0)])
check_rsp_pull(kv, 4, [mx.gpu(i//2) for i in range(4)])
check_rsp_pull(kv, 4, [mx.gpu(i//2) for i in range(4)], is_same_rowid=True)
check_rsp_pull(kv, 4, [mx.cpu(i) for i in range(4)])
check_rsp_pull(kv, 4, [mx.cpu(i) for i in range(4)], is_same_rowid=True)
check_rsp_pull(kv, 4, [mx.gpu(i//2) for i in range(4)], use_slice=True)
check_rsp_pull(kv, 4, [mx.cpu(i) for i in range(4)], use_slice=True)
# test fails intermittently. temporarily disabled till it gets fixed. tracked at https://github.com/apache/incubator-mxnet/issues/9384
# check_rsp_push_pull('local')
check_rsp_push_pull('device')
check_rsp_push_pull('device', is_push_cpu=False)
def test_row_sparse_pull_single_device():
kvstore = mx.kv.create('device')
copy = mx.nd.random_normal(shape=(4,4), ctx=mx.gpu(0))
grad = copy.tostype("row_sparse")
key = 0
kvstore.init(key, grad)
idx = grad.indices
kvstore.push(key, grad)
kvstore.row_sparse_pull(key, out=grad, row_ids=idx)
assert_almost_equal(grad.asnumpy(), copy.asnumpy())
def test_rsp_push_pull_large_rowid():
num_rows = 793470
val = mx.nd.ones((num_rows, 1)).tostype('row_sparse').copyto(mx.gpu())
kv = mx.kv.create('device')
kv.init('a', val)
out = mx.nd.zeros((num_rows,1), stype='row_sparse').copyto(mx.gpu())
kv.push('a', val)
kv.row_sparse_pull('a', out=out, row_ids=mx.nd.arange(0, num_rows, dtype='int64'))
assert(out.indices.shape[0] == num_rows)
if __name__ == '__main__':
import nose
nose.runmodule()
| navrasio/mxnet | tests/python/gpu/test_kvstore_gpu.py | Python | apache-2.0 | 5,174 |
"""
This Program is being written to incorporate all
things Python that I have learned thus far.
I am writing this in Windows Notepad to better test
my ability to do proper spacing and indentation
without having a proper tool that will handle small
things for me.
I'm unsure at the time of this writing where this
program will go. My main goal is for it to just work.
Created by: Jason R. Pittman
Creation start Date: 2/22/2016 10:18am
"""
from datetime import datetime
from time import sleep
from math import *
print "Initializing application.............."
Sleep(3)
print "Welcome to Python Adventures............"
sleep(4)
User_name =raw_input("Please enter your name so I know whom to refer to: "
work_bag = [
"Laptop",
"SSD",
"FlashDrive",
"Binder",
"Pens",
"Name Badge"
]
lunch_box = [
"apple,
"soup",
"Ukrop's rolls",
"mountain dew"
]
preparing_for_work = [
"Shower",
"Brush Teeth",
"get dressed"
]
def getting_ready():
print "Good morning " + User_name + " it's time to get ready for work!"
answer = raw_input("Are you going to work today? yes or no please: ")
if answer.lower() == "yes":
print "OK make sure you do these things! " + ", ".join(preparing_for_work)
else:
print "Well enjoy your day off and we will see you next time!"
return
getting_ready()
| kejrp23/Python | combination.py | Python | artistic-2.0 | 1,359 |
#!/usr/bin/env python
"""
fasta2phylip.py <input filename> <output filename>
Author: Tony Papenfuss
Date: Mon Jan 8 11:44:37 EST 2007
"""
import os, sys
from optparse import OptionParser
from mungo.align import Alignment
usage = "%prog [options] <input file> <output file>"
parser = OptionParser(usage=usage)
options, args = parser.parse_args(sys.argv)
if len(args)!=3:
sys.exit(__doc__)
iFilename = args[1]
oFilename = args[2]
alignment = Alignment.load(iFilename, format='fasta')
alignment.save(oFilename, format='phylip')
| PapenfussLab/Mungo | bin/fasta2phylip.py | Python | artistic-2.0 | 541 |
# -*- coding: utf-8 -*-
# Copyright (c) 2012-2015, Anima Istanbul
#
# This module is part of anima-tools and is released under the BSD 2
# License: http://www.opensource.org/licenses/BSD-2-Clause
import sys
import shutil
import tempfile
import logging
import unittest
from anima.env.testing import TestEnvironment
logger = logging.getLogger('anima.ui.version_creator')
logger.setLevel(logging.DEBUG)
from stalker.models.auth import LocalSession
from anima.ui import IS_PYSIDE, IS_PYQT4, SET_PYSIDE, version_creator
SET_PYSIDE()
if IS_PYSIDE():
logger.debug('environment is set to pyside, importing pyside')
from PySide import QtCore, QtGui
from PySide.QtTest import QTest
from PySide.QtCore import Qt
elif IS_PYQT4():
logger.debug('environment is set to pyqt4, importing pyqt4')
import sip
sip.setapi('QString', 2)
sip.setapi('QVariant', 2)
from PyQt4 import QtCore, QtGui
from PyQt4.QtTest import QTest
from PyQt4.QtCore import Qt
from stalker import (db, defaults, User, Project, Repository, Structure,
Status, StatusList, Task, Group, Version)
class VersionCreatorTester(unittest.TestCase):
"""Tests the Version Creator instance
"""
repo_path = ''
@classmethod
def setUpClass(cls):
"""setup once
"""
# remove the transaction manager
db.DBSession.remove()
cls.repo_path = tempfile.mkdtemp()
defaults.local_storage_path = tempfile.mktemp()
db.setup({
'sqlalchemy.url': 'sqlite:///:memory:',
'sqlalchemy.echo': 'false'
})
db.init()
# create Power Users Group
cls.power_users_group = Group(name='Power Users')
db.DBSession.add(cls.power_users_group)
db.DBSession.commit()
# create a LocalSession first
cls.admin = User.query.all()[0]
cls.lsession = LocalSession()
cls.lsession.store_user(cls.admin)
cls.lsession.save()
# create a repository
cls.test_repo1 = Repository(
name='Test Repository',
windows_path='T:/TestRepo/',
linux_path='/mnt/T/TestRepo/',
osx_path='/Volumes/T/TestRepo/'
)
cls.test_structure1 = Structure(
name='Test Project Structure',
templates=[],
custom_template=''
)
cls.status_new = Status.query.filter_by(code='NEW').first()
cls.status_wip = Status.query.filter_by(code='WIP').first()
cls.status_cmpl = Status.query.filter_by(code='CMPL').first()
cls.project_status_list = StatusList(
name='Project Statuses',
statuses=[cls.status_new, cls.status_wip, cls.status_cmpl],
target_entity_type=Project
)
# create a couple of projects
cls.test_project1 = Project(
name='Project 1',
code='P1',
repository=cls.test_repo1,
structure=cls.test_structure1,
status_list=cls.project_status_list
)
cls.test_project2 = Project(
name='Project 2',
code='P2',
repository=cls.test_repo1,
structure=cls.test_structure1,
status_list=cls.project_status_list
)
cls.test_project3 = Project(
name='Project 3',
code='P3',
repository=cls.test_repo1,
structure=cls.test_structure1,
status_list=cls.project_status_list
)
cls.projects = [
cls.test_project1,
cls.test_project2,
cls.test_project3
]
cls.test_user1 = User(
name='Test User',
# groups=[self.power_users_group],
login='tuser',
email='tuser@tusers.com',
password='secret'
)
db.DBSession.add(cls.test_user1)
db.DBSession.commit()
cls.admin.projects.append(cls.test_project1)
cls.admin.projects.append(cls.test_project2)
cls.admin.projects.append(cls.test_project3)
cls.test_user1.projects.append(cls.test_project1)
cls.test_user1.projects.append(cls.test_project2)
cls.test_user1.projects.append(cls.test_project3)
# project 1
cls.test_task1 = Task(
name='Test Task 1',
project=cls.test_project1,
resources=[cls.admin],
)
cls.test_task2 = Task(
name='Test Task 2',
project=cls.test_project1,
resources=[cls.admin],
)
cls.test_task3 = Task(
name='Test Task 3',
project=cls.test_project1,
resources=[cls.admin],
)
# project 2
cls.test_task4 = Task(
name='Test Task 4',
project=cls.test_project2,
resources=[cls.admin],
)
cls.test_task5 = Task(
name='Test Task 5',
project=cls.test_project2,
resources=[cls.admin],
)
cls.test_task6 = Task(
name='Test Task 6',
parent=cls.test_task5,
resources=[cls.admin],
)
cls.test_task7 = Task(
name='Test Task 7',
parent=cls.test_task5,
resources=[],
)
cls.test_task8 = Task(
name='Test Task 8',
parent=cls.test_task5,
resources=[],
)
cls.test_task9 = Task(
name='Test Task 9',
parent=cls.test_task5,
resources=[],
)
# +-> Project 1
# | |
# | +-> Task1
# | |
# | +-> Task2
# | |
# | +-> Task3
# |
# +-> Project 2
# | |
# | +-> Task4
# | |
# | +-> Task5
# | |
# | +-> Task6
# | |
# | +-> Task7 (no resource)
# | |
# | +-> Task8 (no resource)
# | |
# | +-> Task9 (no resource)
# |
# +-> Project 3
# record them all to the db
db.DBSession.add_all([
cls.admin, cls.test_project1, cls.test_project2, cls.test_project3,
cls.test_task1, cls.test_task2, cls.test_task3, cls.test_task4,
cls.test_task5, cls.test_task6, cls.test_task7, cls.test_task8,
cls.test_task9
])
db.DBSession.commit()
cls.all_tasks = [
cls.test_task1, cls.test_task2, cls.test_task3, cls.test_task4,
cls.test_task5, cls.test_task6, cls.test_task7, cls.test_task8,
cls.test_task9
]
# create versions
cls.test_version1 = Version(
cls.test_task1,
created_by=cls.admin,
created_with='Test',
description='Test Description'
)
db.DBSession.add(cls.test_version1)
db.DBSession.commit()
cls.test_version2 = Version(
cls.test_task1,
created_by=cls.admin,
created_with='Test',
description='Test Description'
)
db.DBSession.add(cls.test_version2)
db.DBSession.commit()
cls.test_version3 = Version(
cls.test_task1,
created_by=cls.admin,
created_with='Test',
description='Test Description'
)
cls.test_version3.is_published = True
db.DBSession.add(cls.test_version3)
db.DBSession.commit()
cls.test_version4 = Version(
cls.test_task1,
take_name='Main@GPU',
created_by=cls.admin,
created_with='Test',
description='Test Description'
)
cls.test_version4.is_published = True
db.DBSession.add(cls.test_version4)
db.DBSession.commit()
if not QtGui.QApplication.instance():
logger.debug('creating a new QApplication')
cls.app = QtGui.QApplication(sys.argv)
else:
logger.debug('using the present QApplication: %s' % QtGui.qApp)
# self.app = QtGui.qApp
cls.app = QtGui.QApplication.instance()
# cls.test_environment = TestEnvironment()
cls.dialog = version_creator.MainDialog()
# environment=cls.test_environment
# )
@classmethod
def tearDownClass(cls):
"""teardown once
"""
shutil.rmtree(
defaults.local_storage_path,
True
)
shutil.rmtree(cls.repo_path)
# configure with transaction manager
db.DBSession.remove()
def show_dialog(self, dialog):
"""show the given dialog
"""
dialog.show()
self.app.exec_()
self.app.connect(
self.app,
QtCore.SIGNAL("lastWindowClosed()"),
self.app,
QtCore.SLOT("quit()")
)
def test_close_button_closes_ui(self):
"""testing if the close button is closing the ui
"""
self.dialog.show()
self.assertEqual(self.dialog.isVisible(), True)
# now run the UI
QTest.mouseClick(self.dialog.close_pushButton, Qt.LeftButton)
self.assertEqual(self.dialog.isVisible(), False)
def test_login_dialog_is_shown_if_there_are_no_logged_in_user(self):
"""testing if the login dialog is shown if there is no logged in user
"""
self.fail("Test is not implemented yet")
def test_logged_in_user_field_is_updated_correctly(self):
"""testing if the logged_in_user field is updated correctly
"""
# now expect to see the admin.name on the dialog.logged_in_user_label
self.assertEqual(
self.dialog.logged_in_user_label.text(),
self.admin.name
)
def test_logout_button_shows_the_login_dialog(self):
"""logout dialog shows the login_dialog
"""
self.fail('test is not implemented yet')
def test_tasks_tree_view_is_filled_with_projects(self):
"""testing if the tasks_treeView is filled with projects as root
level items
"""
# now call the dialog and expect to see all these projects as root
# level items in tasks_treeView
self.assertEqual(
len(self.admin.tasks),
5
)
task_tree_model = self.dialog.tasks_treeView.model()
row_count = task_tree_model.rowCount()
self.assertEqual(3, row_count)
index = task_tree_model.index(0, 0)
p1_item = task_tree_model.itemFromIndex(index)
self.assertEqual(p1_item.task, self.test_project1)
index = task_tree_model.index(1, 0)
p2_item = task_tree_model.itemFromIndex(index)
self.assertEqual(p2_item.task, self.test_project2)
index = task_tree_model.index(2, 0)
p3_item = task_tree_model.itemFromIndex(index)
self.assertEqual(p3_item.task, self.test_project3)
# self.show_dialog(dialog)
def test_tasks_tree_view_lists_all_tasks_properly(self):
"""testing if the tasks_treeView lists all the tasks properly
"""
task_tree_model = self.dialog.tasks_treeView.model()
row_count = task_tree_model.rowCount()
self.assertEqual(3, row_count)
# project1
index = task_tree_model.index(0, 0)
p1_item = task_tree_model.itemFromIndex(index)
self.assertEqual(p1_item.task, self.test_project1)
# project2
index = task_tree_model.index(1, 0)
p2_item = task_tree_model.itemFromIndex(index)
self.assertEqual(p2_item.task, self.test_project2)
# project3
index = task_tree_model.index(2, 0)
p3_item = task_tree_model.itemFromIndex(index)
self.assertEqual(p3_item.task, self.test_project3)
# self.show_dialog(self.dialog)
# task1
task1_item = p1_item.child(0, 0)
self.assertEqual(task1_item.task, self.test_task1)
def test_tasks_treeView_lists_only_my_tasks_if_checked(self):
"""testing if the tasks_treeView lists only my tasks if
my_tasks_only_checkBox is checked
"""
item_model = self.dialog.tasks_treeView.model()
selection_model = self.dialog.tasks_treeView.selectionModel()
# check show my tasks only check box
self.dialog.my_tasks_only_checkBox.setChecked(True)
# check if all my tasks are represented in the tree
my_tasks = self.admin.tasks
# generate a list of parent tasks
all_my_parent_tasks = []
for task in my_tasks:
all_my_parent_tasks += task.parents
all_my_parent_tasks = list(set(all_my_parent_tasks))
for task in my_tasks:
self.dialog.find_and_select_entity_item_in_treeView(
task,
self.dialog.tasks_treeView
)
# get the current selection
self.assertEqual(
task,
self.dialog.get_task_id()
)
# check if non of the other tasks or their parents are visible
for task in self.all_tasks:
if task not in my_tasks and task not in all_my_parent_tasks:
self.dialog.find_and_select_entity_item_in_treeView(
task,
self.dialog.tasks_treeView
)
# get the current selection
self.assertTrue(self.dialog.get_task_id() is None)
# now un check it and check if all tasks are shown
self.dialog.my_tasks_only_checkBox.setChecked(False)
# check if all the tasks are present in the tree
for task in self.all_tasks:
self.dialog.find_and_select_entity_item_in_treeView(
task,
self.dialog.tasks_treeView
)
# get the current selection
self.assertEqual(self.dialog.get_task_id(), task)
def test_takes_listWidget_lists_Main_by_default(self):
"""testing if the takes_listWidget lists "Main" by default
"""
dialog = version_creator.MainDialog()
self.assertEqual(
defaults.version_take_name,
dialog.takes_listWidget.currentItem().text()
)
def test_takes_listWidget_lists_Main_by_default_for_tasks_with_no_versions(self):
"""testing if the takes_listWidget lists "Main" by default for a task
with no version
"""
# now call the dialog and expect to see all these projects as root
# level items in tasks_treeView
dialog = version_creator.MainDialog()
# self.show_dialog(dialog)
self.assertEqual(
defaults.version_take_name,
dialog.takes_listWidget.currentItem().text()
)
def test_takes_listWidget_lists_Main_by_default_for_projects_with_no_tasks(self):
"""testing if the takes_listWidget lists "Main" by default for a
project with no tasks
"""
# now call the dialog and expect to see all these projects as root
# level items in tasks_treeView
dialog = version_creator.MainDialog()
# self.show_dialog(dialog)
self.assertEqual(
defaults.version_take_name,
dialog.takes_listWidget.currentItem().text()
)
def test_tasks_treeView_tasks_are_sorted(self):
"""testing if tasks in tasks_treeView are sorted according to their
names
"""
item_model = self.dialog.tasks_treeView.model()
selection_model = self.dialog.tasks_treeView.selectionModel()
index = item_model.index(0, 0)
project1_item = item_model.itemFromIndex(index)
self.dialog.tasks_treeView.expand(index)
task1_item = project1_item.child(0, 0)
self.assertEqual(task1_item.text(), self.test_task1.name)
task2_item = project1_item.child(1, 0)
self.assertEqual(task2_item.text(), self.test_task2.name)
def test_tasks_treeView_do_not_cause_a_segfault(self):
"""there was a bug causing a segfault
"""
dialog = version_creator.MainDialog()
dialog = version_creator.MainDialog()
dialog = version_creator.MainDialog()
def test_previous_versions_tableWidget_is_filled_with_proper_info(self):
"""testing if the previous_versions_tableWidget is filled with proper
information
"""
# select the t1
item_model = self.dialog.tasks_treeView.model()
selection_model = self.dialog.tasks_treeView.selectionModel()
index = item_model.index(0, 0)
project1_item = item_model.itemFromIndex(index)
# expand it
self.dialog.tasks_treeView.expand(index)
# get first child which is task1
task1_item = project1_item.child(0, 0)
# select task1
selection_model.select(
task1_item.index(),
QtGui.QItemSelectionModel.Select
)
# select the first take
self.dialog.takes_listWidget.setCurrentRow(0)
# the row count should be 3
self.assertEqual(
self.dialog.previous_versions_tableWidget.rowCount(),
3
)
# now check if the previous versions tableWidget has the info
versions = [self.test_version1, self.test_version2, self.test_version3]
for i in range(len(versions)):
self.assertEqual(
int(self.dialog.previous_versions_tableWidget.item(i, 0).text()),
versions[i].version_number
)
self.assertEqual(
self.dialog.previous_versions_tableWidget.item(i, 2).text(),
versions[i].created_by.name
)
self.assertEqual(
self.dialog.previous_versions_tableWidget.item(i, 6).text(),
versions[i].description
)
def test_get_new_version_with_publish_check_box_is_checked_creates_published_version(self):
"""testing if checking publish_checkbox will create a published Version
instance
"""
# select the t1
item_model = self.dialog.tasks_treeView.model()
selection_model = self.dialog.tasks_treeView.selectionModel()
index = item_model.index(0, 0)
project1_item = item_model.itemFromIndex(index)
# expand it
self.dialog.tasks_treeView.expand(index)
# get first child which is task1
task1_item = project1_item.child(0, 0)
# select task1
selection_model.select(
task1_item.index(),
QtGui.QItemSelectionModel.Select
)
# first check if unpublished
new_version = self.dialog.get_new_version()
# is_published should be False
self.assertFalse(new_version.is_published)
# check task
self.assertEqual(new_version.task, self.test_task1)
# check the publish checkbox
self.dialog.publish_checkBox.setChecked(True)
new_version = self.dialog.get_new_version()
# check task
self.assertEqual(new_version.task, self.test_task1)
# is_published should be True
self.assertTrue(new_version.is_published)
def test_users_can_change_the_publish_state_if_they_are_the_owner(self):
"""testing if the users are able to change the publish method if it is
their versions
"""
# select the t1
item_model = self.dialog.tasks_treeView.model()
selection_model = self.dialog.tasks_treeView.selectionModel()
index = item_model.index(0, 0)
project1_item = item_model.itemFromIndex(index)
# expand it
self.dialog.tasks_treeView.expand(index)
# get first child which is task1
task1_item = project1_item.child(0, 0)
# select task1
selection_model.select(
task1_item.index(),
QtGui.QItemSelectionModel.Select
)
# check if the menu item has a publish method for v8
self.fail('test is not completed yet')
def test_thumbnails_are_displayed_correctly(self):
"""testing if the thumbnails are displayed correctly
"""
self.fail('test is not implemented yet')
def test_representations_combo_box_lists_all_representations_of_current_env(self):
"""testing if representations_comboBox lists all the possible
representations in current environment
"""
test_environment = TestEnvironment()
dialog = version_creator.MainDialog(
environment=test_environment
)
for i in range(len(TestEnvironment.representations)):
repr_name = TestEnvironment.representations[i]
combo_box_text = dialog.representations_comboBox.itemText(i)
self.assertEqual(repr_name, combo_box_text)
def test_repr_as_separate_takes_check_box_is_unchecked_by_default(self):
"""testing if repr_as_separate_takes_checkBox is unchecked by default
"""
self.assertFalse(
self.dialog.repr_as_separate_takes_checkBox.isChecked()
)
def test_repr_as_separate_takes_check_box_is_working_properly(self):
"""testing if when the repr_as_separate_takes_checkBox is checked it
will update the takes_listWidget to also show representation takes
"""
# select project 1 -> task1
item_model = self.dialog.tasks_treeView.model()
selection_model = self.dialog.tasks_treeView.selectionModel()
index = item_model.index(0, 0)
project1_item = item_model.itemFromIndex(index)
self.dialog.tasks_treeView.expand(index)
task1_item = project1_item.child(0, 0)
selection_model.select(
task1_item.index(),
QtGui.QItemSelectionModel.Select
)
# expect only one "Main" take listed in take_listWidget
self.assertEqual(
sorted(self.dialog.takes_listWidget.take_names),
['Main']
)
# check the repr_as_separate_takes_checkBox
self.dialog.repr_as_separate_takes_checkBox.setChecked(True)
# expect two takes of "Main" and "Main@GPU"
self.assertEqual(
sorted(self.dialog.takes_listWidget.take_names),
['Main', 'Main@GPU']
)
# self.show_dialog(self.dialog)
def test_takes_with_representations_shows_in_blue(self):
"""testing if takes with representations will be displayed in blue
"""
# select project 1 -> task1
item_model = self.dialog.tasks_treeView.model()
selection_model = self.dialog.tasks_treeView.selectionModel()
index = item_model.index(0, 0)
project1_item = item_model.itemFromIndex(index)
self.dialog.tasks_treeView.expand(index)
task1_item = project1_item.child(0, 0)
selection_model.select(
task1_item.index(),
QtGui.QItemSelectionModel.Select
)
# expect only one "Main" take listed in take_listWidget
main_item = self.dialog.takes_listWidget.item(0)
item_foreground = main_item.foreground()
color = item_foreground.color()
self.assertEqual(
color,
QtGui.QColor(0, 0, 255)
)
| sergeneren/anima | tests/ui/test_version_creator.py | Python | bsd-2-clause | 23,329 |
import dragonfly
import dragonfly.pandahive
import bee
from bee import connect
import math, functools
from panda3d.core import NodePath
import dragonfly.scene.unbound
import dragonfly.std
import dragonfly.io
import dragonfly.canvas
# ## random matrix generator
from random import random
def random_matrix_generator():
while 1:
a = NodePath("")
a.setHpr(360 * random(), 0, 0)
a.setPos(15 * random() - 7.5, 15 * random() - 7.5, 0)
yield dragonfly.scene.matrix(a, "NodePath")
def id_generator():
n = 0
while 1:
yield "spawnedpanda" + str(n)
from dragonfly.canvas import box2d, canvasargs
from bee.drone import dummydrone
from libcontext.pluginclasses import plugin_single_required
class parameters: pass
class myscene(bee.frame):
pandaclassname_ = bee.get_parameter("pandaclassname")
pandaname_ = bee.get_parameter("pandaname")
pandaicon_ = bee.get_parameter("pandaicon")
c1 = bee.configure("scene")
c1.import_mesh_EGG("models/environment")
a = NodePath("")
a.setScale(0.25)
a.setPos(-8, 42, 0)
mat = a.getMat()
m = (mat.getRow3(3), mat.getRow3(0), mat.getRow3(1), mat.getRow3(2))
c1.add_model_MATRIX(matrix=m)
c2 = bee.configure("scene")
c2.import_mesh_EGG("models/panda-model")
a = NodePath("")
a.setScale(0.005)
mat = a.getMat()
m = (mat.getRow3(3), mat.getRow3(0), mat.getRow3(1), mat.getRow3(2))
c2.add_actor_MATRIX(matrix=m, entityname=pandaname_)
c2.import_mesh_EGG("models/panda-walk4")
c2.add_animation("walk")
c3 = bee.configure("scene")
c3.import_mesh_EGG("models/panda-model")
a = NodePath("")
a.setScale(0.005)
mat = a.getMat()
m = (mat.getRow3(3), mat.getRow3(0), mat.getRow3(1), mat.getRow3(2))
c3.add_actorclass_MATRIX(matrix=m, actorclassname=pandaclassname_)
c3.import_mesh_EGG("models/panda-walk4")
c3.add_animation("walk")
box = box2d(50, 470, 96, 96)
params = parameters()
params.transparency = True
args = canvasargs("pandaicon.png", pandaicon_, box, params)
plugin = plugin_single_required(args)
pattern = ("canvas", "draw", "init", ("object", "image"))
d1 = dummydrone(plugindict={pattern: plugin})
i1 = bee.init("mousearea")
i1.register(pandaicon_, box)
del a, m, mat, box, params, args, plugin, pattern
class myhive(dragonfly.pandahive.pandahive):
pandaname = "mypanda"
pandaname_ = bee.attribute("pandaname")
pandaclassname = "pandaclass"
pandaclassname_ = bee.attribute("pandaclassname")
pandaicon = "pandaicon"
pandaicon_ = bee.attribute("pandaicon")
canvas = dragonfly.pandahive.pandacanvas()
mousearea = dragonfly.canvas.mousearea()
raiser = bee.raiser()
connect("evexc", raiser)
animation = dragonfly.scene.unbound.animation()
pandaid = dragonfly.std.variable("id")(pandaname_)
walk = dragonfly.std.variable("str")("walk")
connect(pandaid, animation.actor)
connect(walk, animation.animation_name)
key_w = dragonfly.io.keyboardsensor_trigger("W")
connect(key_w, animation.loop)
key_s = dragonfly.io.keyboardsensor_trigger("S")
connect(key_s, animation.stop)
pandaspawn = dragonfly.scene.spawn_actor()
v_panda = dragonfly.std.variable("id")(pandaclassname_)
connect(v_panda, pandaspawn)
panda_id = dragonfly.std.generator("id", id_generator)()
random_matrix = dragonfly.std.generator(("object", "matrix"), random_matrix_generator)()
w_spawn = dragonfly.std.weaver(("id", ("object", "matrix")))()
connect(panda_id, w_spawn.inp1)
connect(random_matrix, w_spawn.inp2)
do_spawn = dragonfly.std.transistor(("id", ("object", "matrix")))()
connect(w_spawn, do_spawn)
connect(do_spawn, pandaspawn.spawn_matrix)
key_z = dragonfly.io.keyboardsensor_trigger("Z")
connect(key_z, do_spawn)
pandaicon_click = dragonfly.io.mouseareasensor(pandaicon_)
connect(pandaicon_click, do_spawn)
myscene = myscene(
scene="scene",
pandaname=pandaname_,
pandaclassname=pandaclassname_,
canvas=canvas,
mousearea=mousearea,
pandaicon=pandaicon_
)
main = myhive().getinstance()
main.build("main")
main.place()
main.close()
main.init()
from direct.task import Task
def spinCameraTask(camera, task):
angleDegrees = task.time * 30.0
angleRadians = angleDegrees * (math.pi / 180.0)
camera.setPos(20 * math.sin(angleRadians), -20.0 * math.cos(angleRadians), 3)
camera.setHpr(angleDegrees, 0, 0)
return Task.cont
main.window.taskMgr.add(functools.partial(spinCameraTask, main.window.camera), "SpinCameraTask")
main.run()
| agoose77/hivesystem | manual/movingpanda/panda-6.py | Python | bsd-2-clause | 4,670 |
from optparse import OptionParser
import subprocess
import re
import os
import yaml
import glob
import hashlib
this_directory = os.path.dirname(os.path.realpath(__file__)) + "/"
defined_apps = {}
defined_baseconfigs = {}
defined_xtracfgs = {}
def get_argfoldername( args ):
if args == "" or args == None:
return "NO_ARGS"
else:
foldername = re.sub(r"[^a-z^A-Z^0-9]", "_", str(args).strip())
# For very long arg lists, create a hash of the input args
if len(str(args)) > 256:
foldername = "hashed_args_" + hashlib.md5(args).hexdigest()
return foldername
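# e.g. get_argfoldername("-k 4 --size=2048") returns "_k_4___size_2048"
# (illustrative args), while anything longer than 256 characters collapses
# to "hashed_args_<md5 of the args>".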
# Test whether the passed config matches any defined configs and add it to the configurations to run/collect.
def get_config(name, defined_baseconfigs, defined_xtracfgs):
tokens = name.split('-')
if tokens[0] not in defined_baseconfigs:
print "Could not fined {0} in defined basenames {1}".format(tokens[0], defined_baseconfigs)
return None
else:
config = (name, "", defined_baseconfigs[tokens[0]])
for token in tokens[1:]:
if token not in defined_xtracfgs:
print "Could not find {0} in defined xtraconfigs {1}".format(token, defined_xtracfgs)
return None
else:
oldName, oldXtra, oldBasename = config
config = \
(oldName, oldXtra + "\n#{0}\n{1}\n".format(token, defined_xtracfgs[token]), oldBasename)
return config
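# e.g. a hypothetical name like "GTX480-ptxplus" resolves to the base file
# registered for "GTX480" plus the extra_params registered under "ptxplus";
# each '-'-separated token after the first must be a defined xtracfg.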
def load_defined_yamls():
define_yamls = glob.glob(os.path.join(this_directory, 'apps/define-*.yml'))
for def_yaml in define_yamls:
parse_app_definition_yaml(def_yaml, defined_apps)  # glob already returns full paths
define_yamls = glob.glob(os.path.join(this_directory, 'configs/define-*.yml'))
for def_yaml in define_yamls:
parse_config_definition_yaml(def_yaml, defined_baseconfigs, defined_xtracfgs)  # glob already returns full paths
def parse_app_definition_yaml( def_yml, apps ):
benchmark_yaml = yaml.load(open(def_yml), Loader=yaml.FullLoader)
for suite in benchmark_yaml:
apps[suite] = []
for exe in benchmark_yaml[suite]['execs']:
exe_name = exe.keys()[0]
args_list = exe.values()[0]
apps[suite].append(( benchmark_yaml[suite]['exec_dir'],
benchmark_yaml[suite]['data_dirs'],
exe_name, args_list ))
apps[suite + ":" + exe_name] = []
apps[suite + ":" + exe_name].append( ( benchmark_yaml[suite]['exec_dir'],
benchmark_yaml[suite]['data_dirs'],
exe_name, args_list ) )
count = 0
for args in args_list:
apps[suite + ":" + exe_name + ":" + str(count) ] = []
apps[suite + ":" + exe_name + ":" + str(count) ].append( ( benchmark_yaml[suite]['exec_dir'],
benchmark_yaml[suite]['data_dirs'],
exe_name, [args] ) )
count += 1
return
def parse_config_definition_yaml( def_yml, defined_baseconfigs, defined_xtracfgs ):
configs_yaml = yaml.load(open( def_yml ), Loader=yaml.FullLoader)
for config in configs_yaml:
if 'base_file' in configs_yaml[config]:
defined_baseconfigs[config] = os.path.expandvars(configs_yaml[config]['base_file'])
elif 'extra_params' in configs_yaml[config]:
defined_xtracfgs[config] = configs_yaml[config]['extra_params']
return
def gen_apps_from_suite_list( app_list ):
benchmarks = []
for app in app_list:
benchmarks += defined_apps[app]
return benchmarks
def gen_configs_from_list( cfg_list ):
configs = []
for cfg in cfg_list:
configs.append(get_config(cfg, defined_baseconfigs, defined_xtracfgs))
return configs
def get_cuda_version(this_directory):
# Get CUDA version
nvcc_out_filename = os.path.join( this_directory, "nvcc_out.{0}.txt".format(os.getpid()) )
nvcc_out_file = open(nvcc_out_filename, 'w+')
subprocess.call(["nvcc", "--version"],\
stdout=nvcc_out_file)
nvcc_out_file.seek(0)
cuda_version = re.sub(r".*release (\d+\.\d+).*", r"\1", nvcc_out_file.read().strip().replace("\n"," "))
nvcc_out_file.close()
os.remove(nvcc_out_filename)
os.environ['CUDA_VERSION'] = cuda_version
return cuda_version
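# e.g. an nvcc banner containing "Cuda compilation tools, release 10.1,
# V10.1.243" (illustrative output) reduces to "10.1" after the re.sub above.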
# This function exists so that this file can accept both absolute and relative paths.
# If no name is provided it falls back to the default.
# Either way, it tests whether the absolute path exists and, if not, tries a relative path.
def file_option_test(name, default, this_directory):
if name == "":
if default == "":
return ""
else:
name = os.path.join(this_directory, default)
try:
with open(name): pass
except IOError:
name = os.path.join(os.getcwd(), name)
try:
with open(name): pass
except IOError:
exit("Error - cannot open file {0}".format(name))
return name
def dir_option_test(name, default, this_directory):
if name == "":
name = os.path.join(this_directory, default)
if not os.path.isdir(name):
name = os.path.join(os.getcwd(), name)
if not os.path.isdir(name):
exit("Error - cannot open file {0}".format(name))
return name
def parse_run_simulations_options():
parser = OptionParser()
parser.add_option("-B", "--benchmark_list", dest="benchmark_list",
help="a comma seperated list of benchmark suites to run. See apps/define-*.yml for " +\
"the benchmark suite names.",
default="rodinia_2.0-ft")
parser.add_option("-C", "--configs_list", dest="configs_list",
help="a comma seperated list of configs to run. See configs/define-*.yml for " +\
"the config names.",
default="GTX480")
parser.add_option("-p", "--benchmark_exec_prefix", dest="benchmark_exec_prefix",
help="When submitting the job to torque this string" +\
" is placed before the command line that runs the benchmark. " +\
" Useful when wanting to run valgrind.", default="")
parser.add_option("-r", "--run_directory", dest="run_directory",
help="Name of directory in which to run simulations",
default="")
parser.add_option("-n", "--no_launch", dest="no_launch", action="store_true",
help="When set, no torque jobs are launched. However, all"+\
" the setup for running is performed. ie, the run"+\
" directories are created and are ready to run."+\
" This can be useful when you want to create a new" +\
" configuration, but want to test it locally before "+\
" launching a bunch of jobs.")
parser.add_option("-s", "--so_dir", dest="so_dir",
help="Point this to the directory that your .so is stored in. If nothing is input here - "+\
"the scripts will assume that you are using the so built in GPGPUSIM_ROOT.",
default="")
parser.add_option("-N", "--launch_name", dest="launch_name", default="",
help="Pass if you want to name the launch. This will determine the name of the logfile.\n" +\
"If you do not name the file, it will just use the current date/time.")
parser.add_option("-T", "--trace_dir", dest="trace_dir", default="",
help="Pass this option to run the simulator in trace-driven mode."+\
" The directory passed should be the root of all the trace files.")
parser.add_option("-M", "--job_mem", dest="job_mem", default="",
help="Memory usgae of the job in MB.")
(options, args) = parser.parse_args()
# Parser seems to leave some whitespace on the options, getting rid of it
if options.trace_dir != "":
options.trace_dir = dir_option_test( options.trace_dir.strip(), "", this_directory )
options.configs_list = options.configs_list.strip()
options.benchmark_exec_prefix = options.benchmark_exec_prefix.strip()
options.benchmark_list = options.benchmark_list.strip()
options.run_directory = options.run_directory.strip()
options.so_dir = options.so_dir.strip()
options.launch_name = options.launch_name.strip()
options.job_mem = options.job_mem.strip()
return (options, args)
| tgrogers/gpgpu-sim_simulations | util/job_launching/common.py | Python | bsd-2-clause | 8,651 |
from incuna_test_utils.testcases.api_request import BaseAPIRequestTestCase
from . import factories
class APIRequestTestCase(BaseAPIRequestTestCase):
user_factory = factories.UserFactory
| incuna/rest-framework-push-notifications | tests/utils.py | Python | bsd-2-clause | 193 |
"""
Hash implementations for Numba types
"""
import math
import numpy as np
import sys
import ctypes
import warnings
from collections import namedtuple
import llvmlite.binding as ll
import llvmlite.llvmpy.core as lc
from llvmlite import ir
from numba.core.extending import (
overload, overload_method, intrinsic, register_jitable)
from numba.core import errors
from numba.core import types, utils
from numba.core.unsafe.bytes import grab_byte, grab_uint64_t
_py38_or_later = utils.PYVERSION >= (3, 8)
# This is Py_hash_t, which is a Py_ssize_t, which has sizeof(size_t):
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Include/pyport.h#L91-L96 # noqa: E501
_hash_width = sys.hash_info.width
_Py_hash_t = getattr(types, 'int%s' % _hash_width)
_Py_uhash_t = getattr(types, 'uint%s' % _hash_width)
# Constants from CPython source, obtained by various means:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Include/pyhash.h # noqa: E501
_PyHASH_INF = sys.hash_info.inf
_PyHASH_NAN = sys.hash_info.nan
_PyHASH_MODULUS = _Py_uhash_t(sys.hash_info.modulus)
_PyHASH_BITS = 31 if types.intp.bitwidth == 32 else 61 # mersenne primes
_PyHASH_MULTIPLIER = 0xf4243 # 1000003UL
_PyHASH_IMAG = _PyHASH_MULTIPLIER
_PyLong_SHIFT = sys.int_info.bits_per_digit
_Py_HASH_CUTOFF = sys.hash_info.cutoff
_Py_hashfunc_name = sys.hash_info.algorithm
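# For reference (a fact about typical 64-bit CPython builds, not computed
# here): _hash_width == 64 and _PyHASH_MODULUS == 2**61 - 1, while 32-bit
# builds use the Mersenne prime 2**31 - 1.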
# hash(obj) is implemented by calling obj.__hash__()
@overload(hash)
def hash_overload(obj):
def impl(obj):
return obj.__hash__()
return impl
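# CPython reserves -1 as the error return value for hash functions, so any
# legitimately computed hash of -1 must be folded to -2; process_return
# below mirrors that convention.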
@register_jitable
def process_return(val):
asint = _Py_hash_t(val)
if (asint == int(-1)):
asint = int(-2)
return asint
# This is a translation of CPython's _Py_HashDouble:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Python/pyhash.c#L34-L129 # noqa: E501
@register_jitable(locals={'x': _Py_uhash_t,
'y': _Py_uhash_t,
'm': types.double,
'e': types.intc,
'sign': types.intc,
'_PyHASH_MODULUS': _Py_uhash_t,
'_PyHASH_BITS': types.intc})
def _Py_HashDouble(v):
if not np.isfinite(v):
if (np.isinf(v)):
if (v > 0):
return _PyHASH_INF
else:
return -_PyHASH_INF
else:
return _PyHASH_NAN
m, e = math.frexp(v)
sign = 1
if (m < 0):
sign = -1
m = -m
# process 28 bits at a time; this should work well both for binary
# and hexadecimal floating point.
x = 0
while (m):
x = ((x << 28) & _PyHASH_MODULUS) | x >> (_PyHASH_BITS - 28)
m *= 268435456.0 # /* 2**28 */
e -= 28
y = int(m) # /* pull out integer part */
m -= y
x += y
if x >= _PyHASH_MODULUS:
x -= _PyHASH_MODULUS
# /* adjust for the exponent; first reduce it modulo _PyHASH_BITS */
if e >= 0:
e = e % _PyHASH_BITS
else:
e = _PyHASH_BITS - 1 - ((-1 - e) % _PyHASH_BITS)
x = ((x << e) & _PyHASH_MODULUS) | x >> (_PyHASH_BITS - e)
x = x * sign
return process_return(x)
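# Worked example (64-bit build, assuming the constants above): for v = 0.5,
# frexp gives m = 0.5, e = 0; one loop iteration leaves x = 2**27 and
# e = -28, which reduces to a left shift of 33, so hash(0.5) == 2**60
# == 1152921504606846976, matching CPython.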
@intrinsic
def _fpext(tyctx, val):
def impl(cgctx, builder, signature, args):
val = args[0]
return builder.fpext(val, lc.Type.double())
sig = types.float64(types.float32)
return sig, impl
# This is a translation of CPython's long_hash, but restricted to the numerical
# domain reachable by int64/uint64 (i.e. no BigInt like support):
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/longobject.c#L2934-L2989 # noqa: E501
# obdigit is a uint32_t which is typedef'd to digit
# int32_t is typedef'd to sdigit
@register_jitable(locals={'x': _Py_uhash_t,
'p1': _Py_uhash_t,
'p2': _Py_uhash_t,
'p3': _Py_uhash_t,
'p4': _Py_uhash_t,
'_PyHASH_MODULUS': _Py_uhash_t,
'_PyHASH_BITS': types.int32,
'_PyLong_SHIFT': types.int32,})
def _long_impl(val):
# This function assumes val came from a long int repr with val being a
# uint64_t this means having to split the input into PyLong_SHIFT size
# chunks in an unsigned hash wide type, max numba can handle is a 64bit int
# mask to select low _PyLong_SHIFT bits
_tmp_shift = 32 - _PyLong_SHIFT
mask_shift = (~types.uint32(0x0)) >> _tmp_shift
# a 64bit wide max means Numba only needs 3 x 30 bit values max,
# or 5 x 15 bit values max on 32bit platforms
i = (64 // _PyLong_SHIFT) + 1
# alg as per hash_long
x = 0
p3 = (_PyHASH_BITS - _PyLong_SHIFT)
for idx in range(i - 1, -1, -1):
p1 = x << _PyLong_SHIFT
p2 = p1 & _PyHASH_MODULUS
p4 = x >> p3
x = p2 | p4
# the shift and mask splits out the `ob_digit` parts of a Long repr
x += types.uint32((val >> idx * _PyLong_SHIFT) & mask_shift)
if x >= _PyHASH_MODULUS:
x -= _PyHASH_MODULUS
return _Py_hash_t(x)
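# Worked example (64-bit build): hash(2**62) == (2**62) % _PyHASH_MODULUS
# == 2, because 2**62 = 2 * (2**61 - 1) + 2.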
# This has no CPython equivalent, CPython uses long_hash.
@overload_method(types.Integer, '__hash__')
@overload_method(types.Boolean, '__hash__')
def int_hash(val):
_HASH_I64_MIN = -2 if sys.maxsize <= 2 ** 32 else -4
_SIGNED_MIN = types.int64(-0x8000000000000000)
# Find a suitable type to hold a "big" value, i.e. iinfo(ty).min/max
# this is to ensure e.g. int32.min is handled ok as it's abs() is its value
_BIG = types.int64 if getattr(val, 'signed', False) else types.uint64
# this is a bit involved due to the CPython repr of ints
def impl(val):
# If the magnitude is under PyHASH_MODULUS, just return the
# value val as the hash, with a couple of special cases:
# 1. it's 0, in which case return 0
# 2. it's signed int minimum value, return the value CPython computes
# but Numba cannot as there's no type wide enough to hold the shifts.
#
# If the magnitude is greater than PyHASH_MODULUS then... if the value
# is negative then negate it switch the sign on the hash once computed
# and use the standard wide unsigned hash implementation
val = _BIG(val)
mag = abs(val)
if mag < _PyHASH_MODULUS:
if val == 0:
ret = 0
elif val == _SIGNED_MIN: # e.g. int64 min, -0x8000000000000000
ret = _Py_hash_t(_HASH_I64_MIN)
else:
ret = _Py_hash_t(val)
else:
needs_negate = False
if val < 0:
val = -val
needs_negate = True
ret = _long_impl(val)
if needs_negate:
ret = -ret
return process_return(ret)
return impl
# This is a translation of CPython's float_hash:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/floatobject.c#L528-L532 # noqa: E501
@overload_method(types.Float, '__hash__')
def float_hash(val):
if val.bitwidth == 64:
def impl(val):
hashed = _Py_HashDouble(val)
return hashed
else:
def impl(val):
# widen the 32bit float to 64bit
fpextended = np.float64(_fpext(val))
hashed = _Py_HashDouble(fpextended)
return hashed
return impl
# This is a translation of CPython's complex_hash:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/complexobject.c#L408-L428 # noqa: E501
@overload_method(types.Complex, '__hash__')
def complex_hash(val):
def impl(val):
hashreal = hash(val.real)
hashimag = hash(val.imag)
# Note: if the imaginary part is 0, hashimag is 0 now,
# so the following returns hashreal unchanged. This is
# important because numbers of different types that
# compare equal must have the same hash value, so that
# hash(x + 0*j) must equal hash(x).
combined = hashreal + _PyHASH_IMAG * hashimag
return process_return(combined)
return impl
if _py38_or_later:
# Python 3.8 strengthened its hash alg for tuples.
# This is a translation of CPython's tuplehash for Python >=3.8
# https://github.com/python/cpython/blob/b738237d6792acba85b1f6e6c8993a812c7fd815/Objects/tupleobject.c#L338-L391 # noqa: E501
# These consts are needed for this alg variant, they are from:
# https://github.com/python/cpython/blob/b738237d6792acba85b1f6e6c8993a812c7fd815/Objects/tupleobject.c#L353-L363 # noqa: E501
if _Py_uhash_t.bitwidth // 8 > 4:
_PyHASH_XXPRIME_1 = _Py_uhash_t(11400714785074694791)
_PyHASH_XXPRIME_2 = _Py_uhash_t(14029467366897019727)
_PyHASH_XXPRIME_5 = _Py_uhash_t(2870177450012600261)
@register_jitable(locals={'x': types.uint64})
def _PyHASH_XXROTATE(x):
# Rotate left 31 bits
return ((x << types.uint64(31)) | (x >> types.uint64(33)))
else:
_PyHASH_XXPRIME_1 = _Py_uhash_t(2654435761)
_PyHASH_XXPRIME_2 = _Py_uhash_t(2246822519)
_PyHASH_XXPRIME_5 = _Py_uhash_t(374761393)
@register_jitable(locals={'x': types.uint64})
def _PyHASH_XXROTATE(x):
# Rotate left 13 bits
return ((x << types.uint64(13)) | (x >> types.uint64(16)))
# Python 3.7+ has literal_unroll, this means any homogeneous and
# heterogeneous tuples can use the same alg and just be unrolled.
from numba import literal_unroll
@register_jitable(locals={'acc': _Py_uhash_t, 'lane': _Py_uhash_t,
'_PyHASH_XXPRIME_5': _Py_uhash_t,
'_PyHASH_XXPRIME_1': _Py_uhash_t,
'tl': _Py_uhash_t})
def _tuple_hash(tup):
tl = len(tup)
acc = _PyHASH_XXPRIME_5
for x in literal_unroll(tup):
lane = hash(x)
if lane == _Py_uhash_t(-1):
return -1
acc += lane * _PyHASH_XXPRIME_2
acc = _PyHASH_XXROTATE(acc)
acc *= _PyHASH_XXPRIME_1
acc += tl ^ (_PyHASH_XXPRIME_5 ^ _Py_uhash_t(3527539))
if acc == _Py_uhash_t(-1):
return process_return(1546275796)
return process_return(acc)
else:
# This is a translation of CPython's tuplehash:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/tupleobject.c#L347-L369 # noqa: E501
@register_jitable(locals={'x': _Py_uhash_t,
'y': _Py_hash_t,
'mult': _Py_uhash_t,
'l': _Py_hash_t, })
def _tuple_hash(tup):
tl = len(tup)
mult = _PyHASH_MULTIPLIER
x = _Py_uhash_t(0x345678)
# in C this is while(--l >= 0), i is indexing tup instead of *tup++
for i, l in enumerate(range(tl - 1, -1, -1)):
y = hash(tup[i])
xxory = (x ^ y)
x = xxory * mult
mult += _Py_hash_t((_Py_uhash_t(82520) + l + l))
x += _Py_uhash_t(97531)
return process_return(x)
# This is an obfuscated translation of CPython's tuplehash:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/tupleobject.c#L347-L369 # noqa: E501
# The obfuscation arises for a heterogeneous tuple because each tuple member
# needs a potentially different hash() function called for it. This cannot be
# done at runtime, as there's no way to iterate a heterogeneous tuple, so it
# is achieved by essentially unrolling the loop over the members, inserting a
# per-type hash function call for each member, and then simply computing the
# hash value in an inlined/rolling fashion.
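# As an illustrative sketch (not part of the implementation), for a
# heterogeneous 2-tuple (a, b) the generated code is equivalent to:
#   x = 0x345678
#   x = (x ^ hash(a)) * mult;  mult += 82520 + 1 + 1
#   x = (x ^ hash(b)) * mult;  mult += 82520 + 0 + 0
#   x += 97531
# with each hash() call resolved per-type at compile time.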
@intrinsic
def _tuple_hash_resolve(tyctx, val):
def impl(cgctx, builder, signature, args):
typingctx = cgctx.typing_context
fnty = typingctx.resolve_value_type(hash)
tupty, = signature.args
tup, = args
lty = cgctx.get_value_type(signature.return_type)
x = ir.Constant(lty, 0x345678)
mult = ir.Constant(lty, _PyHASH_MULTIPLIER)
shift = ir.Constant(lty, 82520)
tl = len(tupty)
for i, packed in enumerate(zip(tupty.types, range(tl - 1, -1, -1))):
ty, l = packed
sig = fnty.get_call_type(tyctx, (ty,), {})
impl = cgctx.get_function(fnty, sig)
tuple_val = builder.extract_value(tup, i)
y = impl(builder, (tuple_val,))
xxory = builder.xor(x, y)
x = builder.mul(xxory, mult)
lconst = ir.Constant(lty, l)
mult = builder.add(mult, shift)
mult = builder.add(mult, lconst)
mult = builder.add(mult, lconst)
x = builder.add(x, ir.Constant(lty, 97531))
return x
sig = _Py_hash_t(val)
return sig, impl
@overload_method(types.BaseTuple, '__hash__')
def tuple_hash(val):
if _py38_or_later or isinstance(val, types.Sequence):
def impl(val):
return _tuple_hash(val)
return impl
else:
def impl(val):
hashed = _Py_hash_t(_tuple_hash_resolve(val))
return process_return(hashed)
return impl
# ------------------------------------------------------------------------------
# String/bytes hashing needs hash seed info; this is from:
# https://stackoverflow.com/a/41088757
# with thanks to Martijn Pieters
#
# Developer note:
# CPython makes use of an internal "hashsecret" which is essentially a struct
# containing some state that is set on CPython initialization and contains magic
# numbers used particularly in unicode/string hashing. This code binds to the
# Python runtime libraries in use by the current process and reads the
# "hashsecret" state so that it can be used by Numba. As this is done at runtime
# the behaviour and influence of the PYTHONHASHSEED environment variable is
# accommodated.
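# For example (shell usage, illustrative only): running
#   PYTHONHASHSEED=0 python -c "print(hash('abc'))"
# twice yields the same value, whereas with hash randomization enabled the
# value differs between processes; the "hashsecret" state read below is what
# captures that per-process randomness for Numba's use.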
from ctypes import ( # noqa
c_size_t,
c_ubyte,
c_uint64,
pythonapi,
Structure,
Union,
) # noqa
class FNV(Structure):
_fields_ = [
('prefix', c_size_t),
('suffix', c_size_t)
]
class SIPHASH(Structure):
_fields_ = [
('k0', c_uint64),
('k1', c_uint64),
]
class DJBX33A(Structure):
_fields_ = [
('padding', c_ubyte * 16),
('suffix', c_size_t),
]
class EXPAT(Structure):
_fields_ = [
('padding', c_ubyte * 16),
('hashsalt', c_size_t),
]
class _Py_HashSecret_t(Union):
_fields_ = [
# ensure 24 bytes
('uc', c_ubyte * 24),
# two Py_hash_t for FNV
('fnv', FNV),
# two uint64 for SipHash24
('siphash', SIPHASH),
# a different (!) Py_hash_t for small string optimization
('djbx33a', DJBX33A),
('expat', EXPAT),
]
_hashsecret_entry = namedtuple('_hashsecret_entry', ['symbol', 'value'])
# Only a few members are needed at present
def _build_hashsecret():
"""Read hash secret from the Python process
Returns
-------
info : dict
- keys are "djbx33a_suffix", "siphash_k0", siphash_k1".
- values are the namedtuple[symbol:str, value:int]
"""
# Read hashsecret and inject it into the LLVM symbol map under the
# prefix `_numba_hashsecret_`.
pyhashsecret = _Py_HashSecret_t.in_dll(pythonapi, '_Py_HashSecret')
info = {}
def inject(name, val):
symbol_name = "_numba_hashsecret_{}".format(name)
val = ctypes.c_uint64(val)
addr = ctypes.addressof(val)
ll.add_symbol(symbol_name, addr)
info[name] = _hashsecret_entry(symbol=symbol_name, value=val)
inject('djbx33a_suffix', pyhashsecret.djbx33a.suffix)
inject('siphash_k0', pyhashsecret.siphash.k0)
inject('siphash_k1', pyhashsecret.siphash.k1)
return info
_hashsecret = _build_hashsecret()
# ------------------------------------------------------------------------------
if _Py_hashfunc_name in ('siphash24', 'fnv'):
    # Check for use of the FNV hashing alg and warn users that it's not
    # implemented: functionality relying on properties derived from hashing
    # will be fine, but hash values themselves are likely to be different.
if _Py_hashfunc_name == 'fnv':
msg = ("FNV hashing is not implemented in Numba. See PEP 456 "
"https://www.python.org/dev/peps/pep-0456/ "
"for rationale over not using FNV. Numba will continue to work, "
"but hashes for built in types will be computed using "
"siphash24. This will permit e.g. dictionaries to continue to "
"behave as expected, however anything relying on the value of "
"the hash opposed to hash as a derived property is likely to "
"not work as expected.")
warnings.warn(msg)
# This is a translation of CPython's siphash24 function:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Python/pyhash.c#L287-L413 # noqa: E501
# /* *********************************************************************
# <MIT License>
# Copyright (c) 2013 Marek Majkowski <marek@popcount.org>
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense,
# and/or sell copies of the Software, and to permit persons to whom the
# Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
# </MIT License>
# Original location:
# https://github.com/majek/csiphash/
# Solution inspired by code from:
# Samuel Neves (supercop/crypto_auth/siphash24/little)
    # djb (supercop/crypto_auth/siphash24/little2)
# Jean-Philippe Aumasson (https://131002.net/siphash/siphash24.c)
# Modified for Python by Christian Heimes:
# - C89 / MSVC compatibility
# - _rotl64() on Windows
# - letoh64() fallback
# */
@register_jitable(locals={'x': types.uint64,
'b': types.uint64, })
def _ROTATE(x, b):
return types.uint64(((x) << (b)) | ((x) >> (types.uint64(64) - (b))))
@register_jitable(locals={'a': types.uint64,
'b': types.uint64,
'c': types.uint64,
'd': types.uint64,
's': types.uint64,
't': types.uint64, })
def _HALF_ROUND(a, b, c, d, s, t):
a += b
c += d
b = _ROTATE(b, s) ^ a
d = _ROTATE(d, t) ^ c
a = _ROTATE(a, 32)
return a, b, c, d
@register_jitable(locals={'v0': types.uint64,
'v1': types.uint64,
'v2': types.uint64,
'v3': types.uint64, })
def _DOUBLE_ROUND(v0, v1, v2, v3):
v0, v1, v2, v3 = _HALF_ROUND(v0, v1, v2, v3, 13, 16)
v2, v1, v0, v3 = _HALF_ROUND(v2, v1, v0, v3, 17, 21)
v0, v1, v2, v3 = _HALF_ROUND(v0, v1, v2, v3, 13, 16)
v2, v1, v0, v3 = _HALF_ROUND(v2, v1, v0, v3, 17, 21)
return v0, v1, v2, v3
@register_jitable(locals={'v0': types.uint64,
'v1': types.uint64,
'v2': types.uint64,
'v3': types.uint64,
'b': types.uint64,
'mi': types.uint64,
'tmp': types.Array(types.uint64, 1, 'C'),
't': types.uint64,
'mask': types.uint64,
'jmp': types.uint64,
'ohexefef': types.uint64})
def _siphash24(k0, k1, src, src_sz):
b = types.uint64(src_sz) << 56
v0 = k0 ^ types.uint64(0x736f6d6570736575)
v1 = k1 ^ types.uint64(0x646f72616e646f6d)
v2 = k0 ^ types.uint64(0x6c7967656e657261)
v3 = k1 ^ types.uint64(0x7465646279746573)
idx = 0
while (src_sz >= 8):
mi = grab_uint64_t(src, idx)
idx += 1
src_sz -= 8
v3 ^= mi
v0, v1, v2, v3 = _DOUBLE_ROUND(v0, v1, v2, v3)
v0 ^= mi
# this is the switch fallthrough:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Python/pyhash.c#L390-L400 # noqa: E501
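        # e.g. with src_sz == 3 remaining after the main loop, only the
        # `>= 3`, `>= 2` and `>= 1` branches below run, packing the trailing
        # bytes 2, 1 and 0 into the low 24 bits of t.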
t = types.uint64(0x0)
boffset = idx * 8
ohexefef = types.uint64(0xff)
if src_sz >= 7:
jmp = (6 * 8)
mask = ~types.uint64(ohexefef << jmp)
t = (t & mask) | (types.uint64(grab_byte(src, boffset + 6))
<< jmp)
if src_sz >= 6:
jmp = (5 * 8)
mask = ~types.uint64(ohexefef << jmp)
t = (t & mask) | (types.uint64(grab_byte(src, boffset + 5))
<< jmp)
if src_sz >= 5:
jmp = (4 * 8)
mask = ~types.uint64(ohexefef << jmp)
t = (t & mask) | (types.uint64(grab_byte(src, boffset + 4))
<< jmp)
if src_sz >= 4:
t &= types.uint64(0xffffffff00000000)
for i in range(4):
jmp = i * 8
mask = ~types.uint64(ohexefef << jmp)
t = (t & mask) | (types.uint64(grab_byte(src, boffset + i))
<< jmp)
if src_sz >= 3:
jmp = (2 * 8)
mask = ~types.uint64(ohexefef << jmp)
t = (t & mask) | (types.uint64(grab_byte(src, boffset + 2))
<< jmp)
if src_sz >= 2:
jmp = (1 * 8)
mask = ~types.uint64(ohexefef << jmp)
t = (t & mask) | (types.uint64(grab_byte(src, boffset + 1))
<< jmp)
if src_sz >= 1:
mask = ~(ohexefef)
t = (t & mask) | (types.uint64(grab_byte(src, boffset + 0)))
b |= t
v3 ^= b
v0, v1, v2, v3 = _DOUBLE_ROUND(v0, v1, v2, v3)
v0 ^= b
v2 ^= ohexefef
v0, v1, v2, v3 = _DOUBLE_ROUND(v0, v1, v2, v3)
v0, v1, v2, v3 = _DOUBLE_ROUND(v0, v1, v2, v3)
t = (v0 ^ v1) ^ (v2 ^ v3)
return t
else:
msg = "Unsupported hashing algorithm in use %s" % _Py_hashfunc_name
raise ValueError(msg)
@intrinsic
def _inject_hashsecret_read(tyctx, name):
"""Emit code to load the hashsecret.
"""
if not isinstance(name, types.StringLiteral):
raise errors.TypingError("requires literal string")
sym = _hashsecret[name.literal_value].symbol
resty = types.uint64
sig = resty(name)
def impl(cgctx, builder, sig, args):
mod = builder.module
try:
# Search for existing global
gv = mod.get_global(sym)
except KeyError:
            # Inject the symbol if it does not already exist.
gv = ir.GlobalVariable(mod, ir.IntType(64), name=sym)
v = builder.load(gv)
return v
return sig, impl
def _load_hashsecret(name):
return _hashsecret[name].value
@overload(_load_hashsecret)
def _impl_load_hashsecret(name):
def imp(name):
return _inject_hashsecret_read(name)
return imp
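# Note on the dual dispatch above (a sketch of the mechanics, not new
# behaviour): called from the interpreter, _load_hashsecret('siphash_k0')
# returns the ctypes value recorded in _hashsecret, while inside jitted code
# the @overload version instead loads the injected
# _numba_hashsecret_siphash_k0 global from the LLVM module.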
# This is a translation of CPython's _Py_HashBytes:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Python/pyhash.c#L145-L191 # noqa: E501
@register_jitable(locals={'_hash': _Py_uhash_t})
def _Py_HashBytes(val, _len):
if (_len == 0):
return process_return(0)
if (_len < _Py_HASH_CUTOFF):
# TODO: this branch needs testing, needs a CPython setup for it!
# /* Optimize hashing of very small strings with inline DJBX33A. */
_hash = _Py_uhash_t(5381) # /* DJBX33A starts with 5381 */
for idx in range(_len):
_hash = ((_hash << 5) + _hash) + np.uint8(grab_byte(val, idx))
_hash ^= _len
_hash ^= _load_hashsecret('djbx33a_suffix')
else:
tmp = _siphash24(types.uint64(_load_hashsecret('siphash_k0')),
types.uint64(_load_hashsecret('siphash_k1')),
val, _len)
_hash = process_return(tmp)
return process_return(_hash)
# This is an approximate translation of CPython's unicode_hash:
# https://github.com/python/cpython/blob/d1dd6be613381b996b9071443ef081de8e5f3aff/Objects/unicodeobject.c#L11635-L11663 # noqa: E501
@overload_method(types.UnicodeType, '__hash__')
def unicode_hash(val):
from numba.cpython.unicode import _kind_to_byte_width
def impl(val):
kindwidth = _kind_to_byte_width(val._kind)
_len = len(val)
# use the cache if possible
current_hash = val._hash
if current_hash != -1:
return current_hash
else:
            # cannot write the hash value to the cache in the unicode struct
            # because the struct is passed by value, which makes the struct
            # member immutable
return _Py_HashBytes(val._data, kindwidth * _len)
return impl
|
sklam/numba
|
numba/cpython/hashing.py
|
Python
|
bsd-2-clause
| 26,149
|
#!/usr/bin/env python
import os
import readline
from pprint import pprint
from flask import *
from mavlinkapp import *
os.environ['PYTHONINSPECT'] = 'True'
|
btashton/mavlink-zmq
|
flask/shell.py
|
Python
|
bsd-2-clause
| 158
|
import sys
import unittest
from dynd import nd, ndt
class TestArrayGetItem(unittest.TestCase):
def test_strided_dim(self):
a = nd.empty(100, ndt.int32)
a[...] = nd.range(100)
b = list(range(100))
self.assertEqual(nd.type_of(a), ndt.type('strided * int32'))
self.assertEqual(nd.type_of(a[...]), ndt.type('strided * int32'))
self.assertEqual(nd.type_of(a[0]), ndt.int32)
self.assertEqual(nd.type_of(a[0:1]), ndt.type('strided * int32'))
self.assertEqual(nd.as_py(a[0]), b[0])
self.assertEqual(nd.as_py(a[99]), b[99])
self.assertEqual(nd.as_py(a[-1]), b[-1])
self.assertEqual(nd.as_py(a[-100]), b[-100])
self.assertEqual(nd.as_py(a[-101:]), b[-101:])
self.assertEqual(nd.as_py(a[-5:101:2]), b[-5:101:2])
self.assertRaises(IndexError, lambda x : x[-101], a)
self.assertRaises(IndexError, lambda x : x[100], a)
def test_fixed_dim(self):
a = nd.empty('100 * int32')
a[...] = nd.range(100)
b = list(range(100))
self.assertEqual(nd.type_of(a), ndt.type('100 * int32'))
self.assertEqual(nd.type_of(a[...]), ndt.type('100 * int32'))
self.assertEqual(nd.type_of(a[0]), ndt.int32)
self.assertEqual(nd.type_of(a[0:1]), ndt.type('strided * int32'))
self.assertEqual(nd.as_py(a[0]), b[0])
self.assertEqual(nd.as_py(a[99]), b[99])
self.assertEqual(nd.as_py(a[-1]), b[-1])
self.assertEqual(nd.as_py(a[-100]), b[-100])
self.assertEqual(nd.as_py(a[-101:]), b[-101:])
self.assertEqual(nd.as_py(a[-5:101:2]), b[-5:101:2])
self.assertRaises(IndexError, lambda x : x[-101], a)
self.assertRaises(IndexError, lambda x : x[100], a)
def test_var_dim(self):
a = nd.empty('var * int32')
a[...] = nd.range(100)
b = list(range(100))
self.assertEqual(nd.type_of(a), ndt.type('var * int32'))
self.assertEqual(nd.type_of(a[...]), ndt.type('var * int32'))
self.assertEqual(nd.type_of(a[:]), ndt.type('strided * int32'))
self.assertEqual(nd.type_of(a[0]), ndt.int32)
self.assertEqual(nd.type_of(a[0:1]), ndt.type('strided * int32'))
self.assertEqual(nd.as_py(a[0]), b[0])
self.assertEqual(nd.as_py(a[99]), b[99])
self.assertEqual(nd.as_py(a[-1]), b[-1])
self.assertEqual(nd.as_py(a[-100]), b[-100])
self.assertEqual(nd.as_py(a[-101:]), b[-101:])
self.assertEqual(nd.as_py(a[-5:101:2]), b[-5:101:2])
self.assertRaises(IndexError, lambda x : x[-101], a)
self.assertRaises(IndexError, lambda x : x[100], a)
def test_struct(self):
a = nd.parse_json('{x:int32, y:string, z:float32}',
'{"x":20, "y":"testing one two three", "z":-3.25}')
self.assertEqual(nd.type_of(a), ndt.type('{x:int32, y:string, z:float32}'))
self.assertEqual(nd.type_of(a[...]), ndt.type('{x:int32, y:string, z:float32}'))
self.assertEqual(nd.type_of(a[0]), ndt.int32)
self.assertEqual(nd.type_of(a[1]), ndt.string)
self.assertEqual(nd.type_of(a[2]), ndt.float32)
self.assertEqual(nd.type_of(a[-3]), ndt.int32)
self.assertEqual(nd.type_of(a[-2]), ndt.string)
self.assertEqual(nd.type_of(a[-1]), ndt.float32)
self.assertEqual(nd.type_of(a[1:]), ndt.make_struct([ndt.string, ndt.float32], ['y', 'z']))
self.assertEqual(nd.type_of(a[::-2]), ndt.make_struct([ndt.float32, ndt.int32], ['z', 'x']))
self.assertEqual(nd.as_py(a[0]), 20)
self.assertEqual(nd.as_py(a[1]), "testing one two three")
self.assertEqual(nd.as_py(a[2]), -3.25)
self.assertEqual(nd.as_py(a[1:]), {'y':'testing one two three', 'z':-3.25})
self.assertEqual(nd.as_py(a[::-2]), {'x':20, 'z':-3.25})
if __name__ == '__main__':
unittest.main()
|
aterrel/dynd-python
|
dynd/tests/test_array_getitem.py
|
Python
|
bsd-2-clause
| 3,879
|
#!/usr/bin/env python
from distutils.core import setup
setup(
name="templatedict",
license="BSD",
version="0.1.0",
description="A string dictionary which allows its keys being replaced in its values.",
author="Nico Mandery",
url="https://github.com/geops/python-templatedict",
packages=["templatedict"]
)
|
geops/python-templatedict
|
setup.py
|
Python
|
bsd-2-clause
| 371
|
from ambry.bundle import Bundle
class Bundle(Bundle):
pass
|
CivicKnowledge/ambry
|
test/bundle_tests/example.com/documentation/bundle.py
|
Python
|
bsd-2-clause
| 64
|
from unittest import TestCase
from perjury import generators as g
from perjury import util
from perjury.exceptions import UniqueValueTimeoutError
class TestUniqueDecorator(TestCase):
def test_is_pretty_unique(self):
        # This is not the most scientific way to test it, but we have slightly
        # more than 400 usernames; if we can generate 400 unique usernames 1000
        # times, it is quite likely that this works.
for i in xrange(1000):
unique_username = util.unique(g.username)
seen = set()
for i in xrange(400):
username = unique_username()
assert username not in seen
seen.add(username)
def test_overflow(self):
generator = util.unique(g.Choice(choices=(1, 2, 3)))
generator()
generator()
generator()
self.assertRaises(UniqueValueTimeoutError, generator)
class TestIterableUtils(TestCase):
def test_forever(self):
forever_usernames = util.forever(g.username)
count = 0
for username in forever_usernames:
count += 1
# 100,000 is basically forever right?
if count > 100000:
break
def test_times(self):
three_usernames = util.times(g.username, 3)
count = 0
for username in three_usernames:
count += 1
assert count == 3
def test_composability(self):
for i in xrange(1000):
unique_usernames = util.unique(g.username)
many_unique_usernames = util.times(unique_usernames, 400)
seen = set()
count = 0
for username in many_unique_usernames:
count += 1
assert username not in seen
seen.add(username)
assert count == 400
|
pipermerriam/perjury
|
tests/test_util.py
|
Python
|
bsd-2-clause
| 1,843
|
from .decorators import invertibleGenerator, coroutine
def pushFromIterable(iterable, target):
try:
for elem in iterable:
target.send(elem)
target.close()
except StopIteration:
pass
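# A minimal usage sketch (illustrative only, using coReceive defined below):
#   pushFromIterable([1, 2, 3], coReceive())
# sends each value into the coroutine sink and then closes it.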
@invertibleGenerator
def genPairs(iterable):
""" Aggregate two consecutive values into pairs """
buf = []
for elem in iterable:
buf.append(elem)
if len(buf) >= 2:
yield tuple(buf)
buf = []
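# Plain-generator behaviour, ignoring the invertibleGenerator wrapper (whose
# semantics live in .decorators):
#   list(genPairs([1, 2, 3, 4]))  ->  [(1, 2), (3, 4)]
# A trailing unpaired element (e.g. from [1, 2, 3]) is dropped.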
@invertibleGenerator
def genFilter(predicate, iterable):
""" Filter based on predicate """
for elem in iterable:
if predicate(elem):
yield elem
@invertibleGenerator
def genPassthrough(iterable):
""" Pass values through without modification """
for val in iterable:
yield val
@invertibleGenerator
def genMap(func, iterable):
""" Map function on all values """
for val in iterable:
yield func(val)
@coroutine
def coSplit(predicate, trueTarget, falseTarget):
while True:
val = (yield)
if predicate(val):
trueTarget.send(val)
else:
falseTarget.send(val)
trueTarget.close()
falseTarget.close()
@coroutine
def coReceive():
while True:
val = (yield)
print("Got %s" % str(val))
|
KholdStare/generators-to-coroutines
|
generators_to_coroutines/tools.py
|
Python
|
bsd-2-clause
| 1,301
|
#
# Copyright 2012, Couchbase, Inc.
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
class Reader(object):
def __init__(self, fp):
pass
def __iter__(self):
return self
def next(self):
raise NotImplementedError
def close(self):
pass
class Writer(object):
def __init__(self, fp):
pass
def write(self, record):
raise NotImplementedError
def close(self):
pass
|
TOTVS/mdmpublic
|
couchbase-cli/lib/python/couchbase/migrator/migrator.py
|
Python
|
bsd-2-clause
| 969
|
from os.path import basename
from procedure_names import display_name
def load(file):
data = {}
for line in file:
F = line.split(':')
if len(F) == 1:
file = basename(F[0].strip())
if file not in data:
data[file] = []
continue
assert file is not None
name = display_name[F[0].strip()]
if name is None:
continue
F = F[1].split()
time = int(F[1])
data[file].append((name, time))
return data
|
WojciechMula/toys
|
avx512-remove-spaces/scripts/parse_speed.py
|
Python
|
bsd-2-clause
| 534
|
from UCache import UCache
import User
import UserMemo
import json
from errors import *
class UserManager:
users = {}
@staticmethod
def HandleLogin(svc, username, passwd):
user = UserManager.LoadUser(username)
if (user == None):
raise Unauthorized('Login failed')
if (user.Authorize(passwd)):
session = Session(user, svc.client_address[0])
ret = {}
ret['session'] = session.GetID()
svc.writedata(json.dumps(ret))
else:
raise Unauthorized('Login failed')
@staticmethod
def LoadUser(user):
userec = UCache.GetUser(user)
if (userec == None):
return None
user = userec.userid
if (user not in UserManager.users):
ruser = UserManager.LoadNewUser(user)
if (ruser == None):
return None
UserManager.users[user] = ruser
return UserManager.users[user]
@staticmethod
def LoadUserByUid(uid):
userec = UCache.GetUserByUid(uid)
if userec is None:
return None
user = userec.userid
return UserManager.LoadUser(user)
@staticmethod
def LoadNewUser(user):
userec = UCache.GetUser(user)
if (userec == None):
return None
umemo = UserMemo.UserMemoMgr.LoadUsermemo(user)
if (umemo is None):
return None
ruser = User.User(user, userec, umemo)
return ruser
from Session import Session
|
HenryHu/pybbs
|
UserManager.py
|
Python
|
bsd-2-clause
| 1,528
|
# -*- coding: UTF-8 -*-
# Copyright 2014-2018 Rumma & Ko Ltd
# License: GNU Affero General Public License v3 (see file COPYING for details)
from __future__ import unicode_literals
from django.db import models
from lino.api import dd, _, rt
from lino.core.diff import ChangeWatcher
from lino.utils.format_date import fds
from .fields import IBANField, BICField, IBAN_FORMFIELD
from .utils import belgian_nban_to_iban_bic, iban2bic
from .roles import SepaUser, SepaStaff
from lino_xl.lib.contacts.roles import ContactsUser
class Account(dd.Model):
class Meta:
app_label = 'sepa'
abstract = dd.is_abstract_model(__name__, 'Account')
verbose_name = _("Bank account")
verbose_name_plural = _("Bank accounts")
partner = dd.ForeignKey(
'contacts.Partner',
related_name='sepa_accounts', null=True, blank=True)
iban = IBANField(verbose_name=_("IBAN"))
bic = BICField(verbose_name=_("BIC"), blank=True)
remark = models.CharField(_("Remark"), max_length=200, blank=True)
primary = models.BooleanField(
_("Primary"),
default=False,
help_text=_(
"Enabling this field will automatically disable any "
"previous primary account and update "
"the partner's IBAN and BIC"))
allow_cascaded_delete = ['partner']
def __str__(self):
return IBAN_FORMFIELD.prepare_value(self.iban)
# if self.remark:
# return "{0} ({1})".format(self.iban, self.remark)
# return self.iban
def full_clean(self):
if self.iban and not self.bic:
if self.iban[0].isdigit():
iban, bic = belgian_nban_to_iban_bic(self.iban)
self.bic = bic
self.iban = iban
else:
self.bic = iban2bic(self.iban) or ''
super(Account, self).full_clean()
def after_ui_save(self, ar, cw):
super(Account, self).after_ui_save(ar, cw)
if self.primary:
mi = self.partner
for o in mi.sepa_accounts.exclude(id=self.id):
if o.primary:
o.primary = False
o.save()
ar.set_response(refresh_all=True)
watcher = ChangeWatcher(mi)
for k in PRIMARY_FIELDS:
setattr(mi, k, getattr(self, k))
mi.save()
watcher.send_update(ar)
@dd.displayfield(_("Statements"))
def statements(self, ar):
if ar is None or not dd.is_installed('b2c'):
return ''
Account = rt.models.b2c.Account
try:
b2c = Account.objects.get(iban=self.iban)
except Account.DoesNotExist:
return ''
return ar.obj2html(b2c, fds(b2c.last_transaction))
PRIMARY_FIELDS = dd.fields_list(Account, 'iban bic')
class Accounts(dd.Table):
required_roles = dd.login_required(SepaStaff)
model = 'sepa.Account'
class AccountsByPartner(Accounts):
required_roles = dd.login_required((ContactsUser, SepaUser))
master_key = 'partner'
column_names = 'iban bic remark primary *'
order_by = ['iban']
stay_in_grid = True
auto_fit_column_widths = True
insert_layout = """
iban bic
remark
"""
dd.inject_field(
'ledger.Journal',
'sepa_account',
dd.ForeignKey('sepa.Account', blank=True, null=True))
|
lino-framework/xl
|
lino_xl/lib/sepa/models.py
|
Python
|
bsd-2-clause
| 3,391
|
from se34euca.lib.EucaUITestLib_Base import *
class EucaUITestLib_IP_Address(EucaUITestLib_Base):
def test_ui_allocate_ip_address(self, ip_count):
print
print "Started Test: Allocate IP Address: IP_COUNT " + str(ip_count)
print
self.click_element_by_link_text("Dashboard")
self.verify_element_by_link_text("Launch new instance")
print
print "Test: Go to the Page IP Address"
self.click_element_by_id("dashboard-netsec-eip")
time.sleep(3)
self.click_element_by_id("table-eips-new")
self.verify_element_by_id("eip-allocate-count")
print
print "Test: Allocate IP Address"
self.set_keys_by_id("eip-allocate-count", str(ip_count))
self.click_element_by_id("eip-allocate-btn")
print
print "Finished: Allocate IP Addresses"
print
return 0
def test_ui_check_ip_address_count(self, ip_count):
print
print "Started Test: Check IP Address Count"
print
self.click_element_by_link_text("Dashboard")
self.verify_element_by_link_text("Launch new instance")
time.sleep(3)
print "Verifying that IP Address Count on Dashboard is " + ip_count
self.verify_text_displayed_by_css("#dashboard-netsec-eip > span", ip_count)
print
print "Finished Test: IP Address Count"
print
return 0
def test_ui_release_ip_address_all(self):
print
print "Started Test: Release IP Address"
print
self.click_element_by_link_text("Dashboard")
self.verify_element_by_link_text("Launch new instance")
print
print "Test: Go to the Page IP Address"
self.click_element_by_css_selector("#dashboard-netsec-eip > span")
time.sleep(3)
self.click_element_by_id("eips-check-all")
self.click_element_by_id("more-actions-eips")
self.click_element_by_link_text("Release to cloud")
self.click_element_by_id("btn-eips-release-release")
print
print "Finished: Release IP Address"
print
return 0
def test_ui_get_available_ip_address(self):
'''
Returns an available IP address at random
'''
print
print "Started Test: Get Available IP Address"
print
self.click_element_by_link_text("Dashboard")
self.click_element_by_link_text("Network & Security")
self.click_element_by_link_text("IP Addresses")
time.sleep(3)
self.click_element_by_css_selector("div.VS-search-inner")
self.click_element_by_link_text("Assignment")
self.click_element_by_link_text("Unassigned")
time.sleep(3)
available_ip = self.get_text_by_xpath("//table[@id='eips']/tbody/tr/td[2]")
print
print "Finished Test: Get Available IP Address. Returning IP: " + available_ip
print
return available_ip
if __name__ == "__main__":
unittest.main()
|
eucalyptus/se34euca
|
se34euca/lib/EucaUITestLib_IP_Address.py
|
Python
|
bsd-2-clause
| 3,011
|
import pytest
from trough.wsgi.segment_manager import server
import ujson
import trough
from trough.settings import settings
import doublethink
import rethinkdb as r
import requests # :-\ urllib3?
import hdfs3
import time
import tempfile
import os
import sqlite3
import logging
import socket
trough.settings.configure_logging()
@pytest.fixture(scope="module")
def segment_manager_server():
server.testing = True
return server.test_client()
def test_simple_provision(segment_manager_server):
result = segment_manager_server.get('/')
assert result.status == '405 METHOD NOT ALLOWED'
# hasn't been provisioned yet
result = segment_manager_server.post('/', data='test_simple_provision_segment')
assert result.status_code == 200
assert result.mimetype == 'text/plain'
assert b''.join(result.response).endswith(b':6222/?segment=test_simple_provision_segment')
# now it has already been provisioned
result = segment_manager_server.post('/', data='test_simple_provision_segment')
assert result.status_code == 200
assert result.mimetype == 'text/plain'
assert b''.join(result.response).endswith(b':6222/?segment=test_simple_provision_segment')
def test_provision(segment_manager_server):
result = segment_manager_server.get('/provision')
assert result.status == '405 METHOD NOT ALLOWED'
# hasn't been provisioned yet
result = segment_manager_server.post(
'/provision', content_type='application/json',
data=ujson.dumps({'segment':'test_provision_segment'}))
assert result.status_code == 200
assert result.mimetype == 'application/json'
result_bytes = b''.join(result.response)
result_dict = ujson.loads(result_bytes) # ujson accepts bytes! 😻
assert result_dict['write_url'].endswith(':6222/?segment=test_provision_segment')
# now it has already been provisioned
result = segment_manager_server.post(
'/provision', content_type='application/json',
data=ujson.dumps({'segment':'test_provision_segment'}))
assert result.status_code == 200
assert result.mimetype == 'application/json'
result_bytes = b''.join(result.response)
result_dict = ujson.loads(result_bytes)
assert result_dict['write_url'].endswith(':6222/?segment=test_provision_segment')
def test_provision_with_schema(segment_manager_server):
schema = '''CREATE TABLE test (id INTEGER PRIMARY KEY AUTOINCREMENT, test varchar(4));
INSERT INTO test (test) VALUES ("test");'''
# create a schema by submitting sql
result = segment_manager_server.put(
        '/schema/test1/sql', content_type='application/sql', data=schema)
assert result.status_code == 201
# provision a segment with that schema
result = segment_manager_server.post(
'/provision', content_type='application/json',
data=ujson.dumps({'segment':'test_provision_with_schema_1', 'schema':'test1'}))
assert result.status_code == 200
assert result.mimetype == 'application/json'
result_bytes = b''.join(result.response)
result_dict = ujson.loads(result_bytes) # ujson accepts bytes! 😻
assert result_dict['write_url'].endswith(':6222/?segment=test_provision_with_schema_1')
# get db read url from rethinkdb
rethinker = doublethink.Rethinker(
servers=settings['RETHINKDB_HOSTS'], db='trough_configuration')
query = rethinker.table('services').get_all('test_provision_with_schema_1', index='segment').filter({'role': 'trough-read'}).filter(lambda svc: r.now().sub(svc['last_heartbeat']).lt(svc['ttl'])).order_by('load')[0]
healthy_segment = query.run()
read_url = healthy_segment.get('url')
assert read_url.endswith(':6444/?segment=test_provision_with_schema_1')
# run a query to check that the schema was used
sql = 'SELECT * FROM test;'
with requests.post(read_url, stream=True, data=sql) as response:
assert response.status_code == 200
result = ujson.loads(response.text)
assert result == [{'test': 'test', 'id': 1}]
# delete the schema from rethinkdb for the sake of other tests
rethinker = doublethink.Rethinker(
servers=settings['RETHINKDB_HOSTS'], db='trough_configuration')
result = rethinker.table('schema').get('test1').delete().run()
assert result == {'deleted': 1, 'inserted': 0, 'skipped': 0, 'errors': 0, 'unchanged': 0, 'replaced': 0}
def test_schemas(segment_manager_server):
# initial list of schemas
result = segment_manager_server.get('/schema')
assert result.status_code == 200
assert result.mimetype == 'application/json'
result_bytes = b''.join(result.response)
result_list = ujson.loads(result_bytes)
assert set(result_list) == {'default'}
# existent schema as json
result = segment_manager_server.get('/schema/default')
assert result.status_code == 200
assert result.mimetype == 'application/json'
result_bytes = b''.join(result.response)
result_dict = ujson.loads(result_bytes)
assert result_dict == {'id': 'default', 'sql': ''}
# existent schema sql
result = segment_manager_server.get('/schema/default/sql')
assert result.status_code == 200
assert result.mimetype == 'application/sql'
result_bytes = b''.join(result.response)
assert result_bytes == b''
# schema doesn't exist yet
result = segment_manager_server.get('/schema/schema1')
assert result.status_code == 404
# schema doesn't exist yet
result = segment_manager_server.get('/schema/schema1/sql')
assert result.status_code == 404
# bad request: POST not accepted (must be PUT)
result = segment_manager_server.post('/schema/schema1', data='{}')
assert result.status_code == 405
result = segment_manager_server.post('/schema/schema1/sql', data='')
assert result.status_code == 405
# bad request: invalid json
result = segment_manager_server.put(
'/schema/schema1', data=']]}what the not valid json' )
assert result.status_code == 400
assert b''.join(result.response) == b'input could not be parsed as json'
# bad request: id in json does not match url
result = segment_manager_server.put(
'/schema/schema1', data=ujson.dumps({'id': 'schema2', 'sql': ''}))
assert result.status_code == 400
assert b''.join(result.response) == b"id in json 'schema2' does not match id in url 'schema1'"
# bad request: missing sql
result = segment_manager_server.put(
'/schema/schema1', data=ujson.dumps({'id': 'schema1'}))
assert result.status_code == 400
assert b''.join(result.response) == b"input json has keys {'id'} (should be {'id', 'sql'})"
# bad request: missing id
result = segment_manager_server.put(
'/schema/schema1', data=ujson.dumps({'sql': ''}))
assert result.status_code == 400
assert b''.join(result.response) == b"input json has keys {'sql'} (should be {'id', 'sql'})"
# bad request: invalid sql
result = segment_manager_server.put(
'/schema/schema1', data=ujson.dumps({'id': 'schema1', 'sql': 'create create table table blah blooofdjaio'}))
assert result.status_code == 400
assert b''.join(result.response) == b'schema sql failed validation: near "create": syntax error'
# create new schema by submitting sql
result = segment_manager_server.put(
        '/schema/schema1/sql', content_type='application/sql',
data='create table foo (bar varchar(100));')
assert result.status_code == 201
# get the new schema as json
result = segment_manager_server.get('/schema/schema1')
assert result.status_code == 200
assert result.mimetype == 'application/json'
result_bytes = b''.join(result.response)
result_dict = ujson.loads(result_bytes)
assert result_dict == {'id': 'schema1', 'sql': 'create table foo (bar varchar(100));'}
# get the new schema as sql
result = segment_manager_server.get('/schema/schema1/sql')
assert result.status_code == 200
assert result.mimetype == 'application/sql'
result_bytes = b''.join(result.response)
assert result_bytes == b'create table foo (bar varchar(100));'
# create new schema by submitting json
result = segment_manager_server.put(
        '/schema/schema2', content_type='application/sql',
data=ujson.dumps({'id': 'schema2', 'sql': 'create table schema2_table (foo varchar(100));'}))
assert result.status_code == 201
# get the new schema as json
result = segment_manager_server.get('/schema/schema2')
assert result.status_code == 200
assert result.mimetype == 'application/json'
result_bytes = b''.join(result.response)
result_dict = ujson.loads(result_bytes)
assert result_dict == {'id': 'schema2', 'sql': 'create table schema2_table (foo varchar(100));'}
# get the new schema as sql
result = segment_manager_server.get('/schema/schema2/sql')
assert result.status_code == 200
assert result.mimetype == 'application/sql'
result_bytes = b''.join(result.response)
assert result_bytes == b'create table schema2_table (foo varchar(100));'
# updated list of schemas
result = segment_manager_server.get('/schema')
assert result.status_code == 200
assert result.mimetype == 'application/json'
result_bytes = b''.join(result.response)
result_list = ujson.loads(result_bytes)
assert set(result_list) == {'default', 'schema1', 'schema2'}
# overwrite schema1 with json api
result = segment_manager_server.put(
        '/schema/schema1', content_type='application/json',
data=ujson.dumps({'id': 'schema1', 'sql': 'create table blah (toot varchar(100));'}))
assert result.status_code == 204
# get the modified schema as sql
result = segment_manager_server.get('/schema/schema1/sql')
assert result.status_code == 200
assert result.mimetype == 'application/sql'
result_bytes = b''.join(result.response)
assert result_bytes == b'create table blah (toot varchar(100));'
# overwrite schema1 with sql api
result = segment_manager_server.put(
        '/schema/schema1/sql', content_type='application/sql',
data='create table haha (hehehe varchar(100));')
assert result.status_code == 204
# get the modified schema as json
result = segment_manager_server.get('/schema/schema1')
assert result.status_code == 200
assert result.mimetype == 'application/json'
result_bytes = b''.join(result.response)
result_dict = ujson.loads(result_bytes)
assert result_dict == {'id': 'schema1', 'sql': 'create table haha (hehehe varchar(100));'}
# updated list of schemas
result = segment_manager_server.get('/schema')
assert result.status_code == 200
assert result.mimetype == 'application/json'
result_bytes = b''.join(result.response)
result_list = ujson.loads(result_bytes)
assert set(result_list) == {'default', 'schema1', 'schema2'}
# XXX DELETE?
def test_promotion(segment_manager_server):
hdfs = hdfs3.HDFileSystem(settings['HDFS_HOST'], settings['HDFS_PORT'])
hdfs.rm(settings['HDFS_PATH'])
hdfs.mkdir(settings['HDFS_PATH'])
result = segment_manager_server.get('/promote')
assert result.status == '405 METHOD NOT ALLOWED'
# provision a test segment for write
result = segment_manager_server.post(
'/provision', content_type='application/json',
data=ujson.dumps({'segment':'test_promotion'}))
assert result.status_code == 200
assert result.mimetype == 'application/json'
result_bytes = b''.join(result.response)
result_dict = ujson.loads(result_bytes)
assert result_dict['write_url'].endswith(':6222/?segment=test_promotion')
write_url = result_dict['write_url']
# write something into the db
sql = ('create table foo (bar varchar(100));\n'
'insert into foo (bar) values ("testing segment promotion");\n')
response = requests.post(write_url, sql)
assert response.status_code == 200
# shouldn't be anything in hdfs yet...
expected_remote_path = os.path.join(
settings['HDFS_PATH'], 'test_promot', 'test_promotion.sqlite')
with pytest.raises(FileNotFoundError):
hdfs.ls(expected_remote_path, detail=True)
# now write to the segment and promote it to HDFS
before = time.time()
time.sleep(1.5)
result = segment_manager_server.post(
'/promote', content_type='application/json',
data=ujson.dumps({'segment': 'test_promotion'}))
assert result.status_code == 200
assert result.mimetype == 'application/json'
result_bytes = b''.join(result.response)
result_dict = ujson.loads(result_bytes)
assert result_dict == {'remote_path': expected_remote_path}
# make sure it doesn't think the segment is under promotion
rethinker = doublethink.Rethinker(
servers=settings['RETHINKDB_HOSTS'], db='trough_configuration')
query = rethinker.table('lock').get('write:lock:test_promotion')
result = query.run()
assert not result.get('under_promotion')
    # let's see if it's in hdfs
listing_after_promotion = hdfs.ls(expected_remote_path, detail=True)
assert len(listing_after_promotion) == 1
assert listing_after_promotion[0]['last_mod'] > before
# grab the file from hdfs and check the content
# n.b. copy created by sqlitebck may have different size, sha1 etc from orig
size = None
with tempfile.TemporaryDirectory() as tmpdir:
local_copy = os.path.join(tmpdir, 'test_promotion.sqlite')
hdfs.get(expected_remote_path, local_copy)
conn = sqlite3.connect(local_copy)
cur = conn.execute('select * from foo')
assert cur.fetchall() == [('testing segment promotion',)]
conn.close()
size = os.path.getsize(local_copy)
# test promotion when there is an assignment in rethinkdb
rethinker.table('assignment').insert({
'assigned_on': doublethink.utcnow(),
'bytes': size,
'hash_ring': 0 ,
'id': 'localhost:test_promotion',
'node': 'localhost',
'remote_path': expected_remote_path,
'segment': 'test_promotion'}).run()
# promote it to HDFS
before = time.time()
time.sleep(1.5)
result = segment_manager_server.post(
'/promote', content_type='application/json',
data=ujson.dumps({'segment': 'test_promotion'}))
assert result.status_code == 200
assert result.mimetype == 'application/json'
result_bytes = b''.join(result.response)
result_dict = ujson.loads(result_bytes)
assert result_dict == {'remote_path': expected_remote_path}
# make sure it doesn't think the segment is under promotion
rethinker = doublethink.Rethinker(
servers=settings['RETHINKDB_HOSTS'], db='trough_configuration')
query = rethinker.table('lock').get('write:lock:test_promotion')
result = query.run()
assert not result.get('under_promotion')
    # let's see if it's in hdfs
listing_after_promotion = hdfs.ls(expected_remote_path, detail=True)
assert len(listing_after_promotion) == 1
assert listing_after_promotion[0]['last_mod'] > before
# pretend the segment is under promotion
rethinker.table('lock')\
.get('write:lock:test_promotion')\
.update({'under_promotion': True}).run()
assert rethinker.table('lock')\
.get('write:lock:test_promotion').run()\
.get('under_promotion')
with pytest.raises(Exception):
result = segment_manager_server.post(
'/promote', content_type='application/json',
data=ujson.dumps({'segment': 'test_promotion'}))
def test_delete_segment(segment_manager_server):
hdfs = hdfs3.HDFileSystem(settings['HDFS_HOST'], settings['HDFS_PORT'])
rethinker = doublethink.Rethinker(
servers=settings['RETHINKDB_HOSTS'], db='trough_configuration')
# initially, segment doesn't exist
result = segment_manager_server.delete('/segment/test_delete_segment')
assert result.status_code == 404
# provision segment
result = segment_manager_server.post(
'/provision', content_type='application/json',
data=ujson.dumps({'segment':'test_delete_segment'}))
assert result.status_code == 200
assert result.mimetype == 'application/json'
result_bytes = b''.join(result.response)
result_dict = ujson.loads(result_bytes)
assert result_dict['write_url'].endswith(':6222/?segment=test_delete_segment')
write_url = result_dict['write_url']
# write something into the db
sql = ('create table foo (bar varchar(100));\n'
'insert into foo (bar) values ("testing segment deletion");\n')
response = requests.post(write_url, sql)
assert response.status_code == 200
# check that local file exists
local_path = os.path.join(
settings['LOCAL_DATA'], 'test_delete_segment.sqlite')
assert os.path.exists(local_path)
# check that attempted delete while under write returns 400
result = segment_manager_server.delete('/segment/test_delete_segment')
assert result.status_code == 400
# shouldn't be anything in hdfs yet
expected_remote_path = os.path.join(
settings['HDFS_PATH'], 'test_delete_segm',
'test_delete_segment.sqlite')
with pytest.raises(FileNotFoundError):
hdfs.ls(expected_remote_path, detail=True)
# promote segment to hdfs
result = segment_manager_server.post(
'/promote', content_type='application/json',
data=ujson.dumps({'segment': 'test_delete_segment'}))
assert result.status_code == 200
assert result.mimetype == 'application/json'
result_bytes = b''.join(result.response)
result_dict = ujson.loads(result_bytes)
assert result_dict == {'remote_path': expected_remote_path}
    # let's see if it's in hdfs
hdfs_ls = hdfs.ls(expected_remote_path, detail=True)
assert len(hdfs_ls) == 1
# add an assignment (so we can check it is deleted successfully)
rethinker.table('assignment').insert({
'assigned_on': doublethink.utcnow(),
'bytes': os.path.getsize(local_path),
'hash_ring': 0 ,
'id': '%s:test_delete_segment' % socket.gethostname(),
'node': socket.gethostname(),
'remote_path': expected_remote_path,
'segment': 'test_delete_segment'}).run()
# check that service entries, assignment exist
assert rethinker.table('services')\
.get('trough-read:%s:test_delete_segment' % socket.gethostname())\
.run()
assert rethinker.table('services')\
.get('trough-write:%s:test_delete_segment' % socket.gethostname())\
.run()
assert rethinker.table('assignment')\
.get('%s:test_delete_segment' % socket.gethostname()).run()
# check that attempted delete while under write returns 400
result = segment_manager_server.delete('/segment/test_delete_segment')
assert result.status_code == 400
# delete the write lock
assert rethinker.table('lock')\
.get('write:lock:test_delete_segment').delete().run() == {
'deleted': 1, 'errors': 0, 'inserted': 0,
'replaced': 0 , 'skipped': 0 , 'unchanged': 0, }
# delete the segment
result = segment_manager_server.delete('/segment/test_delete_segment')
assert result.status_code == 204
# check that service entries and assignment are gone
assert not rethinker.table('services')\
.get('trough-read:%s:test_delete_segment' % socket.gethostname())\
.run()
assert not rethinker.table('services')\
.get('trough-write:%s:test_delete_segment' % socket.gethostname())\
.run()
assert not rethinker.table('assignment')\
.get('%s:test_delete_segment' % socket.gethostname()).run()
# check that local file is gone
assert not os.path.exists(local_path)
# check that file is gone from hdfs
with pytest.raises(FileNotFoundError):
hdfs_ls = hdfs.ls(expected_remote_path, detail=True)
|
jkafader/trough
|
tests/wsgi/test_segment_manager.py
|
Python
|
bsd-2-clause
| 20,159
|
import os
import pwd
import sys
import time
import json
import shutil
import random
import subprocess
import numpy as np
from .generic_hasher import GenericHasher
from ..memex_tools.image_dl import mkpath
from ..memex_tools.binary_file import read_binary_file
# should we move the _hasher_obj_py.so?
#from ..hashing_new.python import _hasher_obj_py
import _hasher_obj_py as hop
class HasherSwig(GenericHasher):
def __init__(self,global_conf_filename):
self.global_conf = json.load(open(global_conf_filename,'rt'))
self.base_update_path = os.path.dirname(__file__)
self.base_model_path = os.path.join(os.path.dirname(__file__),'../../data/')
if 'LI_base_update_path' in self.global_conf:
self.base_update_path = self.global_conf['LI_base_update_path']
if 'HA_base_update_path' in self.global_conf:
self.base_update_path = self.global_conf['HA_base_update_path']
if 'HA_path' in self.global_conf:
self.hashing_execpath = os.path.join(os.path.dirname(__file__),self.global_conf['HA_path'])
else:
self.hashing_execpath = os.path.join(os.path.dirname(__file__),'../hashing/')
if 'HA_exec' in self.global_conf:
self.hashing_execfile = self.global_conf['HA_exec']
else:
self.hashing_execfile = 'hashing'
self.features_dim = self.global_conf['FE_features_dim']
self.bits_num = self.global_conf['HA_bits_num']
self.hashing_outpath = os.path.join(self.base_update_path,'hash_bits/')
mkpath(self.hashing_outpath)
        # need to be able to set/get master_update file in HasherObjectPy too.
        self.master_update_file = "update_list_dev.txt"
        if 'HA_master_update_file' in self.global_conf:
            # Overriding the master update file is not yet supported by the
            # SWIG hasher, so fail fast rather than silently ignoring it.
            print("Setting HA_master_update_file is not yet supported for HasherSwig")
            sys.exit(-1)
self.hasher = hop.new_HasherObjectPy()
hop.HasherObjectPy_set_feature_dim(self.hasher, self.features_dim)
hop.HasherObjectPy_set_bit_num(self.hasher, self.bits_num)
hop.HasherObjectPy_set_base_updatepath(self.hasher, str(self.base_update_path))
#hop.HasherObjectPy_set_base_modelpath(self.hasher, "/home/ubuntu/memex/data/")
# Model files still need to be in self.hashing_execfile for updates...
hop.HasherObjectPy_set_base_modelpath(self.hasher, str(self.base_model_path))
self.init_hasher()
def __del__(self):
# clean exit deleting SWIG object
hop.delete_HasherObjectPy(self.hasher)
def init_hasher(self):
status = hop.HasherObjectPy_initialize(self.hasher)
if status != 0:
print("Hasher was not able to initialize")
sys.exit(-1)
def compute_hashcodes(self,features_filename,ins_num,startid):
""" Compute ITQ hashcodes for the features in 'features_filename'
:param features_filename: filepath for the binary file containing the features
:type features_filename: string
:param ins_num: number of features in 'features_filename'
:type ins_num: integer
:returns hashbits_filepath: filepath for the binary file containing the hashcodes
"""
feature_filepath = features_filename[:-4]+'_norm'
# we could be passing additional arguments here
command = self.hashing_execpath+'hashing_update '+features_filename+' '+str(ins_num)+' '+self.hashing_execpath
proc = subprocess.Popen(command.split(' '), stdout=subprocess.PIPE)
print "[HasherSwig.compute_hashcodes: log] running command: {}".format(command)
sys.stdout.flush()
(out, err) = proc.communicate()
print "[HasherSwig.compute_hashcodes: log] program output:", out
print "[HasherSwig.compute_hashcodes: log] program error:", err
sys.stdout.flush()
#print command
#os.system(command)
hashbits_filepath = os.path.join(self.hashing_outpath,str(startid)+'_itq_norm_'+str(self.bits_num))
itq_output_path = features_filename[:-4] + '_itq_norm_'+str(self.bits_num)
print "[HasherSwig.compute_hashcodes: log] Moving {} to {}.".format(itq_output_path,hashbits_filepath)
shutil.move(itq_output_path, hashbits_filepath)
os.remove(features_filename)
return hashbits_filepath
def get_max_feat_id(self):
""" Returns number of images indexed based on the size of hashcodes files.
"""
total_nb = 0
try:
with open(os.path.join(self.base_update_path,self.master_update_file),'rt') as master_file:
# sum up sizes of files in master_file
for line in master_file:
statinfo = os.stat(os.path.join(self.hashing_outpath,line.strip()+'_itq_norm_'+str(self.bits_num)))
total_nb += statinfo.st_size*8/self.bits_num
except Exception as inst:
print "[HasherSwig.get_max_feat_id: error] {}".format(inst)
return total_nb
def compress_feats(self):
""" Compress the features with zlib.
"""
mkpath(os.path.join(self.base_update_path,'comp_features'))
mkpath(os.path.join(self.base_update_path,'comp_idx'))
args = [self.base_update_path+'/', str(self.features_dim), '1', self.master_update_file, str(self.bits_num)]
subprocess_command = [self.hashing_execpath+"compress_feats"] + args
# this will work only if features to be compressed are present in self.base_update_path/features
proc = subprocess.Popen(subprocess_command, stdout=subprocess.PIPE)
print "[HasherSwig.compress_feats: log] running command: {}".format(subprocess_command)
(out, err) = proc.communicate()
print "[HasherSwig.compress_feats: log] program output:", out
print "[HasherSwig.compress_feats: log] program error:", err
def get_precomp_X(self,list_feats_id,str_precomp,read_dim,read_type):
import struct
query_time = time.time()
# save queries id in binary file
query_precomp_fn = "{}_query_{}_p{}_{}".format(str_precomp, query_time, os.getpid(), random.random())
X_fn = "{}_{}".format(str_precomp,query_time)
with open(query_precomp_fn,"wb") as f_prein:
for feat_id in list_feats_id:
f_prein.write(struct.pack('i',feat_id))
# query for features
command = self.hashing_execpath+"get_precomp_{} {} {} {}".format(str_precomp,query_precomp_fn,X_fn,self.base_update_path)
print("[HasherSwig.get_precomp_X: log] running command: {}".format(command))
sys.stdout.flush()
os.system(command)
# read features/hashcodes
X, ok_ids = read_binary_file(X_fn,str_precomp,list_feats_id,read_dim,read_type)
#print X,X[0].shape
# cleanup
os.remove(query_precomp_fn)
os.remove(X_fn)
return X,ok_ids
def get_precomp_feats(self,list_feats_id):
""" Get precomputed features from 'list_feats_id'
"""
return self.get_precomp_X(list_feats_id,"feats",self.features_dim*4,np.float32)
def get_precomp_hashcodes(self,list_feats_id):
""" Get precomputed hashcodes from 'list_feats_id'
"""
return self.get_precomp_X(list_feats_id,"hashcodes",self.bits_num/8,np.uint8)
def get_similar_images_from_featuresfile(self, featurefilename, ratio, near_dup_th=-1.0):
""" Get similar images of the images with features in 'featurefilename'.
:param featurefilename: features of the query images.
:type featurefilename: string
:param ratio: ratio of images retrieved with hashing that will be reranked.
:type ratio: float
:param near_dup_th: near dup threshold, if positive, only images below this distance value will be returned.
:type near_dup_th: float
:returns simname: filename of the simname text file.
"""
hop.HasherObjectPy_set_ratio(self.hasher, ratio)
# needed?
sys.stdout = sys.stderr
print "[HasherSwig.get_similar_images: log] preparing search for {}".format(featurefilename)
hop.HasherObjectPy_set_near_dup_th(self.hasher, near_dup_th)
hop.HasherObjectPy_set_query_feats_from_disk(self.hasher, featurefilename)
hop.HasherObjectPy_set_outputfile(self.hasher, featurefilename[:-4])
hop.HasherObjectPy_find_knn(self.hasher)
initname = featurefilename[:-4] + '-sim.txt'
simname = featurefilename[:-4] + '-sim_'+str(ratio)+'.txt'
print "[HasherSwig.get_similar_images: log] try to rename {} to {}".format(initname,simname)
# this would raise an error if results have not been computed
os.rename(initname,simname)
return simname
def get_similar_images_from_featuresfile_nodiskout(self, featurefilename, ratio, demote=False):
""" Get similar images of the images with features in 'featurefilename'.
:param featurefilename: features of the query images.
:type featurefilename: string
:param ratio: ratio of images retrieved with hashing that will be reranked.
:type ratio: float
:returns simlist: list of nearest neighbors of each query
"""
hop.HasherObjectPy_set_ratio(self.hasher, ratio)
# needed?
sys.stdout = sys.stderr
hop.HasherObjectPy_set_query_feats_from_disk(self.hasher, featurefilename)
hop.HasherObjectPy_set_outputfile(self.hasher, featurefilename[:-4])
out_res = hop.HasherObjectPy_find_knn_nodiskout(self.hasher)
print "[HasherSwig.get_similar_images_from_featuresfile_nodiskout: log] out_res: {}".format(out_res)
return out_res
|
svebk/DeepSentiBank_memex
|
cu_image_search/hasher/hasher_swig.py
|
Python
|
bsd-2-clause
| 9,815
|
"""
Make this module itself executable as an alias for invoke.
"""
import sys
import subprocess
cmd = ["invoke"]
if len(sys.argv) == 1:
cmd.append("help")
else:
cmd.extend(sys.argv[1:])
subprocess.check_call(cmd)
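# Usage sketch (assuming this module lives in a package named "tasks";
# illustrative only):
#   python -m tasks build   ->  runs `invoke build`
#   python -m tasks         ->  runs `invoke help`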
|
imageio/imageio
|
tasks/__main__.py
|
Python
|
bsd-2-clause
| 224
|
from __future__ import division, absolute_import, print_function
import AppKit
from math import radians
import os
from drawBot.misc import DrawBotError, optimizePath
from fontTools.misc.py23 import basestring
from drawBot.context.imageContext import _makeBitmapImageRep
class ImageObject(object):
"""
An image object with support for filters.
    Optionally, a `path` to an existing image can be provided.
For more info see: `Core Image Filter Reference`_.
.. _Core Image Filter Reference: https://developer.apple.com/library/mac/documentation/GraphicsImaging/Reference/CoreImageFilterReference/index.html
"""
def __init__(self, path=None):
self._filters = []
if path is not None:
self.open(path)
def __del__(self):
del self._filters
if hasattr(self, "_source"):
del self._source
if hasattr(self, "_cachedImage"):
del self._cachedImage
def size(self):
"""
Return the size of the image as a tuple.
"""
(x, y), (w, h) = self._ciImage().extent()
return w, h
def offset(self):
"""
        Return the offset of the image; the origin point can change due to filters.
"""
(x, y), (w, h) = self._ciImage().extent()
return x, y
def clearFilters(self):
"""
Clear all filters.
"""
self._filters = []
def open(self, path):
"""
Open an image with a given `path`.
"""
if isinstance(path, AppKit.NSImage):
im = path
elif isinstance(path, basestring):
path = optimizePath(path)
if path.startswith("http"):
url = AppKit.NSURL.URLWithString_(path)
else:
if not os.path.exists(path):
raise DrawBotError("Image path '%s' does not exists." % path)
url = AppKit.NSURL.fileURLWithPath_(path)
im = AppKit.NSImage.alloc().initByReferencingURL_(url)
else:
raise DrawBotError("Cannot read image path '%s'." % path)
rep = _makeBitmapImageRep(im)
ciImage = AppKit.CIImage.imageWithData_(rep.TIFFRepresentation())
self._merge(ciImage, doCrop=True)
def copy(self):
"""
Return a copy.
"""
new = self.__class__()
new._filters = list(self._filters)
if hasattr(self, "_source"):
new._source = self._source.copy()
if hasattr(self, "_cachedImage"):
new._cachedImage = self._cachedImage.copy()
return new
def lockFocus(self):
"""
Set focus on image.
"""
from drawBot.drawBotDrawingTools import _drawBotDrawingTool
# copy/save a state of the existing drawing tool
self._originalTool = _drawBotDrawingTool._copy()
# reset the existing one
_drawBotDrawingTool._reset()
# start a new drawing
_drawBotDrawingTool.newDrawing()
# set the size of the existing image, if there is one
if hasattr(self, "_source"):
w, h = self.size()
_drawBotDrawingTool.size(w, h)
def unlockFocus(self):
"""
Set unlock focus on image.
"""
from drawBot.drawBotDrawingTools import _drawBotDrawingTool, DrawBotDrawingTool
        # explicitly tell the drawing that it is done
_drawBotDrawingTool.endDrawing()
# initiate a new drawing Tool
self.imageDrawingTool = DrawBotDrawingTool()
# reset the new drawing tool from the main drawing tool
self.imageDrawingTool._reset(_drawBotDrawingTool)
# reset the main drawing tool with a saved state of the tool
_drawBotDrawingTool._reset(self._originalTool)
# get the pdf data
data = self.imageDrawingTool.pdfImage()
# get the last page
pageCount = data.pageCount()
page = data.pageAtIndex_(pageCount-1)
# create an image
im = AppKit.NSImage.alloc().initWithData_(page.dataRepresentation())
# create an CIImage object
rep = _makeBitmapImageRep(im)
ciImage = AppKit.CIImage.imageWithData_(rep.TIFFRepresentation())
        # merge it with the data already set, if there is an existing image
self._merge(ciImage)
def __enter__(self):
self.lockFocus()
return self
def __exit__(self, type, value, traceback):
self.unlockFocus()
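    # Example (hedged sketch): the context manager wraps lockFocus/unlockFocus,
    # so inside a DrawBot script you can draw straight into the image (size()
    # and rect() come from the DrawBot scripting environment, not this module):
    #
    #   img = ImageObject()
    #   with img:
    #       size(200, 200)
    #       rect(20, 20, 100, 100)
    #   img.gaussianBlur(radius=5)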
def _ciImage(self):
"""
Return the CIImage object.
"""
if not hasattr(self, "_cachedImage"):
self._applyFilters()
return self._cachedImage
def _nsImage(self):
"""
Return the NSImage object.
"""
rep = AppKit.NSCIImageRep.imageRepWithCIImage_(self._ciImage())
nsImage = AppKit.NSImage.alloc().initWithSize_(rep.size())
nsImage.addRepresentation_(rep)
return nsImage
def _merge(self, ciImage, doCrop=False):
"""
        Merge with another CIImage object using the sourceOverCompositing filter.
"""
if hasattr(self, "_source"):
imObject = self.__class__()
imObject._source = ciImage
imObject.sourceOverCompositing(backgroundImage=self)
if doCrop:
(x, y), (w, h) = self._ciImage().extent()
imObject.crop(rectangle=(x, y, w, h))
ciImage = imObject._ciImage()
if hasattr(self, "_cachedImage"):
del self._cachedImage
self._source = ciImage
def _addFilter(self, filterDict):
"""
        Add a filter and invalidate the cached image.
"""
self._filters.append(filterDict)
if hasattr(self, "_cachedImage"):
del self._cachedImage
def _applyFilters(self):
"""
        Apply all filters to the source image.
Keep the _source image intact and store the result in a _cachedImage attribute.
"""
if hasattr(self, "_source"):
self._cachedImage = self._source.copy()
for filterDict in self._filters:
filterName = filterDict.get("name")
ciFilter = AppKit.CIFilter.filterWithName_(filterName)
ciFilter.setDefaults()
for key, value in filterDict.get("attributes", {}).items():
ciFilter.setValue_forKey_(value, key)
if filterDict.get("isGenerator", False):
w, h = filterDict["size"]
dummy = AppKit.NSImage.alloc().initWithSize_((w, h))
generator = ciFilter.valueForKey_("outputImage")
dummy.lockFocus()
ctx = AppKit.NSGraphicsContext.currentContext()
ctx.setShouldAntialias_(False)
ctx.setImageInterpolation_(AppKit.NSImageInterpolationNone)
generator.drawAtPoint_fromRect_operation_fraction_((0, 0), ((0, 0), (w, h)), AppKit.NSCompositeCopy, 1)
dummy.unlockFocus()
rep = _makeBitmapImageRep(dummy)
self._cachedImage = AppKit.CIImage.imageWithData_(rep.TIFFRepresentation())
del dummy
elif hasattr(self, "_cachedImage"):
ciFilter.setValue_forKey_(self._cachedImage, "inputImage")
self._cachedImage = ciFilter.valueForKey_("outputImage")
if not hasattr(self, "_cachedImage"):
raise DrawBotError("Image does not contain any data. Draw into the image object first or set image data from a path.")
# filters
def boxBlur(self, radius=None):
"""
Blurs an image using a box-shaped convolution kernel.
Attributes: `radius` a float.
"""
attr = dict()
if radius:
attr["inputRadius"] = radius
filterDict = dict(name="CIBoxBlur", attributes=attr)
self._addFilter(filterDict)
def discBlur(self, radius=None):
"""
Blurs an image using a disc-shaped convolution kernel.
Attributes: `radius` a float.
"""
attr = dict()
if radius:
attr["inputRadius"] = radius
filterDict = dict(name="CIDiscBlur", attributes=attr)
self._addFilter(filterDict)
def gaussianBlur(self, radius=None):
"""
Spreads source pixels by an amount specified by a Gaussian distribution.
Attributes: `radius` a float.
"""
attr = dict()
if radius:
attr["inputRadius"] = radius
filterDict = dict(name="CIGaussianBlur", attributes=attr)
self._addFilter(filterDict)
def maskedVariableBlur(self, mask=None, radius=None):
"""
Blurs the source image according to the brightness levels in a mask image.
Attributes: `mask` an Image object, `radius` a float.
"""
attr = dict()
if mask:
attr["inputMask"] = mask._ciImage()
if radius:
attr["inputRadius"] = radius
filterDict = dict(name="CIMaskedVariableBlur", attributes=attr)
self._addFilter(filterDict)
def motionBlur(self, radius=None, angle=None):
"""
Blurs an image to simulate the effect of using a camera that moves a specified angle and distance while capturing the image.
Attributes: `radius` a float, `angle` a float in degrees.
"""
attr = dict()
if radius:
attr["inputRadius"] = radius
if angle:
attr["inputAngle"] = radians(angle)
filterDict = dict(name="CIMotionBlur", attributes=attr)
self._addFilter(filterDict)
def noiseReduction(self, noiseLevel=None, sharpness=None):
"""
Reduces noise using a threshold value to define what is considered noise.
Attributes: `noiseLevel` a float, `sharpness` a float.
"""
attr = dict()
if noiseLevel:
attr["inputNoiseLevel"] = noiseLevel
if sharpness:
attr["inputSharpness"] = sharpness
filterDict = dict(name="CINoiseReduction", attributes=attr)
self._addFilter(filterDict)
def zoomBlur(self, center=None, amount=None):
"""
Simulates the effect of zooming the camera while capturing the image.
Attributes: `center` a tuple (x, y), `amount` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if amount:
attr["inputAmount"] = amount
filterDict = dict(name="CIZoomBlur", attributes=attr)
self._addFilter(filterDict)
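    # Example (hedged sketch): blur filters can be chained; each call appends
    # to the filter stack and _applyFilters runs them in order:
    #
    #   img.motionBlur(radius=20, angle=45)
    #   img.zoomBlur(center=(100, 100), amount=10)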
def colorClamp(self, minComponents=None, maxComponents=None):
"""
Modifies color values to keep them within a specified range.
        Attributes: `minComponents` a tuple of 4 values (r, g, b, a), `maxComponents` a tuple of 4 values (r, g, b, a).
"""
attr = dict()
if minComponents:
attr["inputMinComponents"] = AppKit.CIVector.vectorWithValues_count_(minComponents, 4)
if maxComponents:
attr["inputMaxComponents"] = AppKit.CIVector.vectorWithValues_count_(maxComponents, 4)
filterDict = dict(name="CIColorClamp", attributes=attr)
self._addFilter(filterDict)
def colorControls(self, saturation=None, brightness=None, contrast=None):
"""
Adjusts saturation, brightness, and contrast values.
Attributes: `saturation` a float, `brightness` a float, `contrast` a float.
"""
attr = dict()
if saturation:
attr["inputSaturation"] = saturation
if brightness:
attr["inputBrightness"] = brightness
if contrast:
attr["inputContrast"] = contrast
filterDict = dict(name="CIColorControls", attributes=attr)
self._addFilter(filterDict)
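    # Example (hedged sketch): only the arguments you pass are set; omitted
    # ones keep the Core Image defaults (via ciFilter.setDefaults()):
    #
    #   img.colorControls(saturation=0.5, contrast=1.2)  # brightness untouched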
def colorMatrix(self, RVector=None, GVector=None, BVector=None, AVector=None, biasVector=None):
"""
Multiplies source color values and adds a bias factor to each color component.
Attributes: `RVector` a tuple (x, y, w, h), `GVector` a tuple (x, y, w, h), `BVector` a tuple (x, y, w, h), `AVector` a tuple (x, y, w, h), `biasVector` a tuple (x, y, w, h).
"""
attr = dict()
if RVector:
attr["inputRVector"] = AppKit.CIVector.vectorWithValues_count_(RVector, 4)
if GVector:
attr["inputGVector"] = AppKit.CIVector.vectorWithValues_count_(GVector, 4)
if BVector:
attr["inputBVector"] = AppKit.CIVector.vectorWithValues_count_(BVector, 4)
if AVector:
attr["inputAVector"] = AppKit.CIVector.vectorWithValues_count_(AVector, 4)
if biasVector:
attr["inputBiasVector"] = AppKit.CIVector.vectorWithValues_count_(biasVector, 4)
filterDict = dict(name="CIColorMatrix", attributes=attr)
self._addFilter(filterDict)
def colorPolynomial(self, redCoefficients=None, greenCoefficients=None, blueCoefficients=None, alphaCoefficients=None):
"""
Modifies the pixel values in an image by applying a set of cubic polynomials.
Attributes: `redCoefficients` a tuple (x, y, w, h), `greenCoefficients` a tuple (x, y, w, h), `blueCoefficients` a tuple (x, y, w, h), `alphaCoefficients` a tuple (x, y, w, h).
"""
attr = dict()
if redCoefficients:
attr["inputRedCoefficients"] = AppKit.CIVector.vectorWithValues_count_(redCoefficients, 4)
if greenCoefficients:
attr["inputGreenCoefficients"] = AppKit.CIVector.vectorWithValues_count_(greenCoefficients, 4)
if blueCoefficients:
attr["inputBlueCoefficients"] = AppKit.CIVector.vectorWithValues_count_(blueCoefficients, 4)
if alphaCoefficients:
attr["inputAlphaCoefficients"] = AppKit.CIVector.vectorWithValues_count_(alphaCoefficients, 4)
filterDict = dict(name="CIColorPolynomial", attributes=attr)
self._addFilter(filterDict)
def exposureAdjust(self, EV=None):
"""
Adjusts the exposure setting for an image similar to the way you control exposure for a camera when you change the F-stop.
Attributes: `EV` a float.
"""
attr = dict()
if EV:
attr["inputEV"] = EV
filterDict = dict(name="CIExposureAdjust", attributes=attr)
self._addFilter(filterDict)
def gammaAdjust(self, power=None):
"""
Adjusts midtone brightness.
Attributes: `power` a float.
"""
attr = dict()
if power:
attr["inputPower"] = power
filterDict = dict(name="CIGammaAdjust", attributes=attr)
self._addFilter(filterDict)
def hueAdjust(self, angle=None):
"""
Changes the overall hue, or tint, of the source pixels.
Attributes: `angle` a float in degrees.
"""
attr = dict()
if angle:
attr["inputAngle"] = radians(angle)
filterDict = dict(name="CIHueAdjust", attributes=attr)
self._addFilter(filterDict)
def linearToSRGBToneCurve(self):
"""
Maps color intensity from a linear gamma curve to the sRGB color space.
"""
attr = dict()
filterDict = dict(name="CILinearToSRGBToneCurve", attributes=attr)
self._addFilter(filterDict)
def SRGBToneCurveToLinear(self):
"""
Maps color intensity from the sRGB color space to a linear gamma curve.
"""
attr = dict()
filterDict = dict(name="CISRGBToneCurveToLinear", attributes=attr)
self._addFilter(filterDict)
def temperatureAndTint(self, neutral=None, targetNeutral=None):
"""
Adapts the reference white point for an image.
Attributes: `neutral` a tuple, `targetNeutral` a tuple.
"""
attr = dict()
if neutral:
attr["inputNeutral"] = AppKit.CIVector.vectorWithValues_count_(neutral, 2)
if targetNeutral:
attr["inputTargetNeutral"] = AppKit.CIVector.vectorWithValues_count_(targetNeutral, 2)
filterDict = dict(name="CITemperatureAndTint", attributes=attr)
self._addFilter(filterDict)
def toneCurve(self, point0=None, point1=None, point2=None, point3=None, point4=None):
"""
Adjusts tone response of the R, G, and B channels of an image.
Attributes: `point0` a tuple (x, y), `point1` a tuple (x, y), `point2` a tuple (x, y), `point3` a tuple (x, y), `point4` a tuple (x, y).
"""
attr = dict()
if point0:
attr["inputPoint0"] = AppKit.CIVector.vectorWithValues_count_(point0, 2)
if point1:
attr["inputPoint1"] = AppKit.CIVector.vectorWithValues_count_(point1, 2)
if point2:
attr["inputPoint2"] = AppKit.CIVector.vectorWithValues_count_(point2, 2)
if point3:
attr["inputPoint3"] = AppKit.CIVector.vectorWithValues_count_(point3, 2)
if point4:
attr["inputPoint4"] = AppKit.CIVector.vectorWithValues_count_(point4, 2)
filterDict = dict(name="CIToneCurve", attributes=attr)
self._addFilter(filterDict)
def vibrance(self, amount=None):
"""
Adjusts the saturation of an image while keeping pleasing skin tones.
Attributes: `amount` a float.
"""
attr = dict()
if amount:
attr["inputAmount"] = amount
filterDict = dict(name="CIVibrance", attributes=attr)
self._addFilter(filterDict)
def whitePointAdjust(self, color=None):
"""
Adjusts the reference white point for an image and maps all colors in the source using the new reference.
Attributes: `color` RGBA tuple Color (r, g, b, a).
"""
attr = dict()
if color:
attr["inputColor"] = AppKit.CIColor.colorWithRed_green_blue_alpha_(color[0], color[1], color[2], color[3])
filterDict = dict(name="CIWhitePointAdjust", attributes=attr)
self._addFilter(filterDict)
def colorCrossPolynomial(self, redCoefficients=None, greenCoefficients=None, blueCoefficients=None):
"""
Modifies the pixel values in an image by applying a set of polynomial cross-products.
Attributes: `redCoefficients` a tuple (x, y, w, h), `greenCoefficients` a tuple (x, y, w, h), `blueCoefficients` a tuple (x, y, w, h).
"""
attr = dict()
if redCoefficients:
attr["inputRedCoefficients"] = AppKit.CIVector.vectorWithValues_count_(redCoefficients, 4)
if greenCoefficients:
attr["inputGreenCoefficients"] = AppKit.CIVector.vectorWithValues_count_(greenCoefficients, 4)
if blueCoefficients:
attr["inputBlueCoefficients"] = AppKit.CIVector.vectorWithValues_count_(blueCoefficients, 4)
filterDict = dict(name="CIColorCrossPolynomial", attributes=attr)
self._addFilter(filterDict)
def colorInvert(self):
"""
Inverts the colors in an image.
"""
attr = dict()
filterDict = dict(name="CIColorInvert", attributes=attr)
self._addFilter(filterDict)
def colorMap(self, gradientImage=None):
"""
Performs a nonlinear transformation of source color values using mapping values provided in a table.
Attributes: `gradientImage` an Image object.
"""
attr = dict()
if gradientImage:
attr["inputGradientImage"] = gradientImage._ciImage()
filterDict = dict(name="CIColorMap", attributes=attr)
self._addFilter(filterDict)
def colorMonochrome(self, color=None, intensity=None):
"""
Remaps colors so they fall within shades of a single color.
Attributes: `color` RGBA tuple Color (r, g, b, a), `intensity` a float.
"""
attr = dict()
if color:
attr["inputColor"] = AppKit.CIColor.colorWithRed_green_blue_alpha_(color[0], color[1], color[2], color[3])
if intensity:
attr["inputIntensity"] = intensity
filterDict = dict(name="CIColorMonochrome", attributes=attr)
self._addFilter(filterDict)
def colorPosterize(self, levels=None):
"""
Remaps red, green, and blue color components to the number of brightness values you specify for each color component.
Attributes: `levels` a float.
"""
attr = dict()
if levels:
attr["inputLevels"] = levels
filterDict = dict(name="CIColorPosterize", attributes=attr)
self._addFilter(filterDict)
def falseColor(self, color0=None, color1=None):
"""
Maps luminance to a color ramp of two colors.
Attributes: `color0` RGBA tuple Color (r, g, b, a), `color1` RGBA tuple Color (r, g, b, a).
"""
attr = dict()
if color0:
attr["inputColor0"] = AppKit.CIColor.colorWithRed_green_blue_alpha_(color0[0], color0[1], color0[2], color0[3])
if color1:
attr["inputColor1"] = AppKit.CIColor.colorWithRed_green_blue_alpha_(color1[0], color1[1], color1[2], color1[3])
filterDict = dict(name="CIFalseColor", attributes=attr)
self._addFilter(filterDict)
def maskToAlpha(self):
"""
Converts a grayscale image to a white image that is masked by alpha.
"""
attr = dict()
filterDict = dict(name="CIMaskToAlpha", attributes=attr)
self._addFilter(filterDict)
def maximumComponent(self):
"""
Returns a grayscale image from max(r,g,b).
"""
attr = dict()
filterDict = dict(name="CIMaximumComponent", attributes=attr)
self._addFilter(filterDict)
def minimumComponent(self):
"""
Returns a grayscale image from min(r,g,b).
"""
attr = dict()
filterDict = dict(name="CIMinimumComponent", attributes=attr)
self._addFilter(filterDict)
def photoEffectChrome(self):
"""
Applies a preconfigured set of effects that imitate vintage photography film with exaggerated color.
"""
attr = dict()
filterDict = dict(name="CIPhotoEffectChrome", attributes=attr)
self._addFilter(filterDict)
def photoEffectFade(self):
"""
Applies a preconfigured set of effects that imitate vintage photography film with diminished color.
"""
attr = dict()
filterDict = dict(name="CIPhotoEffectFade", attributes=attr)
self._addFilter(filterDict)
def photoEffectInstant(self):
"""
Applies a preconfigured set of effects that imitate vintage photography film with distorted colors.
"""
attr = dict()
filterDict = dict(name="CIPhotoEffectInstant", attributes=attr)
self._addFilter(filterDict)
def photoEffectMono(self):
"""
Applies a preconfigured set of effects that imitate black-and-white photography film with low contrast.
"""
attr = dict()
filterDict = dict(name="CIPhotoEffectMono", attributes=attr)
self._addFilter(filterDict)
def photoEffectNoir(self):
"""
Applies a preconfigured set of effects that imitate black-and-white photography film with exaggerated contrast.
"""
attr = dict()
filterDict = dict(name="CIPhotoEffectNoir", attributes=attr)
self._addFilter(filterDict)
def photoEffectProcess(self):
"""
Applies a preconfigured set of effects that imitate vintage photography film with emphasized cool colors.
"""
attr = dict()
filterDict = dict(name="CIPhotoEffectProcess", attributes=attr)
self._addFilter(filterDict)
def photoEffectTonal(self):
"""
Applies a preconfigured set of effects that imitate black-and-white photography film without significantly altering contrast.
"""
attr = dict()
filterDict = dict(name="CIPhotoEffectTonal", attributes=attr)
self._addFilter(filterDict)
def photoEffectTransfer(self):
"""
Applies a preconfigured set of effects that imitate vintage photography film with emphasized warm colors.
"""
attr = dict()
filterDict = dict(name="CIPhotoEffectTransfer", attributes=attr)
self._addFilter(filterDict)
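    # Example (hedged sketch): the photoEffect* filters take no arguments;
    # each applies a preconfigured recipe:
    #
    #   img.photoEffectNoir()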
def sepiaTone(self, intensity=None):
"""
Maps the colors of an image to various shades of brown.
Attributes: `intensity` a float.
"""
attr = dict()
if intensity:
attr["inputIntensity"] = intensity
filterDict = dict(name="CISepiaTone", attributes=attr)
self._addFilter(filterDict)
def vignette(self, radius=None, intensity=None):
"""
Reduces the brightness of an image at the periphery.
Attributes: `radius` a float, `intensity` a float.
"""
attr = dict()
if radius:
attr["inputRadius"] = radius
if intensity:
attr["inputIntensity"] = intensity
filterDict = dict(name="CIVignette", attributes=attr)
self._addFilter(filterDict)
def vignetteEffect(self, center=None, intensity=None, radius=None):
"""
Modifies the brightness of an image around the periphery of a specified region.
Attributes: `center` a tuple (x, y), `intensity` a float, `radius` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if intensity:
attr["inputIntensity"] = intensity
if radius:
attr["inputRadius"] = radius
filterDict = dict(name="CIVignetteEffect", attributes=attr)
self._addFilter(filterDict)
def additionCompositing(self, backgroundImage=None):
"""
Adds color components to achieve a brightening effect.
Attributes: `backgroundImage` an Image object.
"""
attr = dict()
if backgroundImage:
attr["inputBackgroundImage"] = backgroundImage._ciImage()
filterDict = dict(name="CIAdditionCompositing", attributes=attr)
self._addFilter(filterDict)
def colorBlendMode(self, backgroundImage=None):
"""
Uses the luminance values of the background with the hue and saturation values of the source image.
Attributes: `backgroundImage` an Image object.
"""
attr = dict()
if backgroundImage:
attr["inputBackgroundImage"] = backgroundImage._ciImage()
filterDict = dict(name="CIColorBlendMode", attributes=attr)
self._addFilter(filterDict)
def colorBurnBlendMode(self, backgroundImage=None):
"""
Darkens the background image samples to reflect the source image samples.
Attributes: `backgroundImage` an Image object.
"""
attr = dict()
if backgroundImage:
attr["inputBackgroundImage"] = backgroundImage._ciImage()
filterDict = dict(name="CIColorBurnBlendMode", attributes=attr)
self._addFilter(filterDict)
def colorDodgeBlendMode(self, backgroundImage=None):
"""
Brightens the background image samples to reflect the source image samples.
Attributes: `backgroundImage` an Image object.
"""
attr = dict()
if backgroundImage:
attr["inputBackgroundImage"] = backgroundImage._ciImage()
filterDict = dict(name="CIColorDodgeBlendMode", attributes=attr)
self._addFilter(filterDict)
def darkenBlendMode(self, backgroundImage=None):
"""
Creates composite image samples by choosing the darker samples (from either the source image or the background).
Attributes: `backgroundImage` an Image object.
"""
attr = dict()
if backgroundImage:
attr["inputBackgroundImage"] = backgroundImage._ciImage()
filterDict = dict(name="CIDarkenBlendMode", attributes=attr)
self._addFilter(filterDict)
def differenceBlendMode(self, backgroundImage=None):
"""
Subtracts either the source image sample color from the background image sample color, or the reverse, depending on which sample has the greater brightness value.
Attributes: `backgroundImage` an Image object.
"""
attr = dict()
if backgroundImage:
attr["inputBackgroundImage"] = backgroundImage._ciImage()
filterDict = dict(name="CIDifferenceBlendMode", attributes=attr)
self._addFilter(filterDict)
def divideBlendMode(self, backgroundImage=None):
"""
Divides the background image sample color from the source image sample color.
Attributes: `backgroundImage` an Image object.
"""
attr = dict()
if backgroundImage:
attr["inputBackgroundImage"] = backgroundImage._ciImage()
filterDict = dict(name="CIDivideBlendMode", attributes=attr)
self._addFilter(filterDict)
def exclusionBlendMode(self, backgroundImage=None):
"""
Produces an effect similar to that produced by the `differenceBlendMode` filter but with lower contrast.
Attributes: `backgroundImage` an Image object.
"""
attr = dict()
if backgroundImage:
attr["inputBackgroundImage"] = backgroundImage._ciImage()
filterDict = dict(name="CIExclusionBlendMode", attributes=attr)
self._addFilter(filterDict)
def hardLightBlendMode(self, backgroundImage=None):
"""
Either multiplies or screens colors, depending on the source image sample color.
Attributes: `backgroundImage` an Image object.
"""
attr = dict()
if backgroundImage:
attr["inputBackgroundImage"] = backgroundImage._ciImage()
filterDict = dict(name="CIHardLightBlendMode", attributes=attr)
self._addFilter(filterDict)
def hueBlendMode(self, backgroundImage=None):
"""
Uses the luminance and saturation values of the background image with the hue of the input image.
Attributes: `backgroundImage` an Image object.
"""
attr = dict()
if backgroundImage:
attr["inputBackgroundImage"] = backgroundImage._ciImage()
filterDict = dict(name="CIHueBlendMode", attributes=attr)
self._addFilter(filterDict)
def lightenBlendMode(self, backgroundImage=None):
"""
Creates composite image samples by choosing the lighter samples (either from the source image or the background).
Attributes: `backgroundImage` an Image object.
"""
attr = dict()
if backgroundImage:
attr["inputBackgroundImage"] = backgroundImage._ciImage()
filterDict = dict(name="CILightenBlendMode", attributes=attr)
self._addFilter(filterDict)
def linearBurnBlendMode(self, backgroundImage=None):
"""
Darkens the background image samples to reflect the source image samples while also increasing contrast.
Attributes: `backgroundImage` an Image object.
"""
attr = dict()
if backgroundImage:
attr["inputBackgroundImage"] = backgroundImage._ciImage()
filterDict = dict(name="CILinearBurnBlendMode", attributes=attr)
self._addFilter(filterDict)
def linearDodgeBlendMode(self, backgroundImage=None):
"""
Brightens the background image samples to reflect the source image samples while also increasing contrast.
Attributes: `backgroundImage` an Image object.
"""
attr = dict()
if backgroundImage:
attr["inputBackgroundImage"] = backgroundImage._ciImage()
filterDict = dict(name="CILinearDodgeBlendMode", attributes=attr)
self._addFilter(filterDict)
def luminosityBlendMode(self, backgroundImage=None):
"""
Uses the hue and saturation of the background image with the luminance of the input image.
Attributes: `backgroundImage` an Image object.
"""
attr = dict()
if backgroundImage:
attr["inputBackgroundImage"] = backgroundImage._ciImage()
filterDict = dict(name="CILuminosityBlendMode", attributes=attr)
self._addFilter(filterDict)
def maximumCompositing(self, backgroundImage=None):
"""
Computes the maximum value, by color component, of two input images and creates an output image using the maximum values.
Attributes: `backgroundImage` an Image object.
"""
attr = dict()
if backgroundImage:
attr["inputBackgroundImage"] = backgroundImage._ciImage()
filterDict = dict(name="CIMaximumCompositing", attributes=attr)
self._addFilter(filterDict)
def minimumCompositing(self, backgroundImage=None):
"""
Computes the minimum value, by color component, of two input images and creates an output image using the minimum values.
Attributes: `backgroundImage` an Image object.
"""
attr = dict()
if backgroundImage:
attr["inputBackgroundImage"] = backgroundImage._ciImage()
filterDict = dict(name="CIMinimumCompositing", attributes=attr)
self._addFilter(filterDict)
def multiplyBlendMode(self, backgroundImage=None):
"""
Multiplies the input image samples with the background image samples.
Attributes: `backgroundImage` an Image object.
"""
attr = dict()
if backgroundImage:
attr["inputBackgroundImage"] = backgroundImage._ciImage()
filterDict = dict(name="CIMultiplyBlendMode", attributes=attr)
self._addFilter(filterDict)
def multiplyCompositing(self, backgroundImage=None):
"""
Multiplies the color component of two input images and creates an output image using the multiplied values.
Attributes: `backgroundImage` an Image object.
"""
attr = dict()
if backgroundImage:
attr["inputBackgroundImage"] = backgroundImage._ciImage()
filterDict = dict(name="CIMultiplyCompositing", attributes=attr)
self._addFilter(filterDict)
def overlayBlendMode(self, backgroundImage=None):
"""
Either multiplies or screens the input image samples with the background image samples, depending on the background color.
Attributes: `backgroundImage` an Image object.
"""
attr = dict()
if backgroundImage:
attr["inputBackgroundImage"] = backgroundImage._ciImage()
filterDict = dict(name="CIOverlayBlendMode", attributes=attr)
self._addFilter(filterDict)
def pinLightBlendMode(self, backgroundImage=None):
"""
Conditionally replaces background image samples with source image samples depending on the brightness of the source image samples.
Attributes: `backgroundImage` an Image object.
"""
attr = dict()
if backgroundImage:
attr["inputBackgroundImage"] = backgroundImage._ciImage()
filterDict = dict(name="CIPinLightBlendMode", attributes=attr)
self._addFilter(filterDict)
def saturationBlendMode(self, backgroundImage=None):
"""
Uses the luminance and hue values of the background image with the saturation of the input image.
Attributes: `backgroundImage` an Image object.
"""
attr = dict()
if backgroundImage:
attr["inputBackgroundImage"] = backgroundImage._ciImage()
filterDict = dict(name="CISaturationBlendMode", attributes=attr)
self._addFilter(filterDict)
def screenBlendMode(self, backgroundImage=None):
"""
Multiplies the inverse of the input image samples with the inverse of the background image samples.
Attributes: `backgroundImage` an Image object.
"""
attr = dict()
if backgroundImage:
attr["inputBackgroundImage"] = backgroundImage._ciImage()
filterDict = dict(name="CIScreenBlendMode", attributes=attr)
self._addFilter(filterDict)
def softLightBlendMode(self, backgroundImage=None):
"""
Either darkens or lightens colors, depending on the input image sample color.
Attributes: `backgroundImage` an Image object.
"""
attr = dict()
if backgroundImage:
attr["inputBackgroundImage"] = backgroundImage._ciImage()
filterDict = dict(name="CISoftLightBlendMode", attributes=attr)
self._addFilter(filterDict)
def sourceAtopCompositing(self, backgroundImage=None):
"""
Places the input image over the background image, then uses the luminance of the background image to determine what to show.
Attributes: `backgroundImage` an Image object.
"""
attr = dict()
if backgroundImage:
attr["inputBackgroundImage"] = backgroundImage._ciImage()
filterDict = dict(name="CISourceAtopCompositing", attributes=attr)
self._addFilter(filterDict)
def sourceInCompositing(self, backgroundImage=None):
"""
Uses the background image to define what to leave in the input image, effectively cropping the input image.
Attributes: `backgroundImage` an Image object.
"""
attr = dict()
if backgroundImage:
attr["inputBackgroundImage"] = backgroundImage._ciImage()
filterDict = dict(name="CISourceInCompositing", attributes=attr)
self._addFilter(filterDict)
def sourceOutCompositing(self, backgroundImage=None):
"""
Uses the background image to define what to take out of the input image.
Attributes: `backgroundImage` an Image object.
"""
attr = dict()
if backgroundImage:
attr["inputBackgroundImage"] = backgroundImage._ciImage()
filterDict = dict(name="CISourceOutCompositing", attributes=attr)
self._addFilter(filterDict)
def sourceOverCompositing(self, backgroundImage=None):
"""
Places the input image over the input background image.
Attributes: `backgroundImage` an Image object.
"""
attr = dict()
if backgroundImage:
attr["inputBackgroundImage"] = backgroundImage._ciImage()
filterDict = dict(name="CISourceOverCompositing", attributes=attr)
self._addFilter(filterDict)
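    # Example (hedged sketch): blend and compositing filters take another
    # ImageObject as `backgroundImage`; its CIImage is resolved at call time,
    # so apply the background's own filters before passing it in:
    #
    #   fg = ImageObject("front.png")   # hypothetical files
    #   bg = ImageObject("back.png")
    #   fg.sourceOverCompositing(backgroundImage=bg)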
def subtractBlendMode(self, backgroundImage=None):
"""
Subtracts the background image sample color from the source image sample color.
Attributes: `backgroundImage` an Image object.
"""
attr = dict()
if backgroundImage:
attr["inputBackgroundImage"] = backgroundImage._ciImage()
filterDict = dict(name="CISubtractBlendMode", attributes=attr)
self._addFilter(filterDict)
def bumpDistortion(self, center=None, radius=None, scale=None):
"""
Creates a bump that originates at a specified point in the image.
Attributes: `center` a tuple (x, y), `radius` a float, `scale` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if radius:
attr["inputRadius"] = radius
if scale:
attr["inputScale"] = scale
filterDict = dict(name="CIBumpDistortion", attributes=attr)
self._addFilter(filterDict)
def bumpDistortionLinear(self, center=None, radius=None, angle=None, scale=None):
"""
Creates a concave or convex distortion that originates from a line in the image.
Attributes: `center` a tuple (x, y), `radius` a float, `angle` a float in degrees, `scale` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if radius:
attr["inputRadius"] = radius
if angle:
attr["inputAngle"] = radians(angle)
if scale:
attr["inputScale"] = scale
filterDict = dict(name="CIBumpDistortionLinear", attributes=attr)
self._addFilter(filterDict)
def circleSplashDistortion(self, center=None, radius=None):
"""
Distorts the pixels starting at the circumference of a circle and emanating outward.
Attributes: `center` a tuple (x, y), `radius` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if radius:
attr["inputRadius"] = radius
filterDict = dict(name="CICircleSplashDistortion", attributes=attr)
self._addFilter(filterDict)
def circularWrap(self, center=None, radius=None, angle=None):
"""
Wraps an image around a transparent circle.
Attributes: `center` a tuple (x, y), `radius` a float, `angle` a float in degrees.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if radius:
attr["inputRadius"] = radius
if angle:
attr["inputAngle"] = radians(angle)
filterDict = dict(name="CICircularWrap", attributes=attr)
self._addFilter(filterDict)
def droste(self, insetPoint0=None, insetPoint1=None, strands=None, periodicity=None, rotation=None, zoom=None):
"""
Recursively draws a portion of an image in imitation of an M. C. Escher drawing.
Attributes: `insetPoint0` a tuple (x, y), `insetPoint1` a tuple (x, y), `strands` a float, `periodicity` a float, `rotation` a float, `zoom` a float.
"""
attr = dict()
if insetPoint0:
attr["inputInsetPoint0"] = AppKit.CIVector.vectorWithValues_count_(insetPoint0, 2)
if insetPoint1:
attr["inputInsetPoint1"] = AppKit.CIVector.vectorWithValues_count_(insetPoint1, 2)
if strands:
attr["inputStrands"] = strands
if periodicity:
attr["inputPeriodicity"] = periodicity
if rotation:
attr["inputRotation"] = rotation
if zoom:
attr["inputZoom"] = zoom
filterDict = dict(name="CIDroste", attributes=attr)
self._addFilter(filterDict)
def displacementDistortion(self, displacementImage=None, scale=None):
"""
Applies the grayscale values of the second image to the first image.
Attributes: `displacementImage` an Image object, `scale` a float.
"""
attr = dict()
if displacementImage:
attr["inputDisplacementImage"] = displacementImage._ciImage()
if scale:
attr["inputScale"] = scale
filterDict = dict(name="CIDisplacementDistortion", attributes=attr)
self._addFilter(filterDict)
def glassDistortion(self, texture=None, center=None, scale=None):
"""
Distorts an image by applying a glass-like texture.
Attributes: `texture` an Image object, `center` a tuple (x, y), `scale` a float.
"""
attr = dict()
if texture:
attr["inputTexture"] = texture._ciImage()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if scale:
attr["inputScale"] = scale
filterDict = dict(name="CIGlassDistortion", attributes=attr)
self._addFilter(filterDict)
def glassLozenge(self, point0=None, point1=None, radius=None, refraction=None):
"""
Creates a lozenge-shaped lens and distorts the portion of the image over which the lens is placed.
Attributes: `point0` a tuple (x, y), `point1` a tuple (x, y), `radius` a float, `refraction` a float.
"""
attr = dict()
if point0:
attr["inputPoint0"] = AppKit.CIVector.vectorWithValues_count_(point0, 2)
if point1:
attr["inputPoint1"] = AppKit.CIVector.vectorWithValues_count_(point1, 2)
if radius:
attr["inputRadius"] = radius
if refraction:
attr["inputRefraction"] = refraction
filterDict = dict(name="CIGlassLozenge", attributes=attr)
self._addFilter(filterDict)
def holeDistortion(self, center=None, radius=None):
"""
Creates a circular area that pushes the image pixels outward, distorting those pixels closest to the circle the most.
Attributes: `center` a tuple (x, y), `radius` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if radius:
attr["inputRadius"] = radius
filterDict = dict(name="CIHoleDistortion", attributes=attr)
self._addFilter(filterDict)
def pinchDistortion(self, center=None, radius=None, scale=None):
"""
Creates a rectangular area that pinches source pixels inward, distorting those pixels closest to the rectangle the most.
Attributes: `center` a tuple (x, y), `radius` a float, `scale` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if radius:
attr["inputRadius"] = radius
if scale:
attr["inputScale"] = scale
filterDict = dict(name="CIPinchDistortion", attributes=attr)
self._addFilter(filterDict)
def stretchCrop(self, size=None, cropAmount=None, centerStretchAmount=None):
"""
        Distorts an image by stretching and/or cropping it to fit a target size.
        Attributes: `size` a tuple (w, h), `cropAmount` a float, `centerStretchAmount` a float.
"""
attr = dict()
if size:
attr["inputSize"] = size
if cropAmount:
attr["inputCropAmount"] = cropAmount
if centerStretchAmount:
attr["inputCenterStretchAmount"] = centerStretchAmount
filterDict = dict(name="CIStretchCrop", attributes=attr)
self._addFilter(filterDict)
def torusLensDistortion(self, center=None, radius=None, width=None, refraction=None):
"""
Creates a torus-shaped lens and distorts the portion of the image over which the lens is placed.
Attributes: `center` a tuple (x, y), `radius` a float, `width` a float, `refraction` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if radius:
attr["inputRadius"] = radius
if width:
attr["inputWidth"] = width
if refraction:
attr["inputRefraction"] = refraction
filterDict = dict(name="CITorusLensDistortion", attributes=attr)
self._addFilter(filterDict)
def twirlDistortion(self, center=None, radius=None, angle=None):
"""
Rotates pixels around a point to give a twirling effect.
Attributes: `center` a tuple (x, y), `radius` a float, `angle` a float in degrees.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if radius:
attr["inputRadius"] = radius
if angle:
attr["inputAngle"] = radians(angle)
filterDict = dict(name="CITwirlDistortion", attributes=attr)
self._addFilter(filterDict)
def vortexDistortion(self, center=None, radius=None, angle=None):
"""
Rotates pixels around a point to simulate a vortex.
Attributes: `center` a tuple (x, y), `radius` a float, `angle` a float in degrees.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if radius:
attr["inputRadius"] = radius
if angle:
attr["inputAngle"] = angle
filterDict = dict(name="CIVortexDistortion", attributes=attr)
self._addFilter(filterDict)
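    # Example (hedged sketch): distortion filters; angles are given in degrees
    # here and converted to radians before they reach Core Image:
    #
    #   img.twirlDistortion(center=(150, 150), radius=100, angle=90)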
def aztecCodeGenerator(self, size, message=None, correctionLevel=None, layers=None, compactStyle=None):
"""
Generates an Aztec code (two-dimensional barcode) from input data.
Attributes: `message` a string, `correctionLevel` a float, `layers` a float, `compactStyle` a bool.
"""
attr = dict()
if message:
attr["inputMessage"] = AppKit.NSData.dataWithBytes_length_(message, len(message))
if correctionLevel:
attr["inputCorrectionLevel"] = correctionLevel
if layers:
attr["inputLayers"] = layers
if compactStyle:
attr["inputCompactStyle"] = compactStyle
filterDict = dict(name="CIAztecCodeGenerator", attributes=attr)
filterDict["size"] = size
filterDict["isGenerator"] = True
self._addFilter(filterDict)
def QRCodeGenerator(self, size, message=None, correctionLevel=None):
"""
Generates a Quick Response code (two-dimensional barcode) from input data.
Attributes: `message` a string, `correctionLevel` a float.
"""
attr = dict()
if message:
attr["inputMessage"] = AppKit.NSData.dataWithBytes_length_(message, len(message))
if correctionLevel:
attr["inputCorrectionLevel"] = correctionLevel
filterDict = dict(name="CIQRCodeGenerator", attributes=attr)
filterDict["size"] = size
filterDict["isGenerator"] = True
self._addFilter(filterDict)
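    # Example (hedged sketch): generator filters need an explicit output size
    # since there is no source image; the message ends up in an NSData, so
    # bytes are the safe choice on Python 3:
    #
    #   qr = ImageObject()
    #   qr.QRCodeGenerator((300, 300), message=b"http://www.drawbot.com")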
def code128BarcodeGenerator(self, size, message=None, quietSpace=None):
"""
Generates a Code 128 one-dimensional barcode from input data.
Attributes: `message` a string, `quietSpace` a float.
"""
attr = dict()
if message:
attr["inputMessage"] = AppKit.NSData.dataWithBytes_length_(message, len(message))
if quietSpace:
attr["inputQuietSpace"] = quietSpace
filterDict = dict(name="CICode128BarcodeGenerator", attributes=attr)
filterDict["size"] = size
filterDict["isGenerator"] = True
self._addFilter(filterDict)
def checkerboardGenerator(self, size, center=None, color0=None, color1=None, width=None, sharpness=None):
"""
Generates a checkerboard pattern.
Attributes: `center` a tuple (x, y), `color0` RGBA tuple Color (r, g, b, a), `color1` RGBA tuple Color (r, g, b, a), `width` a float, `sharpness` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if color0:
attr["inputColor0"] = AppKit.CIColor.colorWithRed_green_blue_alpha_(color0[0], color0[1], color0[2], color0[3])
if color1:
attr["inputColor1"] = AppKit.CIColor.colorWithRed_green_blue_alpha_(color1[0], color1[1], color1[2], color1[3])
if width:
attr["inputWidth"] = width
if sharpness:
attr["inputSharpness"] = sharpness
filterDict = dict(name="CICheckerboardGenerator", attributes=attr)
filterDict["size"] = size
filterDict["isGenerator"] = True
self._addFilter(filterDict)
def constantColorGenerator(self, size, color=None):
"""
Generates a solid color.
Attributes: `color` RGBA tuple Color (r, g, b, a).
"""
attr = dict()
if color:
attr["inputColor"] = AppKit.CIColor.colorWithRed_green_blue_alpha_(color[0], color[1], color[2], color[3])
filterDict = dict(name="CIConstantColorGenerator", attributes=attr)
filterDict["size"] = size
filterDict["isGenerator"] = True
self._addFilter(filterDict)
def lenticularHaloGenerator(self, size, center=None, color=None, haloRadius=None, haloWidth=None, haloOverlap=None, striationStrength=None, striationContrast=None, time=None):
"""
Simulates a lens flare.
Attributes: `center` a tuple (x, y), `color` RGBA tuple Color (r, g, b, a), `haloRadius` a float, `haloWidth` a float, `haloOverlap` a float, `striationStrength` a float, `striationContrast` a float, `time` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if color:
attr["inputColor"] = AppKit.CIColor.colorWithRed_green_blue_alpha_(color[0], color[1], color[2], color[3])
if haloRadius:
attr["inputHaloRadius"] = haloRadius
if haloWidth:
attr["inputHaloWidth"] = haloWidth
if haloOverlap:
attr["inputHaloOverlap"] = haloOverlap
if striationStrength:
attr["inputStriationStrength"] = striationStrength
if striationContrast:
attr["inputStriationContrast"] = striationContrast
if time:
attr["inputTime"] = time
filterDict = dict(name="CILenticularHaloGenerator", attributes=attr)
filterDict["size"] = size
filterDict["isGenerator"] = True
self._addFilter(filterDict)
def PDF417BarcodeGenerator(self, size, message=None, minWidth=None, maxWidth=None, minHeight=None, maxHeight=None, dataColumns=None, rows=None, preferredAspectRatio=None, compactionMode=None, compactStyle=None, correctionLevel=None, alwaysSpecifyCompaction=None):
"""
Generates a PDF417 code (two-dimensional barcode) from input data.
Attributes: `message` a string, `minWidth` a float, `maxWidth` a float, `minHeight` a float, `maxHeight` a float, `dataColumns` a float, `rows` a float, `preferredAspectRatio` a float, `compactionMode` a float, `compactStyle` a bool, `correctionLevel` a float, `alwaysSpecifyCompaction` a bool.
"""
attr = dict()
if message:
attr["inputMessage"] = AppKit.NSData.dataWithBytes_length_(message, len(message))
if minWidth:
attr["inputMinWidth"] = minWidth
if maxWidth:
attr["inputMaxWidth"] = maxWidth
if minHeight:
attr["inputMinHeight"] = minHeight
if maxHeight:
attr["inputMaxHeight"] = maxHeight
if dataColumns:
attr["inputDataColumns"] = dataColumns
if rows:
attr["inputRows"] = rows
if preferredAspectRatio:
attr["inputPreferredAspectRatio"] = preferredAspectRatio
if compactionMode:
attr["inputCompactionMode"] = compactionMode
if compactStyle:
attr["inputCompactStyle"] = compactStyle
if correctionLevel:
attr["inputCorrectionLevel"] = correctionLevel
if alwaysSpecifyCompaction:
attr["inputAlwaysSpecifyCompaction"] = alwaysSpecifyCompaction
filterDict = dict(name="CIPDF417BarcodeGenerator", attributes=attr)
filterDict["size"] = size
filterDict["isGenerator"] = True
self._addFilter(filterDict)
def randomGenerator(self, size):
"""
        Generates an image of infinite extent whose pixel values are made up of four independent, uniformly distributed random numbers in the 0 to 1 range.
"""
attr = dict()
filterDict = dict(name="CIRandomGenerator", attributes=attr)
filterDict["size"] = size
filterDict["isGenerator"] = True
self._addFilter(filterDict)
def starShineGenerator(self, size, center=None, color=None, radius=None, crossScale=None, crossAngle=None, crossOpacity=None, crossWidth=None, epsilon=None):
"""
Generates a starburst pattern that is similar to a supernova; can be used to simulate a lens flare.
Attributes: `center` a tuple (x, y), `color` RGBA tuple Color (r, g, b, a), `radius` a float, `crossScale` a float, `crossAngle` a float in degrees, `crossOpacity` a float, `crossWidth` a float, `epsilon` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if color:
attr["inputColor"] = AppKit.CIColor.colorWithRed_green_blue_alpha_(color[0], color[1], color[2], color[3])
if radius:
attr["inputRadius"] = radius
if crossScale:
attr["inputCrossScale"] = crossScale
if crossAngle:
attr["inputCrossAngle"] = radians(crossAngle)
if crossOpacity:
attr["inputCrossOpacity"] = crossOpacity
if crossWidth:
attr["inputCrossWidth"] = crossWidth
if epsilon:
attr["inputEpsilon"] = epsilon
filterDict = dict(name="CIStarShineGenerator", attributes=attr)
filterDict["size"] = size
filterDict["isGenerator"] = True
self._addFilter(filterDict)
def stripesGenerator(self, size, center=None, color0=None, color1=None, width=None, sharpness=None):
"""
Generates a stripe pattern.
Attributes: `center` a tuple (x, y), `color0` RGBA tuple Color (r, g, b, a), `color1` RGBA tuple Color (r, g, b, a), `width` a float, `sharpness` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if color0:
attr["inputColor0"] = AppKit.CIColor.colorWithRed_green_blue_alpha_(color0[0], color0[1], color0[2], color0[3])
if color1:
attr["inputColor1"] = AppKit.CIColor.colorWithRed_green_blue_alpha_(color1[0], color1[1], color1[2], color1[3])
if width:
attr["inputWidth"] = width
if sharpness:
attr["inputSharpness"] = sharpness
filterDict = dict(name="CIStripesGenerator", attributes=attr)
filterDict["size"] = size
filterDict["isGenerator"] = True
self._addFilter(filterDict)
def sunbeamsGenerator(self, size, center=None, color=None, sunRadius=None, maxStriationRadius=None, striationStrength=None, striationContrast=None, time=None):
"""
Generates a sun effect.
Attributes: `center` a tuple (x, y), `color` RGBA tuple Color (r, g, b, a), `sunRadius` a float, `maxStriationRadius` a float, `striationStrength` a float, `striationContrast` a float, `time` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if color:
attr["inputColor"] = AppKit.CIColor.colorWithRed_green_blue_alpha_(color[0], color[1], color[2], color[3])
if sunRadius:
attr["inputSunRadius"] = sunRadius
if maxStriationRadius:
attr["inputMaxStriationRadius"] = maxStriationRadius
if striationStrength:
attr["inputStriationStrength"] = striationStrength
if striationContrast:
attr["inputStriationContrast"] = striationContrast
if time:
attr["inputTime"] = time
filterDict = dict(name="CISunbeamsGenerator", attributes=attr)
filterDict["size"] = size
filterDict["isGenerator"] = True
self._addFilter(filterDict)
def crop(self, rectangle=None):
"""
Applies a crop to an image.
Attributes: `rectangle` a tuple (x, y, w, h).
"""
attr = dict()
if rectangle:
attr["inputRectangle"] = AppKit.CIVector.vectorWithValues_count_(rectangle, 4)
filterDict = dict(name="CICrop", attributes=attr)
self._addFilter(filterDict)
def lanczosScaleTransform(self, scale=None, aspectRatio=None):
"""
Produces a high-quality, scaled version of a source image.
Attributes: `scale` a float, `aspectRatio` a float.
"""
attr = dict()
if scale:
attr["inputScale"] = scale
if aspectRatio:
attr["inputAspectRatio"] = aspectRatio
filterDict = dict(name="CILanczosScaleTransform", attributes=attr)
self._addFilter(filterDict)
def perspectiveCorrection(self, topLeft=None, topRight=None, bottomRight=None, bottomLeft=None):
"""
Applies a perspective correction, transforming an arbitrary quadrilateral region in the source image to a rectangular output image.
Attributes: `topLeft` a tuple (x, y), `topRight` a tuple (x, y), `bottomRight` a tuple (x, y), `bottomLeft` a tuple (x, y).
"""
attr = dict()
if topLeft:
attr["inputTopLeft"] = AppKit.CIVector.vectorWithValues_count_(topLeft, 2)
if topRight:
attr["inputTopRight"] = AppKit.CIVector.vectorWithValues_count_(topRight, 2)
if bottomRight:
attr["inputBottomRight"] = AppKit.CIVector.vectorWithValues_count_(bottomRight, 2)
if bottomLeft:
attr["inputBottomLeft"] = AppKit.CIVector.vectorWithValues_count_(bottomLeft, 2)
filterDict = dict(name="CIPerspectiveCorrection", attributes=attr)
self._addFilter(filterDict)
def perspectiveTransform(self, topLeft=None, topRight=None, bottomRight=None, bottomLeft=None):
"""
Alters the geometry of an image to simulate the observer changing viewing position.
Attributes: `topLeft` a tuple (x, y), `topRight` a tuple (x, y), `bottomRight` a tuple (x, y), `bottomLeft` a tuple (x, y).
"""
attr = dict()
if topLeft:
attr["inputTopLeft"] = AppKit.CIVector.vectorWithValues_count_(topLeft, 2)
if topRight:
attr["inputTopRight"] = AppKit.CIVector.vectorWithValues_count_(topRight, 2)
if bottomRight:
attr["inputBottomRight"] = AppKit.CIVector.vectorWithValues_count_(bottomRight, 2)
if bottomLeft:
attr["inputBottomLeft"] = AppKit.CIVector.vectorWithValues_count_(bottomLeft, 2)
filterDict = dict(name="CIPerspectiveTransform", attributes=attr)
self._addFilter(filterDict)
def straightenFilter(self, angle=None):
"""
        Rotates the source image by the specified angle.
Attributes: `angle` a float in degrees.
"""
attr = dict()
if angle:
attr["inputAngle"] = radians(angle)
filterDict = dict(name="CIStraightenFilter", attributes=attr)
self._addFilter(filterDict)
def gaussianGradient(self, size, center=None, color0=None, color1=None, radius=None):
"""
Generates a gradient that varies from one color to another using a Gaussian distribution.
Attributes: `center` a tuple (x, y), `color0` RGBA tuple Color (r, g, b, a), `color1` RGBA tuple Color (r, g, b, a), `radius` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if color0:
attr["inputColor0"] = AppKit.CIColor.colorWithRed_green_blue_alpha_(color0[0], color0[1], color0[2], color0[3])
if color1:
attr["inputColor1"] = AppKit.CIColor.colorWithRed_green_blue_alpha_(color1[0], color1[1], color1[2], color1[3])
if radius:
attr["inputRadius"] = radius
filterDict = dict(name="CIGaussianGradient", attributes=attr)
filterDict["size"] = size
filterDict["isGenerator"] = True
self._addFilter(filterDict)
def linearGradient(self, size, point0=None, point1=None, color0=None, color1=None):
"""
Generates a gradient that varies along a linear axis between two defined endpoints.
Attributes: `point0` a tuple (x, y), `point1` a tuple (x, y), `color0` RGBA tuple Color (r, g, b, a), `color1` RGBA tuple Color (r, g, b, a).
"""
attr = dict()
if point0:
attr["inputPoint0"] = AppKit.CIVector.vectorWithValues_count_(point0, 2)
if point1:
attr["inputPoint1"] = AppKit.CIVector.vectorWithValues_count_(point1, 2)
if color0:
attr["inputColor0"] = AppKit.CIColor.colorWithRed_green_blue_alpha_(color0[0], color0[1], color0[2], color0[3])
if color1:
attr["inputColor1"] = AppKit.CIColor.colorWithRed_green_blue_alpha_(color1[0], color1[1], color1[2], color1[3])
filterDict = dict(name="CILinearGradient", attributes=attr)
filterDict["size"] = size
filterDict["isGenerator"] = True
self._addFilter(filterDict)
def radialGradient(self, size, center=None, radius0=None, radius1=None, color0=None, color1=None):
"""
Generates a gradient that varies radially between two circles having the same center.
Attributes: `center` a tuple (x, y), `radius0` a float, `radius1` a float, `color0` RGBA tuple Color (r, g, b, a), `color1` RGBA tuple Color (r, g, b, a).
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if radius0:
attr["inputRadius0"] = radius0
if radius1:
attr["inputRadius1"] = radius1
if color0:
attr["inputColor0"] = AppKit.CIColor.colorWithRed_green_blue_alpha_(color0[0], color0[1], color0[2], color0[3])
if color1:
attr["inputColor1"] = AppKit.CIColor.colorWithRed_green_blue_alpha_(color1[0], color1[1], color1[2], color1[3])
filterDict = dict(name="CIRadialGradient", attributes=attr)
filterDict["size"] = size
filterDict["isGenerator"] = True
self._addFilter(filterDict)
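    # Example (hedged sketch): gradient generators are rendered into a bitmap
    # of the given size by _applyFilters, like the barcode generators above:
    #
    #   grad = ImageObject()
    #   grad.linearGradient((200, 200), point0=(0, 0), point1=(200, 200),
    #                       color0=(1, 0, 0, 1), color1=(0, 0, 1, 1))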
def circularScreen(self, center=None, width=None, sharpness=None):
"""
Simulates a circular-shaped halftone screen.
Attributes: `center` a tuple (x, y), `width` a float, `sharpness` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if width:
attr["inputWidth"] = width
if sharpness:
attr["inputSharpness"] = sharpness
filterDict = dict(name="CICircularScreen", attributes=attr)
self._addFilter(filterDict)
def CMYKHalftone(self, center=None, width=None, angle=None, sharpness=None, GCR=None, UCR=None):
"""
Creates a color, halftoned rendition of the source image, using cyan, magenta, yellow, and black inks over a white page.
Attributes: `center` a tuple (x, y), `width` a float, `angle` a float in degrees, `sharpness` a float, `GCR` a float, `UCR` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if width:
attr["inputWidth"] = width
if angle:
attr["inputAngle"] = radians(angle)
if sharpness:
attr["inputSharpness"] = sharpness
if GCR:
attr["inputGCR"] = GCR
if UCR:
attr["inputUCR"] = UCR
filterDict = dict(name="CICMYKHalftone", attributes=attr)
self._addFilter(filterDict)
def dotScreen(self, center=None, angle=None, width=None, sharpness=None):
"""
Simulates the dot patterns of a halftone screen.
Attributes: `center` a tuple (x, y), `angle` a float in degrees, `width` a float, `sharpness` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if angle:
attr["inputAngle"] = radians(angle)
if width:
attr["inputWidth"] = width
if sharpness:
attr["inputSharpness"] = sharpness
filterDict = dict(name="CIDotScreen", attributes=attr)
self._addFilter(filterDict)
def hatchedScreen(self, center=None, angle=None, width=None, sharpness=None):
"""
Simulates the hatched pattern of a halftone screen.
Attributes: `center` a tuple (x, y), `angle` a float in degrees, `width` a float, `sharpness` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if angle:
attr["inputAngle"] = radians(angle)
if width:
attr["inputWidth"] = width
if sharpness:
attr["inputSharpness"] = sharpness
filterDict = dict(name="CIHatchedScreen", attributes=attr)
self._addFilter(filterDict)
def lineScreen(self, center=None, angle=None, width=None, sharpness=None):
"""
Simulates the line pattern of a halftone screen.
Attributes: `center` a tuple (x, y), `angle` a float in degrees, `width` a float, `sharpness` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if angle:
attr["inputAngle"] = radians(angle)
if width:
attr["inputWidth"] = width
if sharpness:
attr["inputSharpness"] = sharpness
filterDict = dict(name="CILineScreen", attributes=attr)
self._addFilter(filterDict)
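    # Example (hedged sketch): the halftone screens share one parameter set;
    # the angle is again given in degrees:
    #
    #   img.dotScreen(center=(0, 0), angle=45, width=6, sharpness=0.7)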
def areaAverage(self, extent=None):
"""
Returns a single-pixel image that contains the average color for the region of interest.
Attributes: `extent` a tuple (x, y, w, h).
"""
attr = dict()
if extent:
attr["inputExtent"] = AppKit.CIVector.vectorWithValues_count_(extent, 4)
filterDict = dict(name="CIAreaAverage", attributes=attr)
self._addFilter(filterDict)
def areaHistogram(self, extent=None, count=None, scale=None):
"""
        Returns a 1D image (`count` pixels wide by one pixel high) that contains the component-wise histogram computed for the specified rectangular area.
Attributes: `extent` a tuple (x, y, w, h), `count` a float, `scale` a float.
"""
attr = dict()
if extent:
attr["inputExtent"] = AppKit.CIVector.vectorWithValues_count_(extent, 4)
if count:
attr["inputCount"] = count
if scale:
attr["inputScale"] = scale
filterDict = dict(name="CIAreaHistogram", attributes=attr)
self._addFilter(filterDict)
def rowAverage(self, extent=None):
"""
Returns a 1-pixel high image that contains the average color for each scan row.
Attributes: `extent` a tuple (x, y, w, h).
"""
attr = dict()
if extent:
attr["inputExtent"] = AppKit.CIVector.vectorWithValues_count_(extent, 4)
filterDict = dict(name="CIRowAverage", attributes=attr)
self._addFilter(filterDict)
def columnAverage(self, extent=None):
"""
Returns a 1-pixel high image that contains the average color for each scan column.
Attributes: `extent` a tuple (x, y, w, h).
"""
attr = dict()
if extent:
attr["inputExtent"] = AppKit.CIVector.vectorWithValues_count_(extent, 4)
filterDict = dict(name="CIColumnAverage", attributes=attr)
self._addFilter(filterDict)
def histogramDisplayFilter(self, height=None, highLimit=None, lowLimit=None):
"""
Generates a histogram image from the output of the `areaHistogram` filter.
Attributes: `height` a float, `highLimit` a float, `lowLimit` a float.
"""
attr = dict()
if height:
attr["inputHeight"] = height
if highLimit:
attr["inputHighLimit"] = highLimit
if lowLimit:
attr["inputLowLimit"] = lowLimit
filterDict = dict(name="CIHistogramDisplayFilter", attributes=attr)
self._addFilter(filterDict)
def areaMaximum(self, extent=None):
"""
Returns a single-pixel image that contains the maximum color components for the region of interest.
Attributes: `extent` a tuple (x, y, w, h).
"""
attr = dict()
if extent:
attr["inputExtent"] = AppKit.CIVector.vectorWithValues_count_(extent, 4)
filterDict = dict(name="CIAreaMaximum", attributes=attr)
self._addFilter(filterDict)
def areaMinimum(self, extent=None):
"""
Returns a single-pixel image that contains the minimum color components for the region of interest.
Attributes: `extent` a tuple (x, y, w, h).
"""
attr = dict()
if extent:
attr["inputExtent"] = AppKit.CIVector.vectorWithValues_count_(extent, 4)
filterDict = dict(name="CIAreaMinimum", attributes=attr)
self._addFilter(filterDict)
def areaMaximumAlpha(self, extent=None):
"""
Returns a single-pixel image that contains the color vector with the maximum alpha value for the region of interest.
Attributes: `extent` a tuple (x, y, w, h).
"""
attr = dict()
if extent:
attr["inputExtent"] = AppKit.CIVector.vectorWithValues_count_(extent, 4)
filterDict = dict(name="CIAreaMaximumAlpha", attributes=attr)
self._addFilter(filterDict)
def areaMinimumAlpha(self, extent=None):
"""
Returns a single-pixel image that contains the color vector with the minimum alpha value for the region of interest.
Attributes: `extent` a tuple (x, y, w, h).
"""
attr = dict()
if extent:
attr["inputExtent"] = AppKit.CIVector.vectorWithValues_count_(extent, 4)
filterDict = dict(name="CIAreaMinimumAlpha", attributes=attr)
self._addFilter(filterDict)
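    # Example (hedged sketch): the area* reduction filters collapse the region
    # of interest to a single pixel, so they are typically the last step before
    # reading pixel values back out:
    #
    #   img.areaAverage(extent=(0, 0, 200, 200))
    #   # the result should now be a one-pixel image holding the average color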
def sharpenLuminance(self, sharpness=None):
"""
Increases image detail by sharpening.
Attributes: `sharpness` a float.
"""
attr = dict()
if sharpness:
attr["inputSharpness"] = sharpness
filterDict = dict(name="CISharpenLuminance", attributes=attr)
self._addFilter(filterDict)
def unsharpMask(self, radius=None, intensity=None):
"""
Increases the contrast of the edges between pixels of different colors in an image.
Attributes: `radius` a float, `intensity` a float.
"""
attr = dict()
if radius:
attr["inputRadius"] = radius
if intensity:
attr["inputIntensity"] = intensity
filterDict = dict(name="CIUnsharpMask", attributes=attr)
self._addFilter(filterDict)
def blendWithAlphaMask(self, backgroundImage=None, maskImage=None):
"""
Uses alpha values from a mask to interpolate between an image and the background.
Attributes: `backgroundImage` an Image object, `maskImage` an Image object.
"""
attr = dict()
if backgroundImage:
attr["inputBackgroundImage"] = backgroundImage._ciImage()
if maskImage:
attr["inputMaskImage"] = maskImage._ciImage()
filterDict = dict(name="CIBlendWithAlphaMask", attributes=attr)
self._addFilter(filterDict)
def blendWithMask(self, backgroundImage=None, maskImage=None):
"""
Uses values from a grayscale mask to interpolate between an image and the background.
Attributes: `backgroundImage` an Image object, `maskImage` an Image object.
"""
attr = dict()
if backgroundImage:
attr["inputBackgroundImage"] = backgroundImage._ciImage()
if maskImage:
attr["inputMaskImage"] = maskImage._ciImage()
filterDict = dict(name="CIBlendWithMask", attributes=attr)
self._addFilter(filterDict)
def bloom(self, radius=None, intensity=None):
"""
Softens edges and applies a pleasant glow to an image.
Attributes: `radius` a float, `intensity` a float.
"""
attr = dict()
if radius:
attr["inputRadius"] = radius
if intensity:
attr["inputIntensity"] = intensity
filterDict = dict(name="CIBloom", attributes=attr)
self._addFilter(filterDict)
def comicEffect(self):
"""
Simulates a comic book drawing by outlining edges and applying a color halftone effect.
"""
attr = dict()
filterDict = dict(name="CIComicEffect", attributes=attr)
self._addFilter(filterDict)
def convolution3X3(self, weights=None, bias=None):
"""
Modifies pixel values by performing a 3x3 matrix convolution.
Attributes: `weights` a float, `bias` a float.
"""
attr = dict()
if weights:
attr["inputWeights"] = weights
if bias:
attr["inputBias"] = bias
filterDict = dict(name="CIConvolution3X3", attributes=attr)
self._addFilter(filterDict)
def convolution5X5(self, weights=None, bias=None):
"""
Modifies pixel values by performing a 5x5 matrix convolution.
Attributes: `weights` a float, `bias` a float.
"""
attr = dict()
if weights:
attr["inputWeights"] = weights
if bias:
attr["inputBias"] = bias
filterDict = dict(name="CIConvolution5X5", attributes=attr)
self._addFilter(filterDict)
def convolution7X7(self, weights=None, bias=None):
"""
Modifies pixel values by performing a 7x7 matrix convolution.
Attributes: `weights` a float, `bias` a float.
"""
attr = dict()
if weights:
attr["inputWeights"] = weights
if bias:
attr["inputBias"] = bias
filterDict = dict(name="CIConvolution7X7", attributes=attr)
self._addFilter(filterDict)
def convolution9Horizontal(self, weights=None, bias=None):
"""
Modifies pixel values by performing a 9-element horizontal convolution.
Attributes: `weights` a float, `bias` a float.
"""
attr = dict()
if weights:
attr["inputWeights"] = weights
if bias:
attr["inputBias"] = bias
filterDict = dict(name="CIConvolution9Horizontal", attributes=attr)
self._addFilter(filterDict)
def convolution9Vertical(self, weights=None, bias=None):
"""
Modifies pixel values by performing a 9-element vertical convolution.
Attributes: `weights` a float, `bias` a float.
"""
attr = dict()
if weights:
attr["inputWeights"] = weights
if bias:
attr["inputBias"] = bias
filterDict = dict(name="CIConvolution9Vertical", attributes=attr)
self._addFilter(filterDict)
def crystallize(self, radius=None, center=None):
"""
Creates polygon-shaped color blocks by aggregating source pixel-color values.
Attributes: `radius` a float, `center` a tuple (x, y).
"""
attr = dict()
if radius:
attr["inputRadius"] = radius
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
filterDict = dict(name="CICrystallize", attributes=attr)
self._addFilter(filterDict)
def depthOfField(self, point0=None, point1=None, saturation=None, unsharpMaskRadius=None, unsharpMaskIntensity=None, radius=None):
"""
Simulates a depth of field effect.
Attributes: `point0` a tuple (x, y), `point1` a tuple (x, y), `saturation` a float, `unsharpMaskRadius` a float, `unsharpMaskIntensity` a float, `radius` a float.
"""
attr = dict()
if point0:
attr["inputPoint0"] = AppKit.CIVector.vectorWithValues_count_(point0, 2)
if point1:
attr["inputPoint1"] = AppKit.CIVector.vectorWithValues_count_(point1, 2)
if saturation:
attr["inputSaturation"] = saturation
if unsharpMaskRadius:
attr["inputUnsharpMaskRadius"] = unsharpMaskRadius
if unsharpMaskIntensity:
attr["inputUnsharpMaskIntensity"] = unsharpMaskIntensity
if radius:
attr["inputRadius"] = radius
filterDict = dict(name="CIDepthOfField", attributes=attr)
self._addFilter(filterDict)
def edges(self, intensity=None):
"""
Finds all edges in an image and displays them in color.
Attributes: `intensity` a float.
"""
attr = dict()
if intensity:
attr["inputIntensity"] = intensity
filterDict = dict(name="CIEdges", attributes=attr)
self._addFilter(filterDict)
def edgeWork(self, radius=None):
"""
Produces a stylized black-and-white rendition of an image that looks similar to a woodblock cutout.
Attributes: `radius` a float.
"""
attr = dict()
if radius:
attr["inputRadius"] = radius
filterDict = dict(name="CIEdgeWork", attributes=attr)
self._addFilter(filterDict)
def gloom(self, radius=None, intensity=None):
"""
Dulls the highlights of an image.
Attributes: `radius` a float, `intensity` a float.
"""
attr = dict()
if radius:
attr["inputRadius"] = radius
if intensity:
attr["inputIntensity"] = intensity
filterDict = dict(name="CIGloom", attributes=attr)
self._addFilter(filterDict)
def heightFieldFromMask(self, radius=None):
"""
Produces a continuous three-dimensional, loft-shaped height field from a grayscale mask.
Attributes: `radius` a float.
"""
attr = dict()
if radius:
attr["inputRadius"] = radius
filterDict = dict(name="CIHeightFieldFromMask", attributes=attr)
self._addFilter(filterDict)
def hexagonalPixellate(self, center=None, scale=None):
"""
Maps an image to colored hexagons whose color is defined by the replaced pixels.
Attributes: `center` a tuple (x, y), `scale` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if scale:
attr["inputScale"] = scale
filterDict = dict(name="CIHexagonalPixellate", attributes=attr)
self._addFilter(filterDict)
def highlightShadowAdjust(self, highlightAmount=None, shadowAmount=None):
"""
        Adjusts the tonal mapping of an image while preserving spatial detail.
Attributes: `highlightAmount` a float, `shadowAmount` a float.
"""
attr = dict()
if highlightAmount:
attr["inputHighlightAmount"] = highlightAmount
if shadowAmount:
attr["inputShadowAmount"] = shadowAmount
filterDict = dict(name="CIHighlightShadowAdjust", attributes=attr)
self._addFilter(filterDict)
def lineOverlay(self, noiseLevel=None, sharpness=None, edgeIntensity=None, threshold=None, contrast=None):
"""
Creates a sketch that outlines the edges of an image in black.
Attributes: `noiseLevel` a float, `sharpness` a float, `edgeIntensity` a float, `threshold` a float, `contrast` a float.
"""
attr = dict()
if noiseLevel:
attr["inputNRNoiseLevel"] = noiseLevel
if sharpness:
attr["inputNRSharpness"] = sharpness
if edgeIntensity:
attr["inputEdgeIntensity"] = edgeIntensity
if threshold:
attr["inputThreshold"] = threshold
if contrast:
attr["inputContrast"] = contrast
filterDict = dict(name="CILineOverlay", attributes=attr)
self._addFilter(filterDict)
def pixellate(self, center=None, scale=None):
"""
Makes an image blocky by mapping the image to colored squares whose color is defined by the replaced pixels.
Attributes: `center` a tuple (x, y), `scale` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if scale:
attr["inputScale"] = scale
filterDict = dict(name="CIPixellate", attributes=attr)
self._addFilter(filterDict)
def pointillize(self, radius=None, center=None):
"""
Renders the source image in a pointillistic style.
Attributes: `radius` a float, `center` a tuple (x, y).
"""
attr = dict()
if radius:
attr["inputRadius"] = radius
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
filterDict = dict(name="CIPointillize", attributes=attr)
self._addFilter(filterDict)
def shadedMaterial(self, shadingImage=None, scale=None):
"""
Produces a shaded image from a height field.
Attributes: `shadingImage` an Image object, `scale` a float.
"""
attr = dict()
if shadingImage:
attr["inputShadingImage"] = shadingImage._ciImage()
if scale:
attr["inputScale"] = scale
filterDict = dict(name="CIShadedMaterial", attributes=attr)
self._addFilter(filterDict)
def spotColor(self, centerColor1=None, replacementColor1=None, closeness1=None, contrast1=None, centerColor2=None, replacementColor2=None, closeness2=None, contrast2=None, centerColor3=None, replacementColor3=None, closeness3=None, contrast3=None):
"""
Replaces one or more color ranges with spot colors.
Attributes: `centerColor1` RGBA tuple Color (r, g, b, a), `replacementColor1` RGBA tuple Color (r, g, b, a), `closeness1` a float, `contrast1` a float, `centerColor2` RGBA tuple Color (r, g, b, a), `replacementColor2` RGBA tuple Color (r, g, b, a), `closeness2` a float, `contrast2` a float, `centerColor3` RGBA tuple Color (r, g, b, a), `replacementColor3` RGBA tuple Color (r, g, b, a), `closeness3` a float, `contrast3` a float.
"""
attr = dict()
if centerColor1:
attr["inputCenterColor1"] = AppKit.CIColor.colorWithRed_green_blue_alpha_(centerColor1[0], centerColor1[1], centerColor1[2], centerColor1[3])
if replacementColor1:
attr["inputReplacementColor1"] = AppKit.CIColor.colorWithRed_green_blue_alpha_(replacementColor1[0], replacementColor1[1], replacementColor1[2], replacementColor1[3])
if closeness1:
attr["inputCloseness1"] = closeness1
if contrast1:
attr["inputContrast1"] = contrast1
if centerColor2:
attr["inputCenterColor2"] = AppKit.CIColor.colorWithRed_green_blue_alpha_(centerColor2[0], centerColor2[1], centerColor2[2], centerColor2[3])
if replacementColor2:
attr["inputReplacementColor2"] = AppKit.CIColor.colorWithRed_green_blue_alpha_(replacementColor2[0], replacementColor2[1], replacementColor2[2], replacementColor2[3])
if closeness2:
attr["inputCloseness2"] = closeness2
if contrast2:
attr["inputContrast2"] = contrast2
if centerColor3:
attr["inputCenterColor3"] = AppKit.CIColor.colorWithRed_green_blue_alpha_(centerColor3[0], centerColor3[1], centerColor3[2], centerColor3[3])
if replacementColor3:
attr["inputReplacementColor3"] = AppKit.CIColor.colorWithRed_green_blue_alpha_(replacementColor3[0], replacementColor3[1], replacementColor3[2], replacementColor3[3])
if closeness3:
attr["inputCloseness3"] = closeness3
if contrast3:
attr["inputContrast3"] = contrast3
filterDict = dict(name="CISpotColor", attributes=attr)
self._addFilter(filterDict)
def spotLight(self, lightPosition=None, lightPointsAt=None, brightness=None, concentration=None, color=None):
"""
Applies a directional spotlight effect to an image.
        Attributes: `lightPosition` a tuple (x, y, z), `lightPointsAt` a tuple (x, y), `brightness` a float, `concentration` a float, `color` RGBA tuple Color (r, g, b, a).
"""
attr = dict()
if lightPosition:
attr["inputLightPosition"] = AppKit.CIVector.vectorWithValues_count_(lightPosition, 3)
if lightPointsAt:
attr["inputLightPointsAt"] = AppKit.CIVector.vectorWithValues_count_(lightPointsAt, 2)
if brightness:
attr["inputBrightness"] = brightness
if concentration:
attr["inputConcentration"] = concentration
if color:
attr["inputColor"] = AppKit.CIColor.colorWithRed_green_blue_alpha_(color[0], color[1], color[2], color[3])
filterDict = dict(name="CISpotLight", attributes=attr)
self._addFilter(filterDict)
def affineClamp(self, transform=None):
"""
Performs an affine transform on a source image and then clamps the pixels at the edge of the transformed image, extending them outwards.
Attributes: `transform`.
"""
attr = dict()
if transform:
attr["inputTransform"] = transform
filterDict = dict(name="CIAffineClamp", attributes=attr)
self._addFilter(filterDict)
def affineTile(self, transform=None):
"""
Applies an affine transform to an image and then tiles the transformed image.
Attributes: `transform`.
"""
attr = dict()
if transform:
attr["inputTransform"] = transform
filterDict = dict(name="CIAffineTile", attributes=attr)
self._addFilter(filterDict)
def eightfoldReflectedTile(self, center=None, angle=None, width=None):
"""
Produces a tiled image from a source image by applying an 8-way reflected symmetry.
Attributes: `center` a tuple (x, y), `angle` a float in degrees, `width` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if angle:
attr["inputAngle"] = radians(angle)
if width:
attr["inputWidth"] = width
filterDict = dict(name="CIEightfoldReflectedTile", attributes=attr)
self._addFilter(filterDict)
def fourfoldReflectedTile(self, center=None, angle=None, acuteAngle=None, width=None):
"""
Produces a tiled image from a source image by applying a 4-way reflected symmetry.
Attributes: `center` a tuple (x, y), `angle` a float in degrees, `acuteAngle` a float in degrees, `width` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if angle:
attr["inputAngle"] = radians(angle)
if acuteAngle:
attr["inputAcuteAngle"] = radians(acuteAngle)
if width:
attr["inputWidth"] = width
filterDict = dict(name="CIFourfoldReflectedTile", attributes=attr)
self._addFilter(filterDict)
def fourfoldRotatedTile(self, center=None, angle=None, width=None):
"""
Produces a tiled image from a source image by rotating the source image at increments of 90 degrees.
Attributes: `center` a tuple (x, y), `angle` a float in degrees, `width` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if angle:
attr["inputAngle"] = radians(angle)
if width:
attr["inputWidth"] = width
filterDict = dict(name="CIFourfoldRotatedTile", attributes=attr)
self._addFilter(filterDict)
def fourfoldTranslatedTile(self, center=None, angle=None, acuteAngle=None, width=None):
"""
Produces a tiled image from a source image by applying 4 translation operations.
Attributes: `center` a tuple (x, y), `angle` a float in degrees, `acuteAngle` a float in degrees, `width` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if angle:
attr["inputAngle"] = radians(angle)
if acuteAngle:
attr["inputAcuteAngle"] = radians(acuteAngle)
if width:
attr["inputWidth"] = width
filterDict = dict(name="CIFourfoldTranslatedTile", attributes=attr)
self._addFilter(filterDict)
def glideReflectedTile(self, center=None, angle=None, width=None):
"""
Produces a tiled image from a source image by translating and smearing the image.
Attributes: `center` a tuple (x, y), `angle` a float in degrees, `width` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if angle:
attr["inputAngle"] = radians(angle)
if width:
attr["inputWidth"] = width
filterDict = dict(name="CIGlideReflectedTile", attributes=attr)
self._addFilter(filterDict)
def kaleidoscope(self, count=None, center=None, angle=None):
"""
Produces a kaleidoscopic image from a source image by applying 12-way symmetry.
Attributes: `count` a float, `center` a tuple (x, y), `angle` a float in degrees.
"""
attr = dict()
if count:
attr["inputCount"] = count
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if angle:
attr["inputAngle"] = radians(angle)
filterDict = dict(name="CIKaleidoscope", attributes=attr)
self._addFilter(filterDict)
def opTile(self, center=None, scale=None, angle=None, width=None):
"""
Segments an image, applying any specified scaling and rotation, and then assembles the image again to give an op art appearance.
Attributes: `center` a tuple (x, y), `scale` a float, `angle` a float in degrees, `width` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if scale:
attr["inputScale"] = scale
if angle:
attr["inputAngle"] = radians(angle)
if width:
attr["inputWidth"] = width
filterDict = dict(name="CIOpTile", attributes=attr)
self._addFilter(filterDict)
def parallelogramTile(self, center=None, angle=None, acuteAngle=None, width=None):
"""
Warps an image by reflecting it in a parallelogram, and then tiles the result.
Attributes: `center` a tuple (x, y), `angle` a float in degrees, `acuteAngle` a float in degrees, `width` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if angle:
attr["inputAngle"] = radians(angle)
if acuteAngle:
attr["inputAcuteAngle"] = radians(acuteAngle)
if width:
attr["inputWidth"] = width
filterDict = dict(name="CIParallelogramTile", attributes=attr)
self._addFilter(filterDict)
def perspectiveTile(self, topLeft=None, topRight=None, bottomRight=None, bottomLeft=None):
"""
Applies a perspective transform to an image and then tiles the result.
Attributes: `topLeft` a tuple (x, y), `topRight` a tuple (x, y), `bottomRight` a tuple (x, y), `bottomLeft` a tuple (x, y).
"""
attr = dict()
if topLeft:
attr["inputTopLeft"] = AppKit.CIVector.vectorWithValues_count_(topLeft, 2)
if topRight:
attr["inputTopRight"] = AppKit.CIVector.vectorWithValues_count_(topRight, 2)
if bottomRight:
attr["inputBottomRight"] = AppKit.CIVector.vectorWithValues_count_(bottomRight, 2)
if bottomLeft:
attr["inputBottomLeft"] = AppKit.CIVector.vectorWithValues_count_(bottomLeft, 2)
filterDict = dict(name="CIPerspectiveTile", attributes=attr)
self._addFilter(filterDict)
def sixfoldReflectedTile(self, center=None, angle=None, width=None):
"""
Produces a tiled image from a source image by applying a 6-way reflected symmetry.
Attributes: `center` a tuple (x, y), `angle` a float in degrees, `width` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if angle:
attr["inputAngle"] = radians(angle)
if width:
attr["inputWidth"] = width
filterDict = dict(name="CISixfoldReflectedTile", attributes=attr)
self._addFilter(filterDict)
def sixfoldRotatedTile(self, center=None, angle=None, width=None):
"""
Produces a tiled image from a source image by rotating the source image at increments of 60 degrees.
Attributes: `center` a tuple (x, y), `angle` a float in degrees, `width` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if angle:
attr["inputAngle"] = radians(angle)
if width:
attr["inputWidth"] = width
filterDict = dict(name="CISixfoldRotatedTile", attributes=attr)
self._addFilter(filterDict)
def triangleTile(self, center=None, angle=None, width=None):
"""
Maps a triangular portion of image to a triangular area and then tiles the result.
Attributes: `center` a tuple (x, y), `angle` a float in degrees, `width` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if angle:
attr["inputAngle"] = radians(angle)
if width:
attr["inputWidth"] = width
filterDict = dict(name="CITriangleTile", attributes=attr)
self._addFilter(filterDict)
def twelvefoldReflectedTile(self, center=None, angle=None, width=None):
"""
        Produces a tiled image from a source image by applying a 12-way reflected symmetry.
Attributes: `center` a tuple (x, y), `angle` a float in degrees, `width` a float.
"""
attr = dict()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if angle:
attr["inputAngle"] = radians(angle)
if width:
attr["inputWidth"] = width
filterDict = dict(name="CITwelvefoldReflectedTile", attributes=attr)
self._addFilter(filterDict)
def accordionFoldTransition(self, targetImage=None, bottomHeight=None, numberOfFolds=None, foldShadowAmount=None, time=None):
"""
Transitions from one image to another of differing dimensions by unfolding and crossfading.
Attributes: `targetImage` an Image object, `bottomHeight` a float, `numberOfFolds` a float, `foldShadowAmount` a float, `time` a float.
"""
attr = dict()
if targetImage:
attr["inputTargetImage"] = targetImage._ciImage()
if bottomHeight:
attr["inputBottomHeight"] = bottomHeight
if numberOfFolds:
attr["inputNumberOfFolds"] = numberOfFolds
if foldShadowAmount:
attr["inputFoldShadowAmount"] = foldShadowAmount
if time:
attr["inputTime"] = time
filterDict = dict(name="CIAccordionFoldTransition", attributes=attr)
self._addFilter(filterDict)
def barsSwipeTransition(self, targetImage=None, angle=None, width=None, barOffset=None, time=None):
"""
Transitions from one image to another by passing a bar over the source image.
Attributes: `targetImage` an Image object, `angle` a float in degrees, `width` a float, `barOffset` a float, `time` a float.
"""
attr = dict()
if targetImage:
attr["inputTargetImage"] = targetImage._ciImage()
if angle:
attr["inputAngle"] = radians(angle)
if width:
attr["inputWidth"] = width
if barOffset:
attr["inputBarOffset"] = barOffset
if time:
attr["inputTime"] = time
filterDict = dict(name="CIBarsSwipeTransition", attributes=attr)
self._addFilter(filterDict)
def copyMachineTransition(self, targetImage=None, extent=None, color=None, time=None, angle=None, width=None, opacity=None):
"""
Transitions from one image to another by simulating the effect of a copy machine.
Attributes: `targetImage` an Image object, `extent` a tuple (x, y, w, h), `color` RGBA tuple Color (r, g, b, a), `time` a float, `angle` a float in degrees, `width` a float, `opacity` a float.
"""
attr = dict()
if targetImage:
attr["inputTargetImage"] = targetImage._ciImage()
if extent:
attr["inputExtent"] = AppKit.CIVector.vectorWithValues_count_(extent, 4)
if color:
attr["inputColor"] = AppKit.CIColor.colorWithRed_green_blue_alpha_(color[0], color[1], color[2], color[3])
if time:
attr["inputTime"] = time
if angle:
attr["inputAngle"] = radians(angle)
if width:
attr["inputWidth"] = width
if opacity:
attr["inputOpacity"] = opacity
filterDict = dict(name="CICopyMachineTransition", attributes=attr)
self._addFilter(filterDict)
def disintegrateWithMaskTransition(self, targetImage=None, maskImage=None, time=None, shadowRadius=None, shadowDensity=None, shadowOffset=None):
"""
Transitions from one image to another using the shape defined by a mask.
Attributes: `targetImage` an Image object, `maskImage` an Image object, `time` a float, `shadowRadius` a float, `shadowDensity` a float, `shadowOffset` a tuple (x, y).
"""
attr = dict()
if targetImage:
attr["inputTargetImage"] = targetImage._ciImage()
if maskImage:
attr["inputMaskImage"] = maskImage._ciImage()
if time:
attr["inputTime"] = time
if shadowRadius:
attr["inputShadowRadius"] = shadowRadius
if shadowDensity:
attr["inputShadowDensity"] = shadowDensity
if shadowOffset:
attr["inputShadowOffset"] = AppKit.CIVector.vectorWithValues_count_(shadowOffset, 2)
filterDict = dict(name="CIDisintegrateWithMaskTransition", attributes=attr)
self._addFilter(filterDict)
def dissolveTransition(self, targetImage=None, time=None):
"""
Uses a dissolve to transition from one image to another.
Attributes: `targetImage` an Image object, `time` a float.
"""
attr = dict()
if targetImage:
attr["inputTargetImage"] = targetImage._ciImage()
if time:
attr["inputTime"] = time
filterDict = dict(name="CIDissolveTransition", attributes=attr)
self._addFilter(filterDict)
def flashTransition(self, targetImage=None, center=None, extent=None, color=None, time=None, maxStriationRadius=None, striationStrength=None, striationContrast=None, fadeThreshold=None):
"""
Transitions from one image to another by creating a flash.
Attributes: `targetImage` an Image object, `center` a tuple (x, y), `extent` a tuple (x, y, w, h), `color` RGBA tuple Color (r, g, b, a), `time` a float, `maxStriationRadius` a float, `striationStrength` a float, `striationContrast` a float, `fadeThreshold` a float.
"""
attr = dict()
if targetImage:
attr["inputTargetImage"] = targetImage._ciImage()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if extent:
attr["inputExtent"] = AppKit.CIVector.vectorWithValues_count_(extent, 4)
if color:
attr["inputColor"] = AppKit.CIColor.colorWithRed_green_blue_alpha_(color[0], color[1], color[2], color[3])
if time:
attr["inputTime"] = time
if maxStriationRadius:
attr["inputMaxStriationRadius"] = maxStriationRadius
if striationStrength:
attr["inputStriationStrength"] = striationStrength
if striationContrast:
attr["inputStriationContrast"] = striationContrast
if fadeThreshold:
attr["inputFadeThreshold"] = fadeThreshold
filterDict = dict(name="CIFlashTransition", attributes=attr)
self._addFilter(filterDict)
def modTransition(self, targetImage=None, center=None, time=None, angle=None, radius=None, compression=None):
"""
Transitions from one image to another by revealing the target image through irregularly shaped holes.
Attributes: `targetImage` an Image object, `center` a tuple (x, y), `time` a float, `angle` a float in degrees, `radius` a float, `compression` a float.
"""
attr = dict()
if targetImage:
attr["inputTargetImage"] = targetImage._ciImage()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if time:
attr["inputTime"] = time
if angle:
attr["inputAngle"] = radians(angle)
if radius:
attr["inputRadius"] = radius
if compression:
attr["inputCompression"] = compression
filterDict = dict(name="CIModTransition", attributes=attr)
self._addFilter(filterDict)
def pageCurlTransition(self, targetImage=None, backsideImage=None, shadingImage=None, extent=None, time=None, angle=None, radius=None):
"""
Transitions from one image to another by simulating a curling page, revealing the new image as the page curls.
Attributes: `targetImage` an Image object, `backsideImage` an Image object, `shadingImage` an Image object, `extent` a tuple (x, y, w, h), `time` a float, `angle` a float in degrees, `radius` a float.
"""
attr = dict()
if targetImage:
attr["inputTargetImage"] = targetImage._ciImage()
if backsideImage:
attr["inputBacksideImage"] = backsideImage._ciImage()
if shadingImage:
attr["inputShadingImage"] = shadingImage._ciImage()
if extent:
attr["inputExtent"] = AppKit.CIVector.vectorWithValues_count_(extent, 4)
if time:
attr["inputTime"] = time
if angle:
attr["inputAngle"] = radians(angle)
if radius:
attr["inputRadius"] = radius
filterDict = dict(name="CIPageCurlTransition", attributes=attr)
self._addFilter(filterDict)
def pageCurlWithShadowTransition(self, targetImage=None, backsideImage=None, extent=None, time=None, angle=None, radius=None, shadowSize=None, shadowAmount=None, shadowExtent=None):
"""
Transitions from one image to another by simulating a curling page, revealing the new image as the page curls.
Attributes: `targetImage` an Image object, `backsideImage` an Image object, `extent` a tuple (x, y, w, h), `time` a float, `angle` a float in degrees, `radius` a float, `shadowSize` a float, `shadowAmount` a float, `shadowExtent` a tuple (x, y, w, h).
"""
attr = dict()
if targetImage:
attr["inputTargetImage"] = targetImage._ciImage()
if backsideImage:
attr["inputBacksideImage"] = backsideImage._ciImage()
if extent:
attr["inputExtent"] = AppKit.CIVector.vectorWithValues_count_(extent, 4)
if time:
attr["inputTime"] = time
if angle:
attr["inputAngle"] = radians(angle)
if radius:
attr["inputRadius"] = radius
if shadowSize:
attr["inputShadowSize"] = shadowSize
if shadowAmount:
attr["inputShadowAmount"] = shadowAmount
if shadowExtent:
attr["inputShadowExtent"] = AppKit.CIVector.vectorWithValues_count_(shadowExtent, 4)
filterDict = dict(name="CIPageCurlWithShadowTransition", attributes=attr)
self._addFilter(filterDict)
def rippleTransition(self, targetImage=None, shadingImage=None, center=None, extent=None, time=None, width=None, scale=None):
"""
Transitions from one image to another by creating a circular wave that expands from the center point, revealing the new image in the wake of the wave.
Attributes: `targetImage` an Image object, `shadingImage` an Image object, `center` a tuple (x, y), `extent` a tuple (x, y, w, h), `time` a float, `width` a float, `scale` a float.
"""
attr = dict()
if targetImage:
attr["inputTargetImage"] = targetImage._ciImage()
if shadingImage:
attr["inputShadingImage"] = shadingImage._ciImage()
if center:
attr["inputCenter"] = AppKit.CIVector.vectorWithX_Y_(center[0], center[1])
if extent:
attr["inputExtent"] = AppKit.CIVector.vectorWithValues_count_(extent, 4)
if time:
attr["inputTime"] = time
if width:
attr["inputWidth"] = width
if scale:
attr["inputScale"] = scale
filterDict = dict(name="CIRippleTransition", attributes=attr)
self._addFilter(filterDict)
def swipeTransition(self, targetImage=None, extent=None, color=None, time=None, angle=None, width=None, opacity=None):
"""
Transitions from one image to another by simulating a swiping action.
Attributes: `targetImage` an Image object, `extent` a tuple (x, y, w, h), `color` RGBA tuple Color (r, g, b, a), `time` a float, `angle` a float in degrees, `width` a float, `opacity` a float.
"""
attr = dict()
if targetImage:
attr["inputTargetImage"] = targetImage._ciImage()
if extent:
attr["inputExtent"] = AppKit.CIVector.vectorWithValues_count_(extent, 4)
if color:
attr["inputColor"] = AppKit.CIColor.colorWithRed_green_blue_alpha_(color[0], color[1], color[2], color[3])
if time:
attr["inputTime"] = time
if angle:
attr["inputAngle"] = radians(angle)
if width:
attr["inputWidth"] = width
if opacity:
attr["inputOpacity"] = opacity
filterDict = dict(name="CISwipeTransition", attributes=attr)
self._addFilter(filterDict)
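
# --- Usage sketch (illustrative, not part of the original module) ---
# Assuming drawBot's ImageObject exposes the filter wrappers above, a script
# might queue a few Core Image filters and draw the result; "input.png" is a
# placeholder path.
#
#   im = ImageObject("input.png")
#   im.pixellate(center=(0, 0), scale=12)    # queues CIPixellate
#   im.bloom(radius=8, intensity=0.6)        # queues CIBloom on top
#   image(im, (0, 0))                        # filters are applied in order on render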
|
schriftgestalt/drawbot
|
drawBot/context/tools/imageObject.py
|
Python
|
bsd-2-clause
| 110,531
|
import re
import inspect
def varname(p):
for line in inspect.getframeinfo(inspect.currentframe().f_back)[3]:
m = re.search(r'\bvarname\s*\(\s*([A-Za-z_][A-Za-z0-9_]*)\s*\)', line)
if m:
return m.group(1)
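
# Example (illustrative, not part of the original snippet): varname() re-parses
# the caller's source line, so it only works for a direct `varname(x)` call
# made from a real source file.
if __name__ == '__main__':
    answer = 42
    print(varname(answer))  # prints: answer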
|
lucasbrunialti/biclustering-experiments
|
experiments/util.py
|
Python
|
bsd-2-clause
| 239
|
import gearman
# setup client, connect to Gearman HQ
def check_request_status(job_request):
    if job_request.complete:
        print "Job %s finished! Result: %s - %s" % (job_request.job.unique, job_request.state, job_request.result)
    elif job_request.timed_out:
        print "Job %s timed out!" % job_request.job.unique
    elif job_request.state == gearman.JOB_UNKNOWN:
        print "Job %s connection failed!" % job_request.job.unique
gm_client = gearman.GearmanClient(['172.26.183.16:4735', '172.26.183.15:4735'])
word = 'Hello World!'
completed_job_request = gm_client.submit_job("reverse", word)
check_request_status(completed_job_request)
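# A matching worker sketch (an assumption; not part of this producer script).
# python-gearman workers register a task name with a callback that receives
# (worker, job) and returns the result string:
#
#   import gearman
#
#   def task_reverse(gearman_worker, gearman_job):
#       return gearman_job.data[::-1]
#
#   gm_worker = gearman.GearmanWorker(['172.26.183.16:4735'])
#   gm_worker.register_task('reverse', task_reverse)
#   gm_worker.work()  # blocks, serving 'reverse' jobs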
|
vocky/gearman-python-test
|
producer.py
|
Python
|
bsd-2-clause
| 643
|
#!/usr/bin/env python
import fcntl
import usb
ID_VENDOR = 0x16c0
ID_PRODUCT = 0x05dc
USBDEVFS_RESET = 21780  # == 0x5514, the _IO('U', 20) ioctl from <linux/usbdevice_fs.h>
class Device:
def __init__(self):
''
@classmethod
def find(cls, idVendor, idProduct):
print("searching for device (%x:%x)" % (idVendor, idProduct))
for bus in usb.busses():
for dev in bus.devices:
if idVendor == dev.idVendor:
if idProduct == dev.idProduct:
d = Device()
d.bus = bus
d.dev = dev
return d
print("device not found")
@property
def usbfs_filename(self):
return '/dev/bus/usb/%s/%s' % (self.bus.dirname, self.dev.filename)
def reset(self):
print("Resetting USB device %s" % self.usbfs_filename)
with open(self.usbfs_filename, 'w') as fd:
rc = fcntl.ioctl(fd, USBDEVFS_RESET, 0)
if (rc < 0):
print("Error in ioctl")
d = Device.find(ID_VENDOR, ID_PRODUCT)
if d:
d.reset()
print("Reset successful\n")
|
ponty/MyElectronicProjects
|
docs/projects/usbasp/usbasp_reset_old.py
|
Python
|
bsd-2-clause
| 1,096
|
# -*- encoding: utf-8 -*-
# River of Text v0.1.0
# INSERT TAGLINE HERE.
# Copyright © 2013, Kwpolska.
# See /LICENSE for licensing information.
"""
rot.template
~~~~~~~~~~~~
INSERT MODULE DESCRIPTION HERE.
:Copyright: © 2013, Kwpolska.
:License: BSD (see /LICENSE).
"""
|
Kwpolska/rot
|
rot/template.py
|
Python
|
bsd-3-clause
| 295
|
from bitcoin_api import buyHour, sellHour
from datetime import datetime
date = datetime.now()
key = 'HXieJjOkGemshiw8hzl3Iq0Cgd8Ip1gT7JYjsn5myB8JJQ6rBl'
buy_price = buyHour(key)
sell_price = sellHour(key)
with open('hours.txt', 'a') as f:
    f.write(str(date) + "\n")
    f.write(str(buy_price) + "\n")
    f.write(str(sell_price) + "\n")
|
aasoliz/Bitcoin-Statistics
|
app/info.py
|
Python
|
bsd-3-clause
| 352
|
# coding: utf-8
"""
Picarto.TV API Documentation
The Picarto.TV API documentation Note, for fixed access tokens, the header that needs to be sent is of the format: `Authorization: Bearer yourTokenHere` This can be generated at https://oauth.picarto.tv/ For chat API, see https://docs.picarto.tv/chat/chat.proto - contact via the email below for implementation details
OpenAPI spec version: 1.2.5
Contact: api@picarto.tv
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class Thumbnail(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'web': 'str',
'web_large': 'str',
'mobile': 'str',
'tablet': 'str'
}
attribute_map = {
'web': 'web',
'web_large': 'web_large',
'mobile': 'mobile',
'tablet': 'tablet'
}
def __init__(self, web=None, web_large=None, mobile=None, tablet=None):
"""
Thumbnail - a model defined in Swagger
"""
self._web = None
self._web_large = None
self._mobile = None
self._tablet = None
if web is not None:
self.web = web
if web_large is not None:
self.web_large = web_large
if mobile is not None:
self.mobile = mobile
if tablet is not None:
self.tablet = tablet
@property
def web(self):
"""
Gets the web of this Thumbnail.
Web size
:return: The web of this Thumbnail.
:rtype: str
"""
return self._web
@web.setter
def web(self, web):
"""
Sets the web of this Thumbnail.
Web size
:param web: The web of this Thumbnail.
:type: str
"""
self._web = web
@property
def web_large(self):
"""
Gets the web_large of this Thumbnail.
Web HD size
:return: The web_large of this Thumbnail.
:rtype: str
"""
return self._web_large
@web_large.setter
def web_large(self, web_large):
"""
Sets the web_large of this Thumbnail.
Web HD size
:param web_large: The web_large of this Thumbnail.
:type: str
"""
self._web_large = web_large
@property
def mobile(self):
"""
Gets the mobile of this Thumbnail.
Mobile size
:return: The mobile of this Thumbnail.
:rtype: str
"""
return self._mobile
@mobile.setter
def mobile(self, mobile):
"""
Sets the mobile of this Thumbnail.
Mobile size
:param mobile: The mobile of this Thumbnail.
:type: str
"""
self._mobile = mobile
@property
def tablet(self):
"""
Gets the tablet of this Thumbnail.
Tablet size
:return: The tablet of this Thumbnail.
:rtype: str
"""
return self._tablet
@tablet.setter
def tablet(self, tablet):
"""
Sets the tablet of this Thumbnail.
Tablet size
:param tablet: The tablet of this Thumbnail.
:type: str
"""
self._tablet = tablet
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if not isinstance(other, Thumbnail):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
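# Example usage (illustrative; the URLs are placeholders):
#
#   thumb = Thumbnail(web='https://example.org/t/web.jpg',
#                     mobile='https://example.org/t/mobile.jpg')
#   thumb.to_dict()  # {'web': '...', 'web_large': None, 'mobile': '...', 'tablet': None}
#   print(thumb)     # __repr__ pretty-prints the dict via pformat()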
|
Sythelux/Picarto.bundle
|
Contents/Libraries/Shared/PicartoClientAPI/models/thumbnail.py
|
Python
|
bsd-3-clause
| 5,074
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('products', '0001_initial'),
]
operations = [
migrations.AddField(
model_name='product',
name='due_period',
field=models.PositiveSmallIntegerField(null=True, verbose_name='Due Period (in days)', blank=True),
),
migrations.AddField(
model_name='product',
name='reminder_day_count_after_due_date',
            field=models.PositiveSmallIntegerField(verbose_name='Reminder Day Count After Due Date', default=12),
),
migrations.AddField(
model_name='product',
name='reminder_day_count_before_due_date',
field=models.PositiveSmallIntegerField(verbose_name='Reminder Day Count Before Due Date', default=2),
),
]
|
dogukantufekci/supersalon
|
supersalon/products/migrations/0002_auto_20151112_1729.py
|
Python
|
bsd-3-clause
| 944
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.http import HttpResponse, Http404
from django.views.generic.edit import CreateView, UpdateView
from django.views.generic import ListView, TemplateView
from django.core.urlresolvers import reverse
from django.shortcuts import get_object_or_404
from accounts.mixins import OwnerRequiredMixin, CanEditMixin
from magiccontent.mixins import (ListContentMixin, EditableMixin,
CreateContentMixin, )
from magiccontent.views import MagicDeleteView, PictureUpdateView
from magiccontent.models import Widget
from .models import CalendarEventContent
from .forms import EventContentForm, EventContentCreateForm
from .util import events_to_json
class EventContentMixin(object):
model = CalendarEventContent
form_class = EventContentForm
template_name = 'magiccontent/calendareventcontent_form.html'
class EventContentCreateView(CreateContentMixin, EventContentMixin,
EditableMixin, CreateView):
form_class = EventContentCreateForm
class EventContentUpdateView(EventContentMixin, EditableMixin, UpdateView):
pass
class EventContentPictureUpdateView(EventContentMixin, EditableMixin,
PictureUpdateView):
template_name = 'magiccontent/defaultcontent_image_form.html'
class EventContentDeleteView(EventContentMixin, OwnerRequiredMixin,
MagicDeleteView):
pass
class EventContentOrderListView(ListContentMixin, EventContentMixin,
OwnerRequiredMixin, ListView):
pass
class ShowCalendarContentPageView(CanEditMixin, TemplateView):
template_name = "magiccontent/calendar.html"
def get_context_data(self, **kwargs):
context = super(ShowCalendarContentPageView,
self).get_context_data(**kwargs)
widget = get_object_or_404(Widget, pk=self.kwargs.get('pk', None))
context['widget'] = widget
context['content_list'] = widget.get_widget_type.objects.filter(
widget=widget)
event_url = reverse('calendarcontent.events.list',
kwargs={'pk': widget.id})
context['events_list_url'] = event_url
return context
class ShowCalendarContentItemView(CanEditMixin, TemplateView):
template_name = "magiccontent/item.html"
def dispatch(self, request, *args, **kws):
        ''' Verify that the slug really exists, to keep search engines from
        indexing duplicates like /same-content/widget_pk/pk and
        /same-content-2/widget_pk/pk.
        Duplicated content has a huge negative impact on SEO!
        '''
slug = self.kwargs.get('slug', None)
if not slug:
raise Http404('no slug')
try:
self.event = CalendarEventContent.site_objects.get(
pk=self.kwargs.get('pk', '$'))
except CalendarEventContent.DoesNotExist:
raise Http404('CalendarEventContent not found')
if self.event.slug != slug:
raise Http404('invalid slug')
return super(ShowCalendarContentItemView, self).dispatch(
request, *args, **kws)
def get_context_data(self, **kwargs):
context = super(ShowCalendarContentItemView,
self).get_context_data(**kwargs)
context['widget'] = self.event.widget
context['object'] = self.event
return context
def events_list(request, **kwargs):
"""
Returns all events from the given widget calendar.
"""
widget = get_object_or_404(Widget, pk=kwargs.get('pk', None))
events = CalendarEventContent.site_objects.filter(widget=widget)
return HttpResponse(events_to_json(events),
content_type='application/json')
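# A minimal URL wiring sketch (an assumption; the project's real urls.py is not
# shown here). The 'calendarcontent.events.list' name is the one reversed in
# ShowCalendarContentPageView above:
#
#   from django.conf.urls import url
#   from . import views
#
#   urlpatterns = [
#       url(r'^events/(?P<pk>\d+)/$', views.events_list,
#           name='calendarcontent.events.list'),
#   ]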
|
DjenieLabs/django-magic-content-calendarevents
|
magiccontentcalendarevents/views.py
|
Python
|
bsd-3-clause
| 3,808
|
# Copyright 2008-2009 WebDriver committers
# Copyright 2008-2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""WebElement implementation."""
import os
import zipfile
from StringIO import StringIO
import base64
from command import Command
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
class WebElement(object):
"""Represents an HTML element.
Generally, all interesting operations to do with interacting with a page
will be performed through this interface."""
def __init__(self, parent, id_):
self._parent = parent
self._id = id_
@property
def tag_name(self):
"""Gets this element's tagName property."""
return self._execute(Command.GET_ELEMENT_TAG_NAME)['value']
@property
def text(self):
"""Gets the text of the element."""
return self._execute(Command.GET_ELEMENT_TEXT)['value']
def click(self):
"""Clicks the element."""
self._execute(Command.CLICK_ELEMENT)
def submit(self):
"""Submits a form."""
self._execute(Command.SUBMIT_ELEMENT)
def clear(self):
"""Clears the text if it's a text entry element."""
self._execute(Command.CLEAR_ELEMENT)
def get_attribute(self, name):
"""Gets the attribute value."""
resp = self._execute(Command.GET_ELEMENT_ATTRIBUTE, {'name': name})
attributeValue = ''
if resp['value'] is None:
attributeValue = None
else:
attributeValue = unicode(resp['value'])
if type(resp['value']) is bool:
attributeValue = attributeValue.lower()
return attributeValue
def is_selected(self):
"""Whether the element is selected."""
return self._execute(Command.IS_ELEMENT_SELECTED)['value']
def is_enabled(self):
"""Whether the element is enabled."""
return self._execute(Command.IS_ELEMENT_ENABLED)['value']
def find_element_by_id(self, id_):
"""Finds element by id."""
return self.find_element(by=By.ID, value=id_)
def find_elements_by_id(self, id_):
return self.find_elements(by=By.ID, value=id_)
def find_element_by_name(self, name):
"""Find element by name."""
return self.find_element(by=By.NAME, value=name)
def find_elements_by_name(self, name):
return self.find_elements(by=By.NAME, value=name)
def find_element_by_link_text(self, link_text):
"""Finds element by link text."""
return self.find_element(by=By.LINK_TEXT, value=link_text)
def find_elements_by_link_text(self, link_text):
return self.find_elements(by=By.LINK_TEXT, value=link_text)
def find_element_by_partial_link_text(self, link_text):
return self.find_element(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_elements_by_partial_link_text(self, link_text):
return self.find_elements(by=By.PARTIAL_LINK_TEXT, value=link_text)
def find_element_by_tag_name(self, name):
return self.find_element(by=By.TAG_NAME, value=name)
def find_elements_by_tag_name(self, name):
return self.find_elements(by=By.TAG_NAME, value=name)
def find_element_by_xpath(self, xpath):
"""Finds element by xpath."""
return self.find_element(by=By.XPATH, value=xpath)
def find_elements_by_xpath(self, xpath):
"""Finds elements within the elements by xpath."""
return self.find_elements(by=By.XPATH, value=xpath)
def find_element_by_class_name(self, name):
"""Finds an element by their class name."""
return self.find_element(by=By.CLASS_NAME, value=name)
def find_elements_by_class_name(self, name):
"""Finds elements by their class name."""
return self.find_elements(by=By.CLASS_NAME, value=name)
def find_element_by_css_selector(self, css_selector):
"""Find and return an element by CSS selector."""
return self.find_element(by=By.CSS_SELECTOR, value=css_selector)
def find_elements_by_css_selector(self, css_selector):
"""Find and return list of multiple elements by CSS selector."""
return self.find_elements(by=By.CSS_SELECTOR, value=css_selector)
def send_keys(self, *value):
"""Simulates typing into the element."""
local_file = LocalFileDetector.is_local_file(*value)
if local_file is not None:
value = self._upload(local_file)
typing = []
for val in value:
if isinstance(val, Keys):
typing.append(val)
elif isinstance(val, int):
val = str(val)
for i in range(len(val)):
typing.append(val[i])
else:
for i in range(len(val)):
typing.append(val[i])
self._execute(Command.SEND_KEYS_TO_ELEMENT, {'value': typing})
# RenderedWebElement Items
def is_displayed(self):
"""Whether the element would be visible to a user"""
return self._execute(Command.IS_ELEMENT_DISPLAYED)['value']
@property
def size(self):
""" Returns the size of the element """
size = self._execute(Command.GET_ELEMENT_SIZE)['value']
new_size = {}
new_size["height"] = size["height"]
new_size["width"] = size["width"]
return new_size
def value_of_css_property(self, property_name):
""" Returns the value of a CSS property """
return self._execute(Command.GET_ELEMENT_VALUE_OF_CSS_PROPERTY,
{'propertyName': property_name})['value']
@property
def location(self):
""" Returns the location of the element in the renderable canvas"""
return self._execute(Command.GET_ELEMENT_LOCATION)['value']
@property
def parent(self):
return self._parent
@property
def id(self):
return self._id
# Private Methods
def _execute(self, command, params=None):
"""Executes a command against the underlying HTML element.
Args:
command: The name of the command to _execute as a string.
params: A dictionary of named parameters to send with the command.
Returns:
The command's JSON response loaded into a dictionary object.
"""
if not params:
params = {}
params['id'] = self._id
return self._parent.execute(command, params)
def find_element(self, by=By.ID, value=None):
return self._execute(Command.FIND_CHILD_ELEMENT,
{"using": by, "value": value})['value']
def find_elements(self, by=By.ID, value=None):
return self._execute(Command.FIND_CHILD_ELEMENTS,
{"using": by, "value": value})['value']
def _upload(self, filename):
fp = StringIO()
zipped = zipfile.ZipFile(fp, 'w', zipfile.ZIP_DEFLATED)
zipped.write(filename, os.path.split(filename)[1])
zipped.close()
try:
return self._execute(Command.UPLOAD_FILE,
{'file': base64.encodestring(fp.getvalue())})['value']
except WebDriverException as e:
if "Unrecognized command: POST" in e.__str__():
return filename
elif "Command not found: POST " in e.__str__():
return filename
elif '{"status":405,"value":["GET","HEAD","DELETE"]}' in e.__str__():
return filename
else:
raise e
class LocalFileDetector(object):
@classmethod
def is_local_file(cls, *keys):
file_path = ''
typing = []
for val in keys:
if isinstance(val, Keys):
typing.append(val)
elif isinstance(val, int):
val = str(val)
for i in range(len(val)):
typing.append(val[i])
else:
for i in range(len(val)):
typing.append(val[i])
file_path = ''.join(typing)
        if file_path == '':
return None
if os.path.exists(file_path):
return file_path
else:
return None
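# Example (illustrative; assumes a connected WebDriver instance named `driver`):
#
#   element = driver.find_element_by_css_selector('input[name="q"]')
#   element.send_keys('hello world')
#   element.submit()
#   print element.get_attribute('value')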
|
leighpauls/k2cro4
|
third_party/webdriver/pylib/selenium/webdriver/remote/webelement.py
|
Python
|
bsd-3-clause
| 8,800
|
from sklearn2sql_heroku.tests.regression import generic as reg_gen
reg_gen.test_model("GradientBoostingRegressor" , "boston" , "hive")
|
antoinecarme/sklearn2sql_heroku
|
tests/regression/boston/ws_boston_GradientBoostingRegressor_hive_code_gen.py
|
Python
|
bsd-3-clause
| 137
|
"""
Start detached worker node from the Django management utility.
"""
import os
import sys
from celery.bin import celeryd_detach
from djcelery.management.base import CeleryCommand
class Command(CeleryCommand):
"""Run the celery daemon."""
help = 'Runs a detached Celery worker node.'
requires_model_validation = True
option_list = celeryd_detach.OPTION_LIST
def run_from_argv(self, argv):
class detached(celeryd_detach.detached_celeryd):
execv_argv = [os.path.abspath(sys.argv[0]), "celeryd"]
detached().execute_from_commandline(argv)
|
kumar303/rockit
|
vendor-local/djcelery/management/commands/celeryd_detach.py
|
Python
|
bsd-3-clause
| 592
|
import datetime
from flask import Flask, render_template, redirect, url_for, request
import picamera
from time import sleep
app = Flask(__name__)
camera = picamera.PiCamera()  # shared camera instance used by the /postcamera route
# NOTE: weather, bus_data, rf_send, blink, blink_settings and led_settings are
# expected to be provided elsewhere in the project; they are not defined in this file.
@app.route('/')
def index():
index_data = {'high': weather['high'], 'low': weather['low'], 'bus': bus_data}
return render_template('index.html', **index_data)
@app.route('/camera')
def camera_page():
return render_template('camera.html')
# Route for sending RF signal to outlets
@app.route('/postmethod', methods=['POST'])
def get_post():
outlet, status = request.form['outlet'], request.form['status']
now = datetime.datetime.now()
time = now.strftime("%Y-%m-%d %H:%M")
print('Time: %s | Outlet: %s | Status: %s' % (time, outlet, status))
rf_send(outlet, status)
if blink_settings['blink']:
blink(led_settings[status], led_settings['num'], led_settings['speed'])
return outlet
@app.route('/postcamera', methods=['POST'])
def get_camera():
req = request.form['request']
now = datetime.datetime.now()
time = now.strftime("%Y-%m-%d_%H-%M")
if req == 'photo':
filename = '%s.jpg' % time
camera.capture(filename)
elif req == 'video':
filename = '%s.h264' % time
camera.start_recording(filename)
sleep(5)
camera.stop_recording()
return filename
if __name__ == '__main__':
app.run(host='0.0.0.0', debug=True)
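# Example client calls (illustrative; assumes the dev server above is reachable
# on localhost:5000):
#
#   import requests
#   requests.post('http://localhost:5000/postmethod',
#                 data={'outlet': '1', 'status': 'on'})   # echoes the outlet id
#   requests.post('http://localhost:5000/postcamera',
#                 data={'request': 'photo'})              # returns the saved filename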
|
kbsezginel/raspberry-pi
|
rpi-web-server/camera/app-camera.py
|
Python
|
bsd-3-clause
| 1,384
|
from custom.ewsghana.comparison_report import ProductsCompareReport, LocationsCompareReport,\
SMSUsersCompareReport, WebUsersCompareReport, SupplyPointsCompareReport
from custom.ewsghana.reports.email_reports import CMSRMSReport, StockSummaryReport
from custom.ewsghana.reports.maps import EWSMapReport
from custom.ewsghana.reports.stock_levels_report import StockLevelsReport
from custom.ewsghana.reports.specific_reports.dashboard_report import DashboardReport
from custom.ewsghana.reports.specific_reports.stock_status_report import StockStatus
from custom.ewsghana.reports.specific_reports.reporting_rates import ReportingRatesReport
from custom.ewsghana.reports.stock_transaction import StockTransactionReport
LOCATION_TYPES = ["country", "region", "district", "facility"]
CUSTOM_REPORTS = (
('Custom reports', (
DashboardReport,
StockStatus,
StockLevelsReport,
ReportingRatesReport,
EWSMapReport,
CMSRMSReport,
StockSummaryReport,
StockTransactionReport
)),
('Compare reports', (
ProductsCompareReport,
LocationsCompareReport,
SupplyPointsCompareReport,
WebUsersCompareReport,
SMSUsersCompareReport,
))
)
|
puttarajubr/commcare-hq
|
custom/ewsghana/__init__.py
|
Python
|
bsd-3-clause
| 1,237
|
# coding=utf-8
#Auxiliar model for obtaining users with their associated role
class UserEventRelation(object):
def __init__(self, user=None, role=''):
self.user = user
self.role = role
|
suselrd/django-notifications
|
notifications/models/usereventrelation.py
|
Python
|
bsd-3-clause
| 209
|
##########################################################################
#
# Copyright (c) 2015, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import Gaffer
import GafferImage
Gaffer.Metadata.registerNode(
GafferImage.ImageSampler,
"description",
"""
Samples image colour at a specified pixel location.
""",
plugs = {
"image" : [
"description",
"""
The image to be sampled.
""",
],
"pixel" : [
"description",
"""
		The coordinates of the pixel to sample. These can have
		fractional values - the filter will be used to generate
		appropriately interpolated values.
""",
],
"filter" : [
"description",
"""
The filter used to generate interpolated pixel values.
""",
],
"color" : [
"description",
"""
The sampled colour.
""",
]
}
)
|
goddardl/gaffer
|
python/GafferImageUI/ImageSamplerUI.py
|
Python
|
bsd-3-clause
| 2,476
|
import re # parsing HTML with regexes LIKE A BOSS.
from django.utils.html import escape
from wagtail.wagtailcore.whitelist import Whitelister
from wagtail.wagtailcore.models import Page
from wagtail.wagtailcore import hooks
# Define a set of 'embed handlers' and 'link handlers'. These handle the translation
# of 'special' HTML elements in rich text - ones which we do not want to include
# verbatim in the DB representation because they embed information which is stored
# elsewhere in the database and is liable to change - from real HTML representation
# to DB representation and back again.
class PageLinkHandler(object):
"""
PageLinkHandler will be invoked whenever we encounter an <a> element in HTML content
with an attribute of data-linktype="page". The resulting element in the database
representation will be:
<a linktype="page" id="42">hello world</a>
"""
@staticmethod
def get_db_attributes(tag):
"""
Given an <a> tag that we've identified as a page link embed (because it has a
data-linktype="page" attribute), return a dict of the attributes we should
have on the resulting <a linktype="page"> element.
"""
return {'id': tag['data-id']}
@staticmethod
def expand_db_attributes(attrs, for_editor):
try:
page = Page.objects.get(id=attrs['id'])
if for_editor:
editor_attrs = 'data-linktype="page" data-id="%d" ' % page.id
else:
editor_attrs = ''
return '<a %shref="%s">' % (editor_attrs, escape(page.url))
except Page.DoesNotExist:
return "<a>"
EMBED_HANDLERS = {}
LINK_HANDLERS = {
'page': PageLinkHandler,
}
has_loaded_embed_handlers = False
has_loaded_link_handlers = False
def get_embed_handler(embed_type):
global EMBED_HANDLERS, has_loaded_embed_handlers
if not has_loaded_embed_handlers:
for hook in hooks.get_hooks('register_rich_text_embed_handler'):
handler_name, handler = hook()
EMBED_HANDLERS[handler_name] = handler
has_loaded_embed_handlers = True
return EMBED_HANDLERS[embed_type]
def get_link_handler(link_type):
global LINK_HANDLERS, has_loaded_link_handlers
if not has_loaded_link_handlers:
for hook in hooks.get_hooks('register_rich_text_link_handler'):
handler_name, handler = hook()
LINK_HANDLERS[handler_name] = handler
has_loaded_link_handlers = True
return LINK_HANDLERS[link_type]
class DbWhitelister(Whitelister):
"""
A custom whitelisting engine to convert the HTML as returned by the rich text editor
into the pseudo-HTML format stored in the database (in which images, documents and other
linked objects are identified by ID rather than URL):
* implements a 'construct_whitelister_element_rules' hook so that other apps can modify
the whitelist ruleset (e.g. to permit additional HTML elements beyond those in the base
Whitelister module);
* replaces any element with a 'data-embedtype' attribute with an <embed> element, with
attributes supplied by the handler for that type as defined in EMBED_HANDLERS;
* rewrites the attributes of any <a> element with a 'data-linktype' attribute, as
determined by the handler for that type defined in LINK_HANDLERS, while keeping the
element content intact.
"""
has_loaded_custom_whitelist_rules = False
@classmethod
def clean(cls, html):
if not cls.has_loaded_custom_whitelist_rules:
for fn in hooks.get_hooks('construct_whitelister_element_rules'):
cls.element_rules = cls.element_rules.copy()
cls.element_rules.update(fn())
cls.has_loaded_custom_whitelist_rules = True
return super(DbWhitelister, cls).clean(html)
@classmethod
def clean_tag_node(cls, doc, tag):
if 'data-embedtype' in tag.attrs:
embed_type = tag['data-embedtype']
# fetch the appropriate embed handler for this embedtype
embed_handler = get_embed_handler(embed_type)
embed_attrs = embed_handler.get_db_attributes(tag)
embed_attrs['embedtype'] = embed_type
embed_tag = doc.new_tag('embed', **embed_attrs)
embed_tag.can_be_empty_element = True
tag.replace_with(embed_tag)
elif tag.name == 'a' and 'data-linktype' in tag.attrs:
# first, whitelist the contents of this tag
for child in tag.contents:
cls.clean_node(doc, child)
link_type = tag['data-linktype']
link_handler = get_link_handler(link_type)
link_attrs = link_handler.get_db_attributes(tag)
link_attrs['linktype'] = link_type
tag.attrs.clear()
tag.attrs.update(**link_attrs)
elif tag.name == 'div':
tag.name = 'p'
else:
super(DbWhitelister, cls).clean_tag_node(doc, tag)
FIND_A_TAG = re.compile(r'<a(\b[^>]*)>')
FIND_EMBED_TAG = re.compile(r'<embed(\b[^>]*)/>')
FIND_ATTRS = re.compile(r'([\w-]+)\="([^"]*)"')
def extract_attrs(attr_string):
"""
helper method to extract tag attributes as a dict. Does not escape HTML entities!
"""
attributes = {}
for name, val in FIND_ATTRS.findall(attr_string):
attributes[name] = val
return attributes
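# Illustrative behaviour (a sketch, not part of this module):
#
#     >>> extract_attrs('linktype="page" id="42"')
#     {'linktype': 'page', 'id': '42'}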
def expand_db_html(html, for_editor=False):
"""
Expand database-representation HTML into proper HTML usable in either
templates or the rich text editor
"""
def replace_a_tag(m):
attrs = extract_attrs(m.group(1))
if 'linktype' not in attrs:
# return unchanged
return m.group(0)
handler = get_link_handler(attrs['linktype'])
return handler.expand_db_attributes(attrs, for_editor)
def replace_embed_tag(m):
attrs = extract_attrs(m.group(1))
handler = get_embed_handler(attrs['embedtype'])
return handler.expand_db_attributes(attrs, for_editor)
html = FIND_A_TAG.sub(replace_a_tag, html)
html = FIND_EMBED_TAG.sub(replace_embed_tag, html)
return html
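# Illustrative end-to-end sketch (sample HTML and page URL are hypothetical,
# not taken from this module):
#
#     editor_html = '<a data-linktype="page" data-id="42">hello</a>'
#     db_html = DbWhitelister.clean(editor_html)
#     # db_html == '<a linktype="page" id="42">hello</a>'
#     expand_db_html(db_html)                   # '<a href="/some-page/">hello</a>'
#     expand_db_html(db_html, for_editor=True)  # editable form, data-* restored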
|
jorge-marques/wagtail
|
wagtail/wagtailcore/rich_text.py
|
Python
|
bsd-3-clause
| 6,198
|
# -*- coding: utf-8 -*-
'''
Production Configurations
- Use djangosecure
- Use Amazon's S3 for storing static files and uploaded media
- Use sendgrid to send emails
- Use MEMCACHIER on Heroku
'''
from configurations import values
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
try:
from S3 import CallingFormat
AWS_CALLING_FORMAT = CallingFormat.SUBDOMAIN
except ImportError:
# TODO: Fix this so that this import is not attempted in Dev.
pass
from .common import Common
class Production(Common):
# This ensures that Django will be able to detect a secure connection
# properly on Heroku.
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# INSTALLED_APPS
INSTALLED_APPS = Common.INSTALLED_APPS
# END INSTALLED_APPS
# SECRET KEY
SECRET_KEY = values.SecretValue()
# END SECRET KEY
# django-secure
INSTALLED_APPS += ("djangosecure", )
# set this to 60 seconds and then to 518400 when you can prove it works
SECURE_HSTS_SECONDS = 60
SECURE_HSTS_INCLUDE_SUBDOMAINS = values.BooleanValue(True)
SECURE_FRAME_DENY = values.BooleanValue(True)
SECURE_CONTENT_TYPE_NOSNIFF = values.BooleanValue(True)
SECURE_BROWSER_XSS_FILTER = values.BooleanValue(True)
SESSION_COOKIE_SECURE = values.BooleanValue(False)
SESSION_COOKIE_HTTPONLY = values.BooleanValue(True)
SECURE_SSL_REDIRECT = values.BooleanValue(True)
# end django-secure
# SITE CONFIGURATION
# Hosts/domain names that are valid for this site
# See https://docs.djangoproject.com/en/1.6/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ["*"]
# END SITE CONFIGURATION
INSTALLED_APPS += ("gunicorn", )
# STORAGE CONFIGURATION
# See: http://django-storages.readthedocs.org/en/latest/index.html
INSTALLED_APPS += (
'storages',
)
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
STATICFILES_STORAGE = DEFAULT_FILE_STORAGE = 'storages.backends.s3boto.S3BotoStorage'
# See: http://django-storages.readthedocs.org/en/latest/backends/amazon-S3.html#settings
AWS_ACCESS_KEY_ID = values.SecretValue()
AWS_SECRET_ACCESS_KEY = values.SecretValue()
AWS_STORAGE_BUCKET_NAME = values.SecretValue()
AWS_AUTO_CREATE_BUCKET = True
AWS_QUERYSTRING_AUTH = False
# see: https://github.com/antonagestam/collectfast
AWS_PRELOAD_METADATA = True
INSTALLED_APPS += ("collectfast", )
# AWS cache settings, don't change unless you know what you're doing:
AWS_EXPIREY = 60 * 60 * 24 * 7
AWS_HEADERS = {
'Cache-Control': 'max-age=%d, s-maxage=%d, must-revalidate' % (
AWS_EXPIREY, AWS_EXPIREY)
}
# See: https://docs.djangoproject.com/en/dev/ref/settings/#static-url
STATIC_URL = 'https://s3.amazonaws.com/%s/' % AWS_STORAGE_BUCKET_NAME
# END STORAGE CONFIGURATION
# EMAIL
DEFAULT_FROM_EMAIL = values.Value('projector <noreply@example.com>')
EMAIL_HOST = values.Value('smtp.sendgrid.com')
EMAIL_HOST_PASSWORD = values.SecretValue(environ_prefix="", environ_name="SENDGRID_PASSWORD")
EMAIL_HOST_USER = values.SecretValue(environ_prefix="", environ_name="SENDGRID_USERNAME")
EMAIL_PORT = values.IntegerValue(587, environ_prefix="", environ_name="EMAIL_PORT")
EMAIL_SUBJECT_PREFIX = values.Value('[projector] ', environ_name="EMAIL_SUBJECT_PREFIX")
EMAIL_USE_TLS = True
SERVER_EMAIL = EMAIL_HOST_USER
# END EMAIL
# TEMPLATE CONFIGURATION
# See: https://docs.djangoproject.com/en/dev/ref/settings/#template-dirs
TEMPLATE_LOADERS = (
('django.template.loaders.cached.Loader', (
'django.template.loaders.filesystem.Loader',
'django.template.loaders.app_directories.Loader',
)),
)
# END TEMPLATE CONFIGURATION
# CACHING
# Only do this here because thanks to django-pylibmc-sasl and pylibmc
# memcacheify is painful to install on windows.
try:
# See: https://github.com/rdegges/django-heroku-memcacheify
from memcacheify import memcacheify
CACHES = memcacheify()
except ImportError:
CACHES = values.CacheURLValue(default="memcached://127.0.0.1:11211")
# END CACHING
# Your production stuff: Below this line define 3rd party library settings
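# Illustrative activation sketch (the module path and WSGI entry point are
# assumptions based on the repo layout; DJANGO_CONFIGURATION itself is the
# standard django-configurations switch):
#
#     DJANGO_SETTINGS_MODULE=projector.config.production \
#     DJANGO_CONFIGURATION=Production \
#     gunicorn projector.wsgi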
|
mfwarren/projector
|
projector/config/production.py
|
Python
|
bsd-3-clause
| 4,351
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
import sys
with open('banned.dirs', 'r') as f:
banned = [x.strip() for x in f.readlines()]
for line in sys.stdin.readlines():
line = line.strip()
good = True
for b in banned:
if line.startswith(b):
good = False
break
if good:
print(line)
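# Illustrative usage (file and command names assumed, not part of this
# script): given a banned.dirs file listing one path prefix per line, echo
# only the stdin lines that do not start with a banned prefix:
#
#     find . -type d | python filter.py > allowed.dirs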
|
grammarware/slps
|
topics/documents/wiki/filter.py
|
Python
|
bsd-3-clause
| 293
|
"""
Masked arrays add-ons.
A collection of utilities for `numpy.ma`.
:author: Pierre Gerard-Marchant
:contact: pierregm_at_uga_dot_edu
:version: $Id: extras.py 3473 2007-10-29 15:18:13Z jarrod.millman $
"""
__all__ = [
'apply_along_axis', 'apply_over_axes', 'atleast_1d', 'atleast_2d',
'atleast_3d', 'average', 'clump_masked', 'clump_unmasked',
'column_stack', 'compress_cols', 'compress_nd', 'compress_rowcols',
'compress_rows', 'count_masked', 'corrcoef', 'cov', 'diagflat', 'dot',
'dstack', 'ediff1d', 'flatnotmasked_contiguous', 'flatnotmasked_edges',
'hsplit', 'hstack', 'isin', 'in1d', 'intersect1d', 'mask_cols', 'mask_rowcols',
'mask_rows', 'masked_all', 'masked_all_like', 'median', 'mr_',
'notmasked_contiguous', 'notmasked_edges', 'polyfit', 'row_stack',
'setdiff1d', 'setxor1d', 'stack', 'unique', 'union1d', 'vander', 'vstack',
]
import itertools
import warnings
from . import core as ma
from .core import (
MaskedArray, MAError, add, array, asarray, concatenate, filled, count,
getmask, getmaskarray, make_mask_descr, masked, masked_array, mask_or,
nomask, ones, sort, zeros, getdata, get_masked_subclass, dot,
mask_rowcols
)
import numpy as np
from numpy import ndarray, array as nxarray
import numpy.core.umath as umath
from numpy.core.multiarray import normalize_axis_index
from numpy.core.numeric import normalize_axis_tuple
from numpy.lib.function_base import _ureduce
from numpy.lib.index_tricks import AxisConcatenator
def issequence(seq):
"""
Is seq a sequence (ndarray, list or tuple)?
"""
return isinstance(seq, (ndarray, tuple, list))
def count_masked(arr, axis=None):
"""
Count the number of masked elements along the given axis.
Parameters
----------
arr : array_like
An array with (possibly) masked elements.
axis : int, optional
Axis along which to count. If None (default), a flattened
version of the array is used.
Returns
-------
count : int, ndarray
The total number of masked elements (axis=None) or the number
of masked elements along each slice of the given axis.
See Also
--------
MaskedArray.count : Count non-masked elements.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.arange(9).reshape((3,3))
>>> a = ma.array(a)
>>> a[1, 0] = ma.masked
>>> a[1, 2] = ma.masked
>>> a[2, 1] = ma.masked
>>> a
masked_array(
data=[[0, 1, 2],
[--, 4, --],
[6, --, 8]],
mask=[[False, False, False],
[ True, False, True],
[False, True, False]],
fill_value=999999)
>>> ma.count_masked(a)
3
When the `axis` keyword is used an array is returned.
>>> ma.count_masked(a, axis=0)
array([1, 1, 1])
>>> ma.count_masked(a, axis=1)
array([0, 2, 1])
"""
m = getmaskarray(arr)
return m.sum(axis)
def masked_all(shape, dtype=float):
"""
Empty masked array with all elements masked.
Return an empty masked array of the given shape and dtype, where all the
data are masked.
Parameters
----------
shape : tuple
Shape of the required MaskedArray.
dtype : dtype, optional
Data type of the output.
Returns
-------
a : MaskedArray
A masked array with all data masked.
See Also
--------
masked_all_like : Empty masked array modelled on an existing array.
Examples
--------
>>> import numpy.ma as ma
>>> ma.masked_all((3, 3))
masked_array(
data=[[--, --, --],
[--, --, --],
[--, --, --]],
mask=[[ True, True, True],
[ True, True, True],
[ True, True, True]],
fill_value=1e+20,
dtype=float64)
The `dtype` parameter defines the underlying data type.
>>> a = ma.masked_all((3, 3))
>>> a.dtype
dtype('float64')
>>> a = ma.masked_all((3, 3), dtype=np.int32)
>>> a.dtype
dtype('int32')
"""
a = masked_array(np.empty(shape, dtype),
mask=np.ones(shape, make_mask_descr(dtype)))
return a
def masked_all_like(arr):
"""
Empty masked array with the properties of an existing array.
Return an empty masked array of the same shape and dtype as
the array `arr`, where all the data are masked.
Parameters
----------
arr : ndarray
An array describing the shape and dtype of the required MaskedArray.
Returns
-------
a : MaskedArray
A masked array with all data masked.
Raises
------
AttributeError
If `arr` doesn't have a shape attribute (i.e. not an ndarray)
See Also
--------
masked_all : Empty masked array with all elements masked.
Examples
--------
>>> import numpy.ma as ma
>>> arr = np.zeros((2, 3), dtype=np.float32)
>>> arr
array([[0., 0., 0.],
[0., 0., 0.]], dtype=float32)
>>> ma.masked_all_like(arr)
masked_array(
data=[[--, --, --],
[--, --, --]],
mask=[[ True, True, True],
[ True, True, True]],
fill_value=1e+20,
dtype=float32)
The dtype of the masked array matches the dtype of `arr`.
>>> arr.dtype
dtype('float32')
>>> ma.masked_all_like(arr).dtype
dtype('float32')
"""
a = np.empty_like(arr).view(MaskedArray)
a._mask = np.ones(a.shape, dtype=make_mask_descr(a.dtype))
return a
#####--------------------------------------------------------------------------
#---- --- Standard functions ---
#####--------------------------------------------------------------------------
class _fromnxfunction:
"""
Defines a wrapper to adapt NumPy functions to masked arrays.
An instance of `_fromnxfunction` can be called with the same parameters
as the wrapped NumPy function. The docstring of `newfunc` is adapted from
the wrapped function as well, see `getdoc`.
This class should not be used directly. Instead, one of its extensions that
provides support for a specific type of input should be used.
Parameters
----------
funcname : str
The name of the function to be adapted. The function should be
in the NumPy namespace (i.e. ``np.funcname``).
"""
def __init__(self, funcname):
self.__name__ = funcname
self.__doc__ = self.getdoc()
def getdoc(self):
"""
Retrieve the docstring and signature from the function.
The ``__doc__`` attribute of the function is used as the docstring for
the new masked array version of the function. A note on application
of the function to the mask is appended.
Parameters
----------
None
"""
npfunc = getattr(np, self.__name__, None)
doc = getattr(npfunc, '__doc__', None)
if doc:
sig = self.__name__ + ma.get_object_signature(npfunc)
doc = ma.doc_note(doc, "The function is applied to both the _data "
"and the _mask, if any.")
return '\n\n'.join((sig, doc))
return
def __call__(self, *args, **params):
pass
class _fromnxfunction_single(_fromnxfunction):
"""
A version of `_fromnxfunction` that is called with a single array
argument followed by auxiliary args that are passed verbatim for
both the data and mask calls.
"""
def __call__(self, x, *args, **params):
func = getattr(np, self.__name__)
if isinstance(x, ndarray):
_d = func(x.__array__(), *args, **params)
_m = func(getmaskarray(x), *args, **params)
return masked_array(_d, mask=_m)
else:
_d = func(np.asarray(x), *args, **params)
_m = func(getmaskarray(x), *args, **params)
return masked_array(_d, mask=_m)
class _fromnxfunction_seq(_fromnxfunction):
"""
A version of `_fromnxfunction` that is called with a single sequence
of arrays followed by auxiliary args that are passed verbatim for
both the data and mask calls.
"""
def __call__(self, x, *args, **params):
func = getattr(np, self.__name__)
_d = func(tuple([np.asarray(a) for a in x]), *args, **params)
_m = func(tuple([getmaskarray(a) for a in x]), *args, **params)
return masked_array(_d, mask=_m)
class _fromnxfunction_args(_fromnxfunction):
"""
A version of `_fromnxfunction` that is called with multiple array
arguments. The first non-array-like input marks the beginning of the
arguments that are passed verbatim for both the data and mask calls.
Array arguments are processed independently and the results are
returned in a list. If only one array is found, the return value is
just the processed array instead of a list.
"""
def __call__(self, *args, **params):
func = getattr(np, self.__name__)
arrays = []
args = list(args)
while len(args) > 0 and issequence(args[0]):
arrays.append(args.pop(0))
res = []
for x in arrays:
_d = func(np.asarray(x), *args, **params)
_m = func(getmaskarray(x), *args, **params)
res.append(masked_array(_d, mask=_m))
if len(arrays) == 1:
return res[0]
return res
class _fromnxfunction_allargs(_fromnxfunction):
"""
A version of `_fromnxfunction` that is called with multiple array
arguments. Similar to `_fromnxfunction_args` except that all args
are converted to arrays even if they are not so already. This makes
it possible to process scalars as 1-D arrays. Only keyword arguments
are passed through verbatim for the data and mask calls. Array
arguments are processed independently and the results are returned
in a list. If only one arg is present, the return value is just the
processed array instead of a list.
"""
def __call__(self, *args, **params):
func = getattr(np, self.__name__)
res = []
for x in args:
_d = func(np.asarray(x), **params)
_m = func(getmaskarray(x), **params)
res.append(masked_array(_d, mask=_m))
if len(args) == 1:
return res[0]
return res
atleast_1d = _fromnxfunction_allargs('atleast_1d')
atleast_2d = _fromnxfunction_allargs('atleast_2d')
atleast_3d = _fromnxfunction_allargs('atleast_3d')
vstack = row_stack = _fromnxfunction_seq('vstack')
hstack = _fromnxfunction_seq('hstack')
column_stack = _fromnxfunction_seq('column_stack')
dstack = _fromnxfunction_seq('dstack')
stack = _fromnxfunction_seq('stack')
hsplit = _fromnxfunction_single('hsplit')
diagflat = _fromnxfunction_single('diagflat')
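# Illustrative example (a sketch, not part of the original module) of how
# the wrappers above apply the NumPy function to data and mask alike:
#
#     >>> x = masked_array([1, 2, 3], mask=[False, True, False])
#     >>> atleast_2d(x)
#     masked_array(data=[[1, --, 3]],
#                  mask=[[False,  True, False]],
#            fill_value=999999)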
#####--------------------------------------------------------------------------
#----
#####--------------------------------------------------------------------------
def flatten_inplace(seq):
"""Flatten a sequence in place."""
k = 0
while (k != len(seq)):
while hasattr(seq[k], '__iter__'):
seq[k:(k + 1)] = seq[k]
k += 1
return seq
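# Illustrative behaviour (a sketch, not part of the original module):
#
#     >>> flatten_inplace([1, [2, [3, 4]], 5])
#     [1, 2, 3, 4, 5]
#
# Strings are also iterable, so this helper is only suitable for the nested
# lists of indices and slices used by apply_along_axis below.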
def apply_along_axis(func1d, axis, arr, *args, **kwargs):
"""
(This docstring should be overwritten)
"""
arr = array(arr, copy=False, subok=True)
nd = arr.ndim
axis = normalize_axis_index(axis, nd)
ind = [0] * (nd - 1)
i = np.zeros(nd, 'O')
indlist = list(range(nd))
indlist.remove(axis)
i[axis] = slice(None, None)
outshape = np.asarray(arr.shape).take(indlist)
i.put(indlist, ind)
res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
# if res is a number, then we have a smaller output array
asscalar = np.isscalar(res)
if not asscalar:
try:
len(res)
except TypeError:
asscalar = True
# Note: we shouldn't set the dtype of the output from the first result
# so we force the type to object, and build a list of dtypes. We'll
# just take the largest, to avoid some downcasting
dtypes = []
if asscalar:
dtypes.append(np.asarray(res).dtype)
outarr = zeros(outshape, object)
outarr[tuple(ind)] = res
Ntot = np.product(outshape)
k = 1
while k < Ntot:
# increment the index
ind[-1] += 1
n = -1
while (ind[n] >= outshape[n]) and (n > (1 - nd)):
ind[n - 1] += 1
ind[n] = 0
n -= 1
i.put(indlist, ind)
res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
outarr[tuple(ind)] = res
dtypes.append(asarray(res).dtype)
k += 1
else:
res = array(res, copy=False, subok=True)
j = i.copy()
j[axis] = ([slice(None, None)] * res.ndim)
j.put(indlist, ind)
Ntot = np.product(outshape)
holdshape = outshape
outshape = list(arr.shape)
outshape[axis] = res.shape
dtypes.append(asarray(res).dtype)
outshape = flatten_inplace(outshape)
outarr = zeros(outshape, object)
outarr[tuple(flatten_inplace(j.tolist()))] = res
k = 1
while k < Ntot:
# increment the index
ind[-1] += 1
n = -1
while (ind[n] >= holdshape[n]) and (n > (1 - nd)):
ind[n - 1] += 1
ind[n] = 0
n -= 1
i.put(indlist, ind)
j.put(indlist, ind)
res = func1d(arr[tuple(i.tolist())], *args, **kwargs)
outarr[tuple(flatten_inplace(j.tolist()))] = res
dtypes.append(asarray(res).dtype)
k += 1
max_dtypes = np.dtype(np.asarray(dtypes).max())
if not hasattr(arr, '_mask'):
result = np.asarray(outarr, dtype=max_dtypes)
else:
result = asarray(outarr, dtype=max_dtypes)
result.fill_value = ma.default_fill_value(result)
return result
apply_along_axis.__doc__ = np.apply_along_axis.__doc__
def apply_over_axes(func, a, axes):
"""
(This docstring will be overwritten)
"""
val = asarray(a)
N = a.ndim
if array(axes).ndim == 0:
axes = (axes,)
for axis in axes:
if axis < 0:
axis = N + axis
args = (val, axis)
res = func(*args)
if res.ndim == val.ndim:
val = res
else:
res = ma.expand_dims(res, axis)
if res.ndim == val.ndim:
val = res
else:
raise ValueError("function is not returning "
"an array of the correct shape")
return val
if apply_over_axes.__doc__ is not None:
apply_over_axes.__doc__ = np.apply_over_axes.__doc__[
:np.apply_over_axes.__doc__.find('Notes')].rstrip() + \
"""
Examples
--------
>>> a = np.ma.arange(24).reshape(2,3,4)
>>> a[:,0,1] = np.ma.masked
>>> a[:,1,:] = np.ma.masked
>>> a
masked_array(
data=[[[0, --, 2, 3],
[--, --, --, --],
[8, 9, 10, 11]],
[[12, --, 14, 15],
[--, --, --, --],
[20, 21, 22, 23]]],
mask=[[[False, True, False, False],
[ True, True, True, True],
[False, False, False, False]],
[[False, True, False, False],
[ True, True, True, True],
[False, False, False, False]]],
fill_value=999999)
>>> np.ma.apply_over_axes(np.ma.sum, a, [0,2])
masked_array(
data=[[[46],
[--],
[124]]],
mask=[[[False],
[ True],
[False]]],
fill_value=999999)
Tuple axis arguments to ufuncs are equivalent:
>>> np.ma.sum(a, axis=(0,2)).reshape((1,-1,1))
masked_array(
data=[[[46],
[--],
[124]]],
mask=[[[False],
[ True],
[False]]],
fill_value=999999)
"""
def average(a, axis=None, weights=None, returned=False):
"""
Return the weighted average of array over the given axis.
Parameters
----------
a : array_like
Data to be averaged.
Masked entries are not taken into account in the computation.
axis : int, optional
Axis along which to average `a`. If None, averaging is done over
the flattened array.
weights : array_like, optional
The importance that each element has in the computation of the average.
The weights array can either be 1-D (in which case its length must be
the size of `a` along the given axis) or of the same shape as `a`.
If ``weights=None``, then all data in `a` are assumed to have a
weight equal to one. The 1-D calculation is::
avg = sum(a * weights) / sum(weights)
The only constraint on `weights` is that `sum(weights)` must not be 0.
returned : bool, optional
Flag indicating whether a tuple ``(result, sum of weights)``
should be returned as output (True), or just the result (False).
Default is False.
Returns
-------
average, [sum_of_weights] : (tuple of) scalar or MaskedArray
The average along the specified axis. When returned is `True`,
return a tuple with the average as the first element and the sum
of the weights as the second element. The return type is `np.float64`
if `a` is of integer type and floats smaller than `float64`, or the
input data-type, otherwise. If returned, `sum_of_weights` is always
`float64`.
Examples
--------
>>> a = np.ma.array([1., 2., 3., 4.], mask=[False, False, True, True])
>>> np.ma.average(a, weights=[3, 1, 0, 0])
1.25
>>> x = np.ma.arange(6.).reshape(3, 2)
>>> x
masked_array(
data=[[0., 1.],
[2., 3.],
[4., 5.]],
mask=False,
fill_value=1e+20)
>>> avg, sumweights = np.ma.average(x, axis=0, weights=[1, 2, 3],
... returned=True)
>>> avg
masked_array(data=[2.6666666666666665, 3.6666666666666665],
mask=[False, False],
fill_value=1e+20)
"""
a = asarray(a)
m = getmask(a)
# inspired by 'average' in numpy/lib/function_base.py
if weights is None:
avg = a.mean(axis)
scl = avg.dtype.type(a.count(axis))
else:
wgt = np.asanyarray(weights)
if issubclass(a.dtype.type, (np.integer, np.bool_)):
result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8')
else:
result_dtype = np.result_type(a.dtype, wgt.dtype)
# Sanity checks
if a.shape != wgt.shape:
if axis is None:
raise TypeError(
"Axis must be specified when shapes of a and weights "
"differ.")
if wgt.ndim != 1:
raise TypeError(
"1D weights expected when shapes of a and weights differ.")
if wgt.shape[0] != a.shape[axis]:
raise ValueError(
"Length of weights not compatible with specified axis.")
# setup wgt to broadcast along axis
wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape)
wgt = wgt.swapaxes(-1, axis)
if m is not nomask:
wgt = wgt*(~a.mask)
scl = wgt.sum(axis=axis, dtype=result_dtype)
avg = np.multiply(a, wgt, dtype=result_dtype).sum(axis)/scl
if returned:
if scl.shape != avg.shape:
scl = np.broadcast_to(scl, avg.shape).copy()
return avg, scl
else:
return avg
def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
"""
Compute the median along the specified axis.
Returns the median of the array elements.
Parameters
----------
a : array_like
Input array or object that can be converted to an array.
axis : int, optional
Axis along which the medians are computed. The default (None) is
to compute the median along a flattened version of the array.
out : ndarray, optional
Alternative output array in which to place the result. It must
have the same shape and buffer length as the expected output
but the type will be cast if necessary.
overwrite_input : bool, optional
If True, then allow use of memory of input array (a) for
calculations. The input array will be modified by the call to
median. This will save memory when you do not need to preserve
the contents of the input array. Treat the input as undefined,
but it will probably be fully or partially sorted. Default is
False. Note that, if `overwrite_input` is True, and the input
is not already an `ndarray`, an error will be raised.
keepdims : bool, optional
If this is set to True, the axes which are reduced are left
in the result as dimensions with size one. With this option,
the result will broadcast correctly against the input array.
.. versionadded:: 1.10.0
Returns
-------
median : ndarray
A new array holding the result is returned unless out is
specified, in which case a reference to out is returned.
Return data-type is `float64` for integers and floats smaller than
`float64`, or the input data-type, otherwise.
See Also
--------
mean
Notes
-----
Given a vector ``V`` with ``N`` non masked values, the median of ``V``
is the middle value of a sorted copy of ``V`` (``Vs``) - i.e.
``Vs[(N-1)/2]``, when ``N`` is odd, or ``{Vs[N/2 - 1] + Vs[N/2]}/2``
when ``N`` is even.
Examples
--------
>>> x = np.ma.array(np.arange(8), mask=[0]*4 + [1]*4)
>>> np.ma.median(x)
1.5
>>> x = np.ma.array(np.arange(10).reshape(2, 5), mask=[0]*6 + [1]*4)
>>> np.ma.median(x)
2.5
>>> np.ma.median(x, axis=-1, overwrite_input=True)
masked_array(data=[2.0, 5.0],
mask=[False, False],
fill_value=1e+20)
"""
if not hasattr(a, 'mask'):
m = np.median(getdata(a, subok=True), axis=axis,
out=out, overwrite_input=overwrite_input,
keepdims=keepdims)
if isinstance(m, np.ndarray) and 1 <= m.ndim:
return masked_array(m, copy=False)
else:
return m
r, k = _ureduce(a, func=_median, axis=axis, out=out,
overwrite_input=overwrite_input)
if keepdims:
return r.reshape(k)
else:
return r
def _median(a, axis=None, out=None, overwrite_input=False):
# when an unmasked NaN is present return it, so we need to sort the NaN
# values behind the mask
if np.issubdtype(a.dtype, np.inexact):
fill_value = np.inf
else:
fill_value = None
if overwrite_input:
if axis is None:
asorted = a.ravel()
asorted.sort(fill_value=fill_value)
else:
a.sort(axis=axis, fill_value=fill_value)
asorted = a
else:
asorted = sort(a, axis=axis, fill_value=fill_value)
if axis is None:
axis = 0
else:
axis = normalize_axis_index(axis, asorted.ndim)
if asorted.shape[axis] == 0:
# for empty axis integer indices fail so use slicing to get same result
# as median (which is mean of empty slice = nan)
indexer = [slice(None)] * asorted.ndim
indexer[axis] = slice(0, 0)
indexer = tuple(indexer)
return np.ma.mean(asorted[indexer], axis=axis, out=out)
if asorted.ndim == 1:
counts = count(asorted)
idx, odd = divmod(count(asorted), 2)
mid = asorted[idx + odd - 1:idx + 1]
if np.issubdtype(asorted.dtype, np.inexact) and asorted.size > 0:
# avoid inf / x = masked
s = mid.sum(out=out)
if not odd:
s = np.true_divide(s, 2., casting='safe', out=out)
s = np.lib.utils._median_nancheck(asorted, s, axis, out)
else:
s = mid.mean(out=out)
# if the result is masked, either the input contained enough
# minimum_fill_value entries for one of them to be the median, or
# all values are masked
if np.ma.is_masked(s) and not np.all(asorted.mask):
return np.ma.minimum_fill_value(asorted)
return s
counts = count(asorted, axis=axis, keepdims=True)
h = counts // 2
# duplicate high if odd number of elements so mean does nothing
odd = counts % 2 == 1
l = np.where(odd, h, h-1)
lh = np.concatenate([l,h], axis=axis)
# get low and high median
low_high = np.take_along_axis(asorted, lh, axis=axis)
def replace_masked(s):
# Replace masked entries with minimum_fill_value unless all values
# are masked. This is required as the sort order of values equal to or
# larger than the fill value is undefined and a valid value could be
# placed elsewhere, e.g. [4, --, inf].
if np.ma.is_masked(s):
rep = (~np.all(asorted.mask, axis=axis, keepdims=True)) & s.mask
s.data[rep] = np.ma.minimum_fill_value(asorted)
s.mask[rep] = False
replace_masked(low_high)
if np.issubdtype(asorted.dtype, np.inexact):
# avoid inf / x = masked
s = np.ma.sum(low_high, axis=axis, out=out)
np.true_divide(s.data, 2., casting='unsafe', out=s.data)
s = np.lib.utils._median_nancheck(asorted, s, axis, out)
else:
s = np.ma.mean(low_high, axis=axis, out=out)
return s
def compress_nd(x, axis=None):
"""Suppress slices from multiple dimensions which contain masked values.
Parameters
----------
x : array_like, MaskedArray
The array to operate on. If not a MaskedArray instance (or if no array
elements are masked), `x` is interpreted as a MaskedArray with `mask`
set to `nomask`.
axis : tuple of ints or int, optional
Which dimensions to suppress slices from can be configured with this
parameter.
- If axis is a tuple of ints, those are the axes to suppress slices from.
- If axis is an int, then that is the only axis to suppress slices from.
- If axis is None, all axis are selected.
Returns
-------
compress_array : ndarray
The compressed array.
"""
x = asarray(x)
m = getmask(x)
# Set axis to tuple of ints
if axis is None:
axis = tuple(range(x.ndim))
else:
axis = normalize_axis_tuple(axis, x.ndim)
# Nothing is masked: return x
if m is nomask or not m.any():
return x._data
# All is masked: return empty
if m.all():
return nxarray([])
# Filter elements through boolean indexing
data = x._data
for ax in axis:
axes = tuple(list(range(ax)) + list(range(ax + 1, x.ndim)))
data = data[(slice(None),)*ax + (~m.any(axis=axes),)]
return data
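# Illustrative example (a sketch, not part of the original module), using the
# same data as the compress_rowcols example below:
#
#     >>> x = masked_array(np.arange(9).reshape(3, 3),
#     ...                  mask=[[1, 0, 0], [1, 0, 0], [0, 0, 0]])
#     >>> compress_nd(x)            # drop every row and column with a mask
#     array([[7, 8]])
#     >>> compress_nd(x, axis=0)    # drop masked rows only
#     array([[6, 7, 8]])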
def compress_rowcols(x, axis=None):
"""
Suppress the rows and/or columns of a 2-D array that contain
masked values.
The suppression behavior is selected with the `axis` parameter.
- If axis is None, both rows and columns are suppressed.
- If axis is 0, only rows are suppressed.
- If axis is 1 or -1, only columns are suppressed.
Parameters
----------
x : array_like, MaskedArray
The array to operate on. If not a MaskedArray instance (or if no array
elements are masked), `x` is interpreted as a MaskedArray with
`mask` set to `nomask`. Must be a 2D array.
axis : int, optional
Axis along which to perform the operation. Default is None.
Returns
-------
compressed_array : ndarray
The compressed array.
Examples
--------
>>> x = np.ma.array(np.arange(9).reshape(3, 3), mask=[[1, 0, 0],
... [1, 0, 0],
... [0, 0, 0]])
>>> x
masked_array(
data=[[--, 1, 2],
[--, 4, 5],
[6, 7, 8]],
mask=[[ True, False, False],
[ True, False, False],
[False, False, False]],
fill_value=999999)
>>> np.ma.compress_rowcols(x)
array([[7, 8]])
>>> np.ma.compress_rowcols(x, 0)
array([[6, 7, 8]])
>>> np.ma.compress_rowcols(x, 1)
array([[1, 2],
[4, 5],
[7, 8]])
"""
if asarray(x).ndim != 2:
raise NotImplementedError("compress_rowcols works for 2D arrays only.")
return compress_nd(x, axis=axis)
def compress_rows(a):
"""
Suppress whole rows of a 2-D array that contain masked values.
This is equivalent to ``np.ma.compress_rowcols(a, 0)``, see
`compress_rowcols` for details.
See Also
--------
compress_rowcols
"""
a = asarray(a)
if a.ndim != 2:
raise NotImplementedError("compress_rows works for 2D arrays only.")
return compress_rowcols(a, 0)
def compress_cols(a):
"""
Suppress whole columns of a 2-D array that contain masked values.
This is equivalent to ``np.ma.compress_rowcols(a, 1)``, see
`compress_rowcols` for details.
See Also
--------
compress_rowcols
"""
a = asarray(a)
if a.ndim != 2:
raise NotImplementedError("compress_cols works for 2D arrays only.")
return compress_rowcols(a, 1)
def mask_rows(a, axis=np._NoValue):
"""
Mask rows of a 2D array that contain masked values.
This function is a shortcut to ``mask_rowcols`` with `axis` equal to 0.
See Also
--------
mask_rowcols : Mask rows and/or columns of a 2D array.
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.zeros((3, 3), dtype=int)
>>> a[1, 1] = 1
>>> a
array([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]])
>>> a = ma.masked_equal(a, 1)
>>> a
masked_array(
data=[[0, 0, 0],
[0, --, 0],
[0, 0, 0]],
mask=[[False, False, False],
[False, True, False],
[False, False, False]],
fill_value=1)
>>> ma.mask_rows(a)
masked_array(
data=[[0, 0, 0],
[--, --, --],
[0, 0, 0]],
mask=[[False, False, False],
[ True, True, True],
[False, False, False]],
fill_value=1)
"""
if axis is not np._NoValue:
# remove the axis argument when this deprecation expires
# NumPy 1.18.0, 2019-11-28
warnings.warn(
"The axis argument has always been ignored, in future passing it "
"will raise TypeError", DeprecationWarning, stacklevel=2)
return mask_rowcols(a, 0)
def mask_cols(a, axis=np._NoValue):
"""
Mask columns of a 2D array that contain masked values.
This function is a shortcut to ``mask_rowcols`` with `axis` equal to 1.
See Also
--------
mask_rowcols : Mask rows and/or columns of a 2D array.
masked_where : Mask where a condition is met.
Examples
--------
>>> import numpy.ma as ma
>>> a = np.zeros((3, 3), dtype=int)
>>> a[1, 1] = 1
>>> a
array([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]])
>>> a = ma.masked_equal(a, 1)
>>> a
masked_array(
data=[[0, 0, 0],
[0, --, 0],
[0, 0, 0]],
mask=[[False, False, False],
[False, True, False],
[False, False, False]],
fill_value=1)
>>> ma.mask_cols(a)
masked_array(
data=[[0, --, 0],
[0, --, 0],
[0, --, 0]],
mask=[[False, True, False],
[False, True, False],
[False, True, False]],
fill_value=1)
"""
if axis is not np._NoValue:
# remove the axis argument when this deprecation expires
# NumPy 1.18.0, 2019-11-28
warnings.warn(
"The axis argument has always been ignored, in future passing it "
"will raise TypeError", DeprecationWarning, stacklevel=2)
return mask_rowcols(a, 1)
#####--------------------------------------------------------------------------
#---- --- arraysetops ---
#####--------------------------------------------------------------------------
def ediff1d(arr, to_end=None, to_begin=None):
"""
Compute the differences between consecutive elements of an array.
This function is the equivalent of `numpy.ediff1d` that takes masked
values into account, see `numpy.ediff1d` for details.
See Also
--------
numpy.ediff1d : Equivalent function for ndarrays.
"""
arr = ma.asanyarray(arr).flat
ed = arr[1:] - arr[:-1]
arrays = [ed]
#
if to_begin is not None:
arrays.insert(0, to_begin)
if to_end is not None:
arrays.append(to_end)
#
if len(arrays) != 1:
# We'll save ourselves a copy of a potentially large array in the common
# case where neither to_begin nor to_end was given.
ed = hstack(arrays)
#
return ed
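# Illustrative behaviour (a sketch, not part of the original module); a
# masked entry propagates into both of the differences it participates in:
#
#     >>> x = masked_array([1, 2, 4, 7], mask=[0, 0, 1, 0])
#     >>> ediff1d(x)
#     masked_array(data=[1, --, --],
#                  mask=[False,  True,  True],
#            fill_value=999999)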
def unique(ar1, return_index=False, return_inverse=False):
"""
Finds the unique elements of an array.
Masked values are considered the same element (masked). The output array
is always a masked array. See `numpy.unique` for more details.
See Also
--------
numpy.unique : Equivalent function for ndarrays.
"""
output = np.unique(ar1,
return_index=return_index,
return_inverse=return_inverse)
if isinstance(output, tuple):
output = list(output)
output[0] = output[0].view(MaskedArray)
output = tuple(output)
else:
output = output.view(MaskedArray)
return output
def intersect1d(ar1, ar2, assume_unique=False):
"""
Returns the unique elements common to both arrays.
Masked values are considered equal one to the other.
The output is always a masked array.
See `numpy.intersect1d` for more details.
See Also
--------
numpy.intersect1d : Equivalent function for ndarrays.
Examples
--------
>>> x = np.ma.array([1, 3, 3, 3], mask=[0, 0, 0, 1])
>>> y = np.ma.array([3, 1, 1, 1], mask=[0, 0, 0, 1])
>>> np.ma.intersect1d(x, y)
masked_array(data=[1, 3, --],
mask=[False, False, True],
fill_value=999999)
"""
if assume_unique:
aux = ma.concatenate((ar1, ar2))
else:
# Might be faster than unique( intersect1d( ar1, ar2 ) )?
aux = ma.concatenate((unique(ar1), unique(ar2)))
aux.sort()
return aux[:-1][aux[1:] == aux[:-1]]
def setxor1d(ar1, ar2, assume_unique=False):
"""
Set exclusive-or of 1-D arrays with unique elements.
The output is always a masked array. See `numpy.setxor1d` for more details.
See Also
--------
numpy.setxor1d : Equivalent function for ndarrays.
"""
if not assume_unique:
ar1 = unique(ar1)
ar2 = unique(ar2)
aux = ma.concatenate((ar1, ar2))
if aux.size == 0:
return aux
aux.sort()
auxf = aux.filled()
# flag = ediff1d( aux, to_end = 1, to_begin = 1 ) == 0
flag = ma.concatenate(([True], (auxf[1:] != auxf[:-1]), [True]))
# flag2 = ediff1d( flag ) == 0
flag2 = (flag[1:] == flag[:-1])
return aux[flag2]
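# Illustrative example (a sketch, not part of the original module):
#
#     >>> setxor1d(masked_array([1, 2, 3]), [2, 3, 4])
#     masked_array(data=[1, 4],
#                  mask=False,
#            fill_value=999999)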
def in1d(ar1, ar2, assume_unique=False, invert=False):
"""
Test whether each element of an array is also present in a second
array.
The output is always a masked array. See `numpy.in1d` for more details.
We recommend using :func:`isin` instead of `in1d` for new code.
See Also
--------
isin : Version of this function that preserves the shape of ar1.
numpy.in1d : Equivalent function for ndarrays.
Notes
-----
.. versionadded:: 1.4.0
"""
if not assume_unique:
ar1, rev_idx = unique(ar1, return_inverse=True)
ar2 = unique(ar2)
ar = ma.concatenate((ar1, ar2))
# We need this to be a stable sort, so always use 'mergesort'
# here. The values from the first array should always come before
# the values from the second array.
order = ar.argsort(kind='mergesort')
sar = ar[order]
if invert:
bool_ar = (sar[1:] != sar[:-1])
else:
bool_ar = (sar[1:] == sar[:-1])
flag = ma.concatenate((bool_ar, [invert]))
indx = order.argsort(kind='mergesort')[:len(ar1)]
if assume_unique:
return flag[indx]
else:
return flag[indx][rev_idx]
def isin(element, test_elements, assume_unique=False, invert=False):
"""
Calculates `element in test_elements`, broadcasting over
`element` only.
The output is always a masked array of the same shape as `element`.
See `numpy.isin` for more details.
See Also
--------
in1d : Flattened version of this function.
numpy.isin : Equivalent function for ndarrays.
Notes
-----
.. versionadded:: 1.13.0
"""
element = ma.asarray(element)
return in1d(element, test_elements, assume_unique=assume_unique,
invert=invert).reshape(element.shape)
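# Illustrative behaviour (a sketch, not part of the original module); unlike
# in1d, isin preserves the shape of `element`:
#
#     >>> isin([[1, 2], [3, 4]], [2, 4])
#     masked_array(data=[[False, True], [False, True]], ...)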
def union1d(ar1, ar2):
"""
Union of two arrays.
The output is always a masked array. See `numpy.union1d` for more details.
See Also
--------
numpy.union1d : Equivalent function for ndarrays.
"""
return unique(ma.concatenate((ar1, ar2), axis=None))
def setdiff1d(ar1, ar2, assume_unique=False):
"""
Set difference of 1D arrays with unique elements.
The output is always a masked array. See `numpy.setdiff1d` for more
details.
See Also
--------
numpy.setdiff1d : Equivalent function for ndarrays.
Examples
--------
>>> x = np.ma.array([1, 2, 3, 4], mask=[0, 1, 0, 1])
>>> np.ma.setdiff1d(x, [1, 2])
masked_array(data=[3, --],
mask=[False, True],
fill_value=999999)
"""
if assume_unique:
ar1 = ma.asarray(ar1).ravel()
else:
ar1 = unique(ar1)
ar2 = unique(ar2)
return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]
###############################################################################
# Covariance #
###############################################################################
def _covhelper(x, y=None, rowvar=True, allow_masked=True):
"""
Private function for the computation of covariance and correlation
coefficients.
"""
x = ma.array(x, ndmin=2, copy=True, dtype=float)
xmask = ma.getmaskarray(x)
# Quick exit if we can't process masked data
if not allow_masked and xmask.any():
raise ValueError("Cannot process masked data.")
#
if x.shape[0] == 1:
rowvar = True
# Make sure that rowvar is either 0 or 1
rowvar = int(bool(rowvar))
axis = 1 - rowvar
if rowvar:
tup = (slice(None), None)
else:
tup = (None, slice(None))
#
if y is None:
xnotmask = np.logical_not(xmask).astype(int)
else:
y = array(y, copy=False, ndmin=2, dtype=float)
ymask = ma.getmaskarray(y)
if not allow_masked and ymask.any():
raise ValueError("Cannot process masked data.")
if xmask.any() or ymask.any():
if y.shape == x.shape:
# Define some common mask
common_mask = np.logical_or(xmask, ymask)
if common_mask is not nomask:
xmask = x._mask = y._mask = ymask = common_mask
x._sharedmask = False
y._sharedmask = False
x = ma.concatenate((x, y), axis)
xnotmask = np.logical_not(np.concatenate((xmask, ymask), axis)).astype(int)
x -= x.mean(axis=rowvar)[tup]
return (x, xnotmask, rowvar)
def cov(x, y=None, rowvar=True, bias=False, allow_masked=True, ddof=None):
"""
Estimate the covariance matrix.
Except for the handling of missing data this function does the same as
`numpy.cov`. For more details and examples, see `numpy.cov`.
By default, masked values are recognized as such. If `x` and `y` have the
same shape, a common mask is allocated: if ``x[i,j]`` is masked, then
``y[i,j]`` will also be masked.
Setting `allow_masked` to False will raise an exception if values are
missing in either of the input arrays.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `x`.
rowvar : bool, optional
If `rowvar` is True (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : bool, optional
Default normalization (False) is by ``(N-1)``, where ``N`` is the
number of observations given (unbiased estimate). If `bias` is True,
then normalization is by ``N``. This keyword can be overridden by
the keyword ``ddof`` in numpy versions >= 1.5.
allow_masked : bool, optional
If True, masked values are propagated pair-wise: if a value is masked
in `x`, the corresponding value is masked in `y`.
If False, raises a `ValueError` exception when some values are missing.
ddof : {None, int}, optional
If not ``None`` normalization is by ``(N - ddof)``, where ``N`` is
the number of observations; this overrides the value implied by
``bias``. The default value is ``None``.
.. versionadded:: 1.5
Raises
------
ValueError
Raised if some values are missing and `allow_masked` is False.
See Also
--------
numpy.cov
"""
# Check inputs
if ddof is not None and ddof != int(ddof):
raise ValueError("ddof must be an integer")
# Set up ddof
if ddof is None:
if bias:
ddof = 0
else:
ddof = 1
(x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked)
if not rowvar:
fact = np.dot(xnotmask.T, xnotmask) * 1. - ddof
result = (dot(x.T, x.conj(), strict=False) / fact).squeeze()
else:
fact = np.dot(xnotmask, xnotmask.T) * 1. - ddof
result = (dot(x, x.T.conj(), strict=False) / fact).squeeze()
return result
def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, allow_masked=True,
ddof=np._NoValue):
"""
Return Pearson product-moment correlation coefficients.
Except for the handling of missing data this function does the same as
`numpy.corrcoef`. For more details and examples, see `numpy.corrcoef`.
Parameters
----------
x : array_like
A 1-D or 2-D array containing multiple variables and observations.
Each row of `x` represents a variable, and each column a single
observation of all those variables. Also see `rowvar` below.
y : array_like, optional
An additional set of variables and observations. `y` has the same
shape as `x`.
rowvar : bool, optional
If `rowvar` is True (default), then each row represents a
variable, with observations in the columns. Otherwise, the relationship
is transposed: each column represents a variable, while the rows
contain observations.
bias : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.10.0
allow_masked : bool, optional
If True, masked values are propagated pair-wise: if a value is masked
in `x`, the corresponding value is masked in `y`.
If False, raises an exception. Because `bias` is deprecated, this
argument needs to be treated as keyword only to avoid a warning.
ddof : _NoValue, optional
Has no effect, do not use.
.. deprecated:: 1.10.0
See Also
--------
numpy.corrcoef : Equivalent function in top-level NumPy module.
cov : Estimate the covariance matrix.
Notes
-----
This function accepts but discards arguments `bias` and `ddof`. This is
for backwards compatibility with previous versions of this function. These
arguments had no effect on the return values of the function and can be
safely ignored in this and previous versions of numpy.
"""
msg = 'bias and ddof have no effect and are deprecated'
if bias is not np._NoValue or ddof is not np._NoValue:
# 2015-03-15, 1.10
warnings.warn(msg, DeprecationWarning, stacklevel=2)
# Get the data
(x, xnotmask, rowvar) = _covhelper(x, y, rowvar, allow_masked)
# Compute the covariance matrix
if not rowvar:
fact = np.dot(xnotmask.T, xnotmask) * 1.
c = (dot(x.T, x.conj(), strict=False) / fact).squeeze()
else:
fact = np.dot(xnotmask, xnotmask.T) * 1.
c = (dot(x, x.T.conj(), strict=False) / fact).squeeze()
# Check whether we have a scalar
try:
diag = ma.diagonal(c)
except ValueError:
return 1
#
if xnotmask.all():
_denom = ma.sqrt(ma.multiply.outer(diag, diag))
else:
_denom = diagflat(diag)
_denom._sharedmask = False # We know return is always a copy
n = x.shape[1 - rowvar]
if rowvar:
for i in range(n - 1):
for j in range(i + 1, n):
_x = mask_cols(vstack((x[i], x[j]))).var(axis=1)
_denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x))
else:
for i in range(n - 1):
for j in range(i + 1, n):
_x = mask_cols(
vstack((x[:, i], x[:, j]))).var(axis=1)
_denom[i, j] = _denom[j, i] = ma.sqrt(ma.multiply.reduce(_x))
return c / _denom
#####--------------------------------------------------------------------------
#---- --- Concatenation helpers ---
#####--------------------------------------------------------------------------
class MAxisConcatenator(AxisConcatenator):
"""
Translate slice objects to concatenation along an axis.
For documentation on usage, see `mr_class`.
See Also
--------
mr_class
"""
concatenate = staticmethod(concatenate)
@classmethod
def makemat(cls, arr):
# There used to be a view as np.matrix here, but we may eventually
# deprecate that class. In preparation, we use the unmasked version
# to construct the matrix (with copy=False for backwards compatibility
# with the .view)
data = super().makemat(arr.data, copy=False)
return array(data, mask=arr.mask)
def __getitem__(self, key):
# matrix builder syntax, like 'a, b; c, d'
if isinstance(key, str):
raise MAError("Unavailable for masked array.")
return super().__getitem__(key)
class mr_class(MAxisConcatenator):
"""
Translate slice objects to concatenation along the first axis.
This is the masked array version of `lib.index_tricks.RClass`.
See Also
--------
lib.index_tricks.RClass
Examples
--------
>>> np.ma.mr_[np.ma.array([1,2,3]), 0, 0, np.ma.array([4,5,6])]
masked_array(data=[1, 2, 3, ..., 4, 5, 6],
mask=False,
fill_value=999999)
"""
def __init__(self):
MAxisConcatenator.__init__(self, 0)
mr_ = mr_class()
#####--------------------------------------------------------------------------
#---- Find unmasked data ---
#####--------------------------------------------------------------------------
def flatnotmasked_edges(a):
"""
Find the indices of the first and last unmasked values.
Expects a 1-D `MaskedArray`, returns None if all values are masked.
Parameters
----------
a : array_like
Input 1-D `MaskedArray`
Returns
-------
edges : ndarray or None
The indices of the first and last non-masked values in the array.
Returns None if all values are masked.
See Also
--------
flatnotmasked_contiguous, notmasked_contiguous, notmasked_edges
clump_masked, clump_unmasked
Notes
-----
Only accepts 1-D arrays.
Examples
--------
>>> a = np.ma.arange(10)
>>> np.ma.flatnotmasked_edges(a)
array([0, 9])
>>> mask = (a < 3) | (a > 8) | (a == 5)
>>> a[mask] = np.ma.masked
>>> np.array(a[~a.mask])
array([3, 4, 6, 7, 8])
>>> np.ma.flatnotmasked_edges(a)
array([3, 8])
>>> a[:] = np.ma.masked
>>> print(np.ma.flatnotmasked_edges(a))
None
"""
m = getmask(a)
if m is nomask or not np.any(m):
return np.array([0, a.size - 1])
unmasked = np.flatnonzero(~m)
if len(unmasked) > 0:
return unmasked[[0, -1]]
else:
return None
def notmasked_edges(a, axis=None):
"""
Find the indices of the first and last unmasked values along an axis.
If all values are masked, return None. Otherwise, return a list
of two tuples, corresponding to the indices of the first and last
unmasked values respectively.
Parameters
----------
a : array_like
The input array.
axis : int, optional
Axis along which to perform the operation.
If None (default), applies to a flattened version of the array.
Returns
-------
edges : ndarray or list
An array of start and end indexes if there are any masked data in
the array. If there are no masked data in the array, `edges` is a
list of the first and last index.
See Also
--------
flatnotmasked_contiguous, flatnotmasked_edges, notmasked_contiguous
clump_masked, clump_unmasked
Examples
--------
>>> a = np.arange(9).reshape((3, 3))
>>> m = np.zeros_like(a)
>>> m[1:, 1:] = 1
>>> am = np.ma.array(a, mask=m)
>>> np.array(am[~am.mask])
array([0, 1, 2, 3, 6])
>>> np.ma.notmasked_edges(am)
array([0, 6])
"""
a = asarray(a)
if axis is None or a.ndim == 1:
return flatnotmasked_edges(a)
m = getmaskarray(a)
idx = array(np.indices(a.shape), mask=np.asarray([m] * a.ndim))
return [tuple([idx[i].min(axis).compressed() for i in range(a.ndim)]),
tuple([idx[i].max(axis).compressed() for i in range(a.ndim)]), ]
def flatnotmasked_contiguous(a):
"""
Find contiguous unmasked data in a masked array along the given axis.
Parameters
----------
a : ndarray
The input array.
Returns
-------
slice_list : list
A sorted sequence of `slice` objects (start index, end index).
.. versionchanged:: 1.15.0
Now returns an empty list instead of None for a fully masked array
See Also
--------
flatnotmasked_edges, notmasked_contiguous, notmasked_edges
clump_masked, clump_unmasked
Notes
-----
Only accepts 2-D arrays at most.
Examples
--------
>>> a = np.ma.arange(10)
>>> np.ma.flatnotmasked_contiguous(a)
[slice(0, 10, None)]
>>> mask = (a < 3) | (a > 8) | (a == 5)
>>> a[mask] = np.ma.masked
>>> np.array(a[~a.mask])
array([3, 4, 6, 7, 8])
>>> np.ma.flatnotmasked_contiguous(a)
[slice(3, 5, None), slice(6, 9, None)]
>>> a[:] = np.ma.masked
>>> np.ma.flatnotmasked_contiguous(a)
[]
"""
m = getmask(a)
if m is nomask:
return [slice(0, a.size)]
i = 0
result = []
for (k, g) in itertools.groupby(m.ravel()):
n = len(list(g))
if not k:
result.append(slice(i, i + n))
i += n
return result
def notmasked_contiguous(a, axis=None):
"""
Find contiguous unmasked data in a masked array along the given axis.
Parameters
----------
a : array_like
The input array.
axis : int, optional
Axis along which to perform the operation.
If None (default), applies to a flattened version of the array, and this
is the same as `flatnotmasked_contiguous`.
Returns
-------
endpoints : list
A list of slices (start and end indexes) of unmasked indexes
in the array.
If the input is 2d and axis is specified, the result is a list of lists.
See Also
--------
flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
clump_masked, clump_unmasked
Notes
-----
Only accepts 2-D arrays at most.
Examples
--------
>>> a = np.arange(12).reshape((3, 4))
>>> mask = np.zeros_like(a)
>>> mask[1:, :-1] = 1; mask[0, 1] = 1; mask[-1, 0] = 0
>>> ma = np.ma.array(a, mask=mask)
>>> ma
masked_array(
data=[[0, --, 2, 3],
[--, --, --, 7],
[8, --, --, 11]],
mask=[[False, True, False, False],
[ True, True, True, False],
[False, True, True, False]],
fill_value=999999)
>>> np.array(ma[~ma.mask])
array([ 0, 2, 3, 7, 8, 11])
>>> np.ma.notmasked_contiguous(ma)
[slice(0, 1, None), slice(2, 4, None), slice(7, 9, None), slice(11, 12, None)]
>>> np.ma.notmasked_contiguous(ma, axis=0)
[[slice(0, 1, None), slice(2, 3, None)], [], [slice(0, 1, None)], [slice(0, 3, None)]]
>>> np.ma.notmasked_contiguous(ma, axis=1)
[[slice(0, 1, None), slice(2, 4, None)], [slice(3, 4, None)], [slice(0, 1, None), slice(3, 4, None)]]
"""
a = asarray(a)
nd = a.ndim
if nd > 2:
raise NotImplementedError("Currently limited to atmost 2D array.")
if axis is None or nd == 1:
return flatnotmasked_contiguous(a)
#
result = []
#
other = (axis + 1) % 2
idx = [0, 0]
idx[axis] = slice(None, None)
#
for i in range(a.shape[other]):
idx[other] = i
result.append(flatnotmasked_contiguous(a[tuple(idx)]))
return result
def _ezclump(mask):
"""
Finds the clumps (groups of data with the same values) for a 1D bool array.
Returns a series of slices.
"""
if mask.ndim > 1:
mask = mask.ravel()
idx = (mask[1:] ^ mask[:-1]).nonzero()
idx = idx[0] + 1
if mask[0]:
if len(idx) == 0:
return [slice(0, mask.size)]
r = [slice(0, idx[0])]
r.extend((slice(left, right)
for left, right in zip(idx[1:-1:2], idx[2::2])))
else:
if len(idx) == 0:
return []
r = [slice(left, right) for left, right in zip(idx[:-1:2], idx[1::2])]
if mask[-1]:
r.append(slice(idx[-1], mask.size))
return r
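# Illustrative behaviour (a sketch, not part of the original module);
# _ezclump returns one slice per run of True values:
#
#     >>> _ezclump(np.array([True, True, False, False, True]))
#     [slice(0, 2, None), slice(4, 5, None)]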
def clump_unmasked(a):
"""
Return list of slices corresponding to the unmasked clumps of a 1-D array.
(A "clump" is defined as a contiguous region of the array).
Parameters
----------
a : ndarray
A one-dimensional masked array.
Returns
-------
slices : list of slice
The list of slices, one for each continuous region of unmasked
elements in `a`.
Notes
-----
.. versionadded:: 1.4.0
See Also
--------
flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
notmasked_contiguous, clump_masked
Examples
--------
>>> a = np.ma.masked_array(np.arange(10))
>>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked
>>> np.ma.clump_unmasked(a)
[slice(3, 6, None), slice(7, 8, None)]
"""
mask = getattr(a, '_mask', nomask)
if mask is nomask:
return [slice(0, a.size)]
return _ezclump(~mask)
def clump_masked(a):
"""
Returns a list of slices corresponding to the masked clumps of a 1-D array.
(A "clump" is defined as a contiguous region of the array).
Parameters
----------
a : ndarray
A one-dimensional masked array.
Returns
-------
slices : list of slice
The list of slices, one for each continuous region of masked elements
in `a`.
Notes
-----
.. versionadded:: 1.4.0
See Also
--------
flatnotmasked_edges, flatnotmasked_contiguous, notmasked_edges
notmasked_contiguous, clump_unmasked
Examples
--------
>>> a = np.ma.masked_array(np.arange(10))
>>> a[[0, 1, 2, 6, 8, 9]] = np.ma.masked
>>> np.ma.clump_masked(a)
[slice(0, 3, None), slice(6, 7, None), slice(8, 10, None)]
"""
mask = ma.getmask(a)
if mask is nomask:
return []
return _ezclump(mask)
###############################################################################
# Polynomial fit #
###############################################################################
def vander(x, n=None):
"""
Masked values in the input array result in rows of zeros.
"""
_vander = np.vander(x, n)
m = getmask(x)
if m is not nomask:
_vander[m] = 0
return _vander
vander.__doc__ = ma.doc_note(np.vander.__doc__, vander.__doc__)
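# Illustrative example (a sketch, not part of the original module); the
# masked entry becomes a row of zeros:
#
#     >>> x = masked_array([1, 2, 3], mask=[0, 1, 0])
#     >>> vander(x, 3)
#     array([[1, 1, 1],
#            [0, 0, 0],
#            [9, 3, 1]])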
def polyfit(x, y, deg, rcond=None, full=False, w=None, cov=False):
"""
Any masked values in x are propagated to y, and vice-versa.
"""
x = asarray(x)
y = asarray(y)
m = getmask(x)
if y.ndim == 1:
m = mask_or(m, getmask(y))
elif y.ndim == 2:
my = getmask(mask_rows(y))
if my is not nomask:
m = mask_or(m, my[:, 0])
else:
raise TypeError("Expected a 1D or 2D array for y!")
if w is not None:
w = asarray(w)
if w.ndim != 1:
raise TypeError("expected a 1-d array for weights")
if w.shape[0] != y.shape[0]:
raise TypeError("expected w and y to have the same length")
m = mask_or(m, getmask(w))
if m is not nomask:
not_m = ~m
if w is not None:
w = w[not_m]
return np.polyfit(x[not_m], y[not_m], deg, rcond, full, w, cov)
else:
return np.polyfit(x, y, deg, rcond, full, w, cov)
polyfit.__doc__ = ma.doc_note(np.polyfit.__doc__, polyfit.__doc__)
|
pbrod/numpy
|
numpy/ma/extras.py
|
Python
|
bsd-3-clause
| 58,302
|
def extractTekuteku(item):
"""
Parser for 'tekuteku'
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
if 'WATTT' in item['tags']:
return buildReleaseMessageWithType(item, 'WATTT', vol, chp, frag=frag, postfix=postfix)
return False
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractTekuteku.py
|
Python
|
bsd-3-clause
| 342
|
# -*- test-case-name: vxapprouter.tests.test_router -*-
import json
from urlparse import urlunparse
from twisted.internet.defer import inlineCallbacks
from vumi import log
from vumi.components.session import SessionManager
from vumi.config import (
ConfigDict, ConfigList, ConfigInt, ConfigText, ConfigUrl)
from vumi.dispatchers.endpoint_dispatchers import Dispatcher
from vumi.message import TransportUserMessage
from vumi.persist.txredis_manager import TxRedisManager
class ApplicationDispatcherConfig(Dispatcher.CONFIG_CLASS):
# Static configuration
session_expiry = ConfigInt(
("Maximum amount of time in seconds to keep session data around. "
"Defaults to 5 minutes."),
default=5 * 60, static=True)
message_expiry = ConfigInt(
("Maximum amount of time in seconds to keep message data around. "
"This is kept to handle async events. Defaults to 2 days."),
default=60 * 60 * 24 * 2, static=True)
redis_manager = ConfigDict(
"Redis client configuration.", default={}, static=True)
# Dynamic, per-message configuration
menu_title = ConfigText(
"Content for the menu title", default="Please select a choice.")
entries = ConfigList(
"A list of application endpoints and associated labels",
default=[])
invalid_input_message = ConfigText(
"Prompt to display when warning about an invalid choice",
default=("That is an incorrect choice. Please enter the number "
"of the menu item you wish to choose."))
try_again_message = ConfigText(
"What text to display when the user needs to try again.",
default="Try Again")
error_message = ConfigText(
("Prompt to display when a configuration change invalidates "
"an active session."),
default=("Oops! We experienced a temporary error. "
"Please try and dial the line again."))
routing_table = ConfigDict(
"Routing table. Keys are connector names, values are dicts mapping "
"endpoint names to [connector, endpoint] pairs.", required=True)
class StateResponse(object):
def __init__(self, state, session_update=None, inbound=(), outbound=()):
self.next_state = state
self.session_update = session_update or {}
self.inbound = inbound
self.outbound = outbound
def mkmenu(options, start=1, format='%s) %s'):
items = [format % (idx, opt) for idx, opt in enumerate(options, start)]
return '\n'.join(items)
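# For reference: mkmenu(['apples', 'oranges']) renders '1) apples\n2) oranges'.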
def clean(content):
return (content or '').strip()
class ApplicationDispatcher(Dispatcher):
CONFIG_CLASS = ApplicationDispatcherConfig
worker_name = 'application_dispatcher'
STATE_START = "start"
STATE_SELECT = "select"
STATE_SELECTED = "selected"
STATE_BAD_INPUT = "bad_input"
@inlineCallbacks
def setup_dispatcher(self):
yield super(ApplicationDispatcher, self).setup_dispatcher()
self.handlers = {
self.STATE_START: self.handle_state_start,
self.STATE_SELECT: self.handle_state_select,
self.STATE_SELECTED: self.handle_state_selected,
self.STATE_BAD_INPUT: self.handle_state_bad_input,
}
config = self.get_static_config()
txrm = yield TxRedisManager.from_config(config.redis_manager)
self.redis = txrm.sub_manager(self.worker_name)
def session_manager(self, config):
return SessionManager(
self.redis, max_session_length=config.session_expiry)
def forwarded_message(self, msg, **kwargs):
copy = TransportUserMessage(**msg.payload)
for k, v in kwargs.items():
copy[k] = v
return copy
def target_endpoints(self, config):
"""
        Return the set of endpoints currently configured; used to check that
        an active endpoint is still valid.
"""
return set([entry['endpoint'] for entry in config.entries])
def get_endpoint_for_choice(self, msg, session):
"""
Retrieves the candidate endpoint based on the user's numeric choice
"""
endpoints = json.loads(session['endpoints'])
index = self.get_menu_choice(msg, (1, len(endpoints)))
if index is None:
return None
return endpoints[index - 1]
def get_menu_choice(self, msg, valid_range):
"""
Parse user input for selecting a numeric menu choice
"""
try:
value = int(clean(msg['content']))
except ValueError:
return None
else:
if value not in range(valid_range[0], valid_range[1] + 1):
return None
return value
def make_first_reply(self, config, session, msg):
return msg.reply(self.create_menu(config))
def make_invalid_input_reply(self, config, session, msg):
return msg.reply('%s\n\n1. %s' % (
config.invalid_input_message, config.try_again_message))
def handle_state_start(self, config, session, msg):
"""
When presenting the menu, we also store the list of endpoints
in the session data. Later, in the select state, we load
these endpoints and retrieve the candidate endpoint based
on the user's menu choice.
"""
reply_msg = self.make_first_reply(config, session, msg)
endpoints = json.dumps(
[entry['endpoint'] for entry in config.entries]
)
return StateResponse(
self.STATE_SELECT, {'endpoints': endpoints}, outbound=[reply_msg])
def handle_state_select(self, config, session, msg):
endpoint = self.get_endpoint_for_choice(msg, session)
if endpoint is None:
reply_msg = self.make_invalid_input_reply(config, session, msg)
return StateResponse(self.STATE_BAD_INPUT, outbound=[reply_msg])
if endpoint not in self.target_endpoints(config):
log.msg(("Router configuration change forced session "
"termination for user %s" % msg['from_addr']))
error_reply_msg = self.make_error_reply(msg, config)
return StateResponse(None, outbound=[error_reply_msg])
forwarded_msg = self.forwarded_message(
msg, content=None,
session_event=TransportUserMessage.SESSION_NEW)
log.msg("Switched to endpoint '%s' for user %s" %
(endpoint, msg['from_addr']))
return StateResponse(
self.STATE_SELECTED, {'active_endpoint': endpoint},
inbound=[(forwarded_msg, endpoint)])
def handle_state_selected(self, config, session, msg):
active_endpoint = session['active_endpoint']
if active_endpoint not in self.target_endpoints(config):
log.msg(("Router configuration change forced session "
"termination for user %s" % msg['from_addr']))
error_reply_msg = self.make_error_reply(msg, config)
return StateResponse(None, outbound=[error_reply_msg])
else:
return StateResponse(
self.STATE_SELECTED, inbound=[(msg, active_endpoint)])
def handle_state_bad_input(self, config, session, msg):
choice = self.get_menu_choice(msg, (1, 1))
if choice is None:
reply_msg = self.make_invalid_input_reply(config, session, msg)
return StateResponse(self.STATE_BAD_INPUT, outbound=[reply_msg])
else:
return self.handle_state_start(config, session, msg)
@inlineCallbacks
def handle_session_close(self, config, session, msg, connector_name):
user_id = msg['from_addr']
if (session.get('state', None) == self.STATE_SELECTED and
session['active_endpoint'] in self.target_endpoints(config)):
target = self.find_target(config, msg, connector_name, session)
yield self.publish_inbound(msg, target[0], target[1])
session_manager = yield self.session_manager(config)
yield session_manager.clear_session(user_id)
def create_menu(self, config):
labels = [entry['label'] for entry in config.entries]
return (config.menu_title + "\n" + mkmenu(labels))
def make_error_reply(self, msg, config):
return msg.reply(config.error_message, continue_session=False)
def find_target(self, config, msg, connector_name, session={}):
endpoint_name = session.get(
'active_endpoint', msg.get_routing_endpoint())
endpoint_routing = config.routing_table.get(connector_name)
if endpoint_routing is None:
log.warning("No routing information for connector '%s'" % (
connector_name,))
return None
target = endpoint_routing.get(endpoint_name)
if target is None:
log.warning("No routing information for endpoint '%s' on '%s'" % (
endpoint_name, connector_name,))
return None
return target
@inlineCallbacks
def process_inbound(self, config, msg, connector_name):
log.msg("Processing inbound message: %s" % (msg,))
user_id = msg['from_addr']
session_manager = yield self.session_manager(config)
session = yield session_manager.load_session(user_id)
session_event = msg['session_event']
if not session or session_event == TransportUserMessage.SESSION_NEW:
log.msg("Creating session for user %s" % user_id)
session = {}
state = self.STATE_START
yield session_manager.create_session(user_id, state=state)
elif session_event == TransportUserMessage.SESSION_CLOSE:
yield self.handle_session_close(
config, session, msg, connector_name)
return
else:
log.msg("Loading session for user %s: %s" % (user_id, session,))
state = session['state']
try:
# We must assume the state handlers might be async, even if the
# current implementations aren't. There is at least one test that
# depends on asynchrony here to hook into the state transition.
state_resp = yield self.handlers[state](config, session, msg)
if state_resp.next_state is None:
                # Session terminated (right now, just in the case of an
                # administrator-initiated configuration change).
yield session_manager.clear_session(user_id)
else:
session['state'] = state_resp.next_state
session.update(state_resp.session_update)
if state != state_resp.next_state:
log.msg("State transition for user %s: %s => %s" %
(user_id, state, state_resp.next_state))
yield session_manager.save_session(user_id, session)
for msg, endpoint in state_resp.inbound:
target = self.find_target(
config, msg, connector_name, session)
yield self.publish_inbound(msg, target[0], target[1])
for msg in state_resp.outbound:
yield self.process_outbound(config, msg, connector_name)
except:
log.err()
yield session_manager.clear_session(user_id)
yield self.process_outbound(
config, self.make_error_reply(msg, config), connector_name)
@inlineCallbacks
def process_outbound(self, config, msg, connector_name):
log.msg("Processing outbound message: %s" % (msg,))
user_id = msg['to_addr']
session_event = msg['session_event']
session_manager = yield self.session_manager(config)
session = yield session_manager.load_session(user_id)
if session and (session_event == TransportUserMessage.SESSION_CLOSE):
yield session_manager.clear_session(user_id)
yield self.cache_outbound_user_id(msg['message_id'],
msg['to_addr'])
target = self.find_target(config, msg, connector_name)
if target is None:
return
yield self.publish_outbound(msg, target[0], target[1])
def mk_msg_key(self, message_id):
return ':'.join(['cache', message_id])
@inlineCallbacks
def cache_outbound_user_id(self, message_id, user_id):
key = self.mk_msg_key(message_id)
yield self.redis.setex(
key, self.get_static_config().message_expiry, user_id)
def get_cached_user_id(self, message_id):
return self.redis.get(self.mk_msg_key(message_id))
@inlineCallbacks
def process_event(self, config, event, connector_name):
user_id = yield self.get_cached_user_id(event['user_message_id'])
session_manager = yield self.session_manager(config)
session = yield session_manager.load_session(user_id)
if not session.get('active_endpoint'):
target = None
else:
target = self.find_target(config, event, connector_name, session)
if target is None:
return
yield self.publish_event(event, target[0], target[1])
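# A hedged sketch of the dynamic config this dispatcher consumes (connector,
# endpoint and application names are invented for illustration):
#
#     {
#         "menu_title": "Please select a service.",
#         "entries": [
#             {"label": "Weather", "endpoint": "weather"},
#             {"label": "News", "endpoint": "news"},
#         ],
#         "routing_table": {
#             "ussd_transport": {
#                 "weather": ["weather_app", "default"],
#                 "news": ["news_app", "default"],
#             },
#         },
#     }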
class MessengerApplicationDispatcherConfig(ApplicationDispatcher.CONFIG_CLASS):
sub_title = ConfigText('The subtitle')
image_url = ConfigUrl('The URL for an image')
class MessengerApplicationDispatcher(ApplicationDispatcher):
CONFIG_CLASS = MessengerApplicationDispatcherConfig
def make_first_reply(self, config, session, msg):
msg = super(MessengerApplicationDispatcher, self).make_first_reply(
config, session, msg)
        # Magically render a Messenger menu if there are 3 or fewer items.
if len(config.entries) <= 3:
msg['helper_metadata']['messenger'] = {
'template_type': 'generic',
'title': config.menu_title,
'subtitle': config.sub_title,
'image_url': urlunparse(config.image_url),
'buttons': [{
'title': entry['label'],
'payload': {
"content": str(index + 1),
"in_reply_to": msg['message_id'],
}
} for (index, entry) in enumerate(config.entries)]
}
return msg
def make_invalid_input_reply(self, config, session, msg):
msg = super(
MessengerApplicationDispatcher, self).make_invalid_input_reply(
config, session, msg)
msg['helper_metadata']['messenger'] = {
'template_type': 'generic',
'title': config.menu_title,
'subtitle': config.invalid_input_message,
'image_url': urlunparse(config.image_url),
'buttons': [{
'title': config.try_again_message,
'payload': {
"content": '1',
"in_reply_to": msg['message_id'],
}
}]
}
return msg
| smn/vumi-app-router | vxapprouter/router.py | Python | bsd-3-clause | 14,970 |
'''
:synopsis: Prompt users for information
.. moduleauthor: Paul Diaconescu <p@afajl.com>
'''
import re
try:
import readline
has_readline = True
except ImportError:
has_readline = False
def _indent_out(question):
indent = 0
for c in question:
if c != ' ':
break
indent += 1
def out(msg, to_s=False):
s = ' '*indent + msg
if to_s:
return s
else:
print s
return out
def _to_type(answer, type):
''' Tries to convert the answer to the desired type '''
if type is None:
        # Don't convert
return answer, None
if type is int:
try:
return type(answer), None
except ValueError:
            return None, 'Answer must be an integer'
if type is float:
try:
return type(answer), None
except ValueError:
return None, 'Answer must be a float'
if type is bool:
if answer[0] in ('y', 'Y', 't', 'j'):
return True, None
elif answer[0] in ('n', 'N', 'f'):
return False, None
else:
return None, 'Answer yes or no'
    if type is str:
        return type(answer), None
    # Any other type is assumed to be a user-supplied conversion function
    # which itself returns an (answer, error) tuple (see ask() below).
    return type(answer)
def _run_checks(answer, checks):
''' Runs checks, (func, help) on answer '''
error = None
for test, help in checks:
if isinstance(test, str):
match = re.match(test, answer)
if not match:
error = help
break
if hasattr(test, 'match'):
match = test.match(answer)
if not match:
error = help
break
if hasattr(test, '__call__'):
if not test(answer):
error = help
break
return error
def ask(question, default='', type=None, checks=()):
''' Ask user a question
:arg question: Question to prompt for. Leading spaces will set the
indent level for the error responses.
:arg default: The default answer as a string.
:arg type: Python type the answer must have. Answers are converted
to the requested type before checks are run. Support for
str, int, float and bool are built in.
If you supply your own function it must take a string as
argument and return a tuple where the first value is the
               converted answer or None if it failed. If it fails, the
               second value is the error message displayed to the user,
example::
def int_list(answer):
try:
ints = [int(i) for i in answer.split(',')]
# Success!
return ints, None
except ValueError:
# Fail!
return None, 'You must supply a list of integers'
            sy.prompt.ask('Give me an intlist: ', type=int_list)
            Give me an intlist: 1, 2, 3
[1, 2, 3]
:arg checks: List of checks in the form ``[(check, errormsg), (check, ...)]``.
The check can be a regular expression string, a compiled
pattern or a function. The function must take a string as
argument and return True if the check passes. If the check
fails the errormsg is printed to the user.
'''
assert isinstance(default, str), 'Default must be a string'
# Get a print_error function that correctly indents
# the error message
print_error = _indent_out(question)
while True:
answer = raw_input(question).strip()
if not answer:
if default:
answer = default
else:
# ask again
continue
converted = answer
if type:
converted, error = _to_type(answer, type)
if error:
print_error(error)
continue
if checks:
error = _run_checks(converted, checks)
if error:
print_error(error)
continue
return converted
def confirm(question, default=''):
''' Ask a yes or no question
:arg default: True or False
:returns: Boolean answer
'''
if default is True:
default='y'
elif default is False:
default='n'
return ask(question,
default=default,
type=bool)
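# A small usage sketch (prompt text is illustrative); answers starting with
# 'y'/'t' map to True and 'n'/'f' to False via _to_type() above:
#
#     install = confirm('Install package? ', default=True)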
def choose(question, choices, multichoice=False, default=''):
''' Let user select one or more items from a list
Presents user with the question and the list of choices. Returns the index
of the choice selected. If ``multichoice``
    is true the user can pick more than one choice and a list of indexes is
returned::
choice = sy.prompt.choose('Pick one:', ['a', 'b', 'c'])
# Pick one:
# 1) a
# 2) b
# 3) c
# Choice: 1
print choice
0
choices = sy.prompt.choose('Pick one or more:', ['a', 'b', 'c'],
                            multichoice=True)
# Pick one or more:
# 1) a
# 2) b
# 3) c
# Choices: 1, 3
print choices
[0,2]
:arg question: Question to print before list of choices
:arg choices: List of choices. If the choice is not a string an attempt to
convert it to a string with :func:`str()` is made.
:arg multichoice: If True the user can pick multiple choices, separated by
commas. The return value will be a list of indexes in the
choices list that the user picked.
:arg default: Default choice as a string the user would have written, ex:
``"1,2"``.
'''
out = _indent_out(question)
print question
for i, choice in enumerate(choices):
out( '%d) %s' % (i+1, str(choice)) )
print
if multichoice:
choice_q = 'Choices: '
else:
choice_q = 'Choice: '
def to_index_list(answer):
try:
ints = [int(i) - 1 for i in re.split(r'\s*,\s*|\s+', answer)]
for i in ints:
if i < 0 or i >= len(choices):
return None, '%d is not a valid option' % (i + 1)
return ints, None
except ValueError:
return None, 'You must use numbers'
while True:
selected = ask(out(choice_q, to_s=True), type=to_index_list,
default=default)
if selected:
if not multichoice:
if len(selected) > 1:
out('Select one value')
continue
return selected[0]
else:
return selected
| afajl/sy | sy/prompt.py | Python | bsd-3-clause | 7,060 |
from .. import *
from bfg9000.arguments.windows import *
class TestWindowsArgParse(TestCase):
def test_empty(self):
parser = ArgumentParser()
self.assertEqual(parser.parse_known([]), ({}, []))
self.assertEqual(parser.parse_known(['extra']), ({}, ['extra']))
self.assertEqual(parser.parse_known(['/extra']), ({}, ['/extra']))
def test_short_bool(self):
parser = ArgumentParser()
parser.add('/a')
self.assertEqual(parser.parse_known([]), ({'a': None}, []))
self.assertEqual(parser.parse_known(['/a']), ({'a': True}, []))
self.assertEqual(parser.parse_known(['/a', '/a']), ({'a': True}, []))
parser = ArgumentParser()
parser.add('/a', '-a')
self.assertEqual(parser.parse_known([]), ({'a': None}, []))
self.assertEqual(parser.parse_known(['/a']), ({'a': True}, []))
self.assertEqual(parser.parse_known(['-a']), ({'a': True}, []))
def test_long_bool(self):
parser = ArgumentParser()
parser.add('/foo')
self.assertEqual(parser.parse_known([]), ({'foo': None}, []))
self.assertEqual(parser.parse_known(['/foo']), ({'foo': True}, []))
self.assertEqual(parser.parse_known(['/foo', '/foo']),
({'foo': True}, []))
parser = ArgumentParser()
parser.add('/foo', '-foo')
self.assertEqual(parser.parse_known([]), ({'foo': None}, []))
self.assertEqual(parser.parse_known(['/foo']), ({'foo': True}, []))
self.assertEqual(parser.parse_known(['-foo']), ({'foo': True}, []))
def test_short_str(self):
parser = ArgumentParser()
parser.add('/a', type=str)
self.assertEqual(parser.parse_known([]), ({'a': None}, []))
self.assertEqual(parser.parse_known(['/afoo']), ({'a': 'foo'}, []))
self.assertEqual(parser.parse_known(['/a', 'foo']), ({'a': 'foo'}, []))
self.assertEqual(parser.parse_known(['/afoo', '/a', 'bar']),
({'a': 'bar'}, []))
parser = ArgumentParser()
parser.add('/a', '-a', type=str)
self.assertEqual(parser.parse_known([]), ({'a': None}, []))
self.assertEqual(parser.parse_known(['/afoo']), ({'a': 'foo'}, []))
self.assertEqual(parser.parse_known(['-afoo']), ({'a': 'foo'}, []))
self.assertEqual(parser.parse_known(['/a', 'foo']), ({'a': 'foo'}, []))
self.assertEqual(parser.parse_known(['-a', 'foo']), ({'a': 'foo'}, []))
self.assertEqual(parser.parse_known(['/afoo', '-a', 'bar']),
({'a': 'bar'}, []))
def test_long_str(self):
parser = ArgumentParser()
parser.add('/foo', type=str)
self.assertEqual(parser.parse_known([]), ({'foo': None}, []))
self.assertEqual(parser.parse_known(['/foo:bar']),
({'foo': 'bar'}, []))
self.assertEqual(parser.parse_known(['/foo:bar', '/foo:baz']),
({'foo': 'baz'}, []))
parser = ArgumentParser()
parser.add('/foo', '-foo', type=str)
self.assertEqual(parser.parse_known([]), ({'foo': None}, []))
self.assertEqual(parser.parse_known(['/foo:bar']),
({'foo': 'bar'}, []))
self.assertEqual(parser.parse_known(['-foo:bar']),
({'foo': 'bar'}, []))
self.assertEqual(parser.parse_known(['/foo:bar', '-foo:baz']),
({'foo': 'baz'}, []))
def test_short_list(self):
parser = ArgumentParser()
parser.add('/a', type=list)
self.assertEqual(parser.parse_known([]), ({'a': []}, []))
self.assertEqual(parser.parse_known(['/afoo']), ({'a': ['foo']}, []))
self.assertEqual(parser.parse_known(['/a', 'foo']),
({'a': ['foo']}, []))
self.assertEqual(parser.parse_known(['/afoo', '/a', 'bar']),
({'a': ['foo', 'bar']}, []))
parser = ArgumentParser()
parser.add('/a', '-a', type=list)
self.assertEqual(parser.parse_known([]), ({'a': []}, []))
self.assertEqual(parser.parse_known(['/afoo']), ({'a': ['foo']}, []))
self.assertEqual(parser.parse_known(['-afoo']), ({'a': ['foo']}, []))
self.assertEqual(parser.parse_known(['/a', 'foo']),
({'a': ['foo']}, []))
self.assertEqual(parser.parse_known(['-a', 'foo']),
({'a': ['foo']}, []))
self.assertEqual(parser.parse_known(['/afoo', '-a', 'bar']),
({'a': ['foo', 'bar']}, []))
def test_long_list(self):
parser = ArgumentParser()
parser.add('/foo', type=list)
self.assertEqual(parser.parse_known([]), ({'foo': []}, []))
self.assertEqual(parser.parse_known(['/foo:bar']),
({'foo': ['bar']}, []))
self.assertEqual(parser.parse_known(['/foo:bar', '/foo:baz']),
({'foo': ['bar', 'baz']}, []))
parser = ArgumentParser()
parser.add('/foo', '-foo', type=list)
self.assertEqual(parser.parse_known([]), ({'foo': []}, []))
self.assertEqual(parser.parse_known(['/foo:bar']),
({'foo': ['bar']}, []))
self.assertEqual(parser.parse_known(['-foo:bar']),
({'foo': ['bar']}, []))
self.assertEqual(parser.parse_known(['/foo:bar', '-foo:baz']),
({'foo': ['bar', 'baz']}, []))
def test_short_dict(self):
parser = ArgumentParser()
warn = parser.add('/W', type=dict, dest='warn')
warn.add('1', '2', '3', '4', 'all', dest='level')
warn.add('X', type=bool, dest='error')
warn.add('X-', type=bool, dest='error', value=False)
warn.add('v', type=str, dest='version')
self.assertEqual(parser.parse_known([]), ({
'warn': {'level': None, 'error': None, 'version': None}
}, []))
self.assertEqual(parser.parse_known(['/W2']), ({
'warn': {'level': '2', 'error': None, 'version': None}
}, []))
self.assertEqual(parser.parse_known(['/W2', '/W4']), ({
'warn': {'level': '4', 'error': None, 'version': None}
}, []))
self.assertEqual(parser.parse_known(['/W2', '/WX']), ({
'warn': {'level': '2', 'error': True, 'version': None}
}, []))
self.assertEqual(parser.parse_known(['/Wv17']), ({
'warn': {'level': None, 'error': None, 'version': '17'}
}, []))
self.assertEqual(parser.parse_known(['/Wfoo']), ({
'warn': {'level': None, 'error': None, 'version': None}
}, ['/Wfoo']))
self.assertEqual(parser.parse_known(
['/WX', '/W2', '/WX-', '/Wall', '/Wv17', '/Wfoo']
), ({'warn': {'level': 'all', 'error': False, 'version': '17'}},
['/Wfoo']))
def test_long_dict(self):
parser = ArgumentParser()
warn = parser.add('/Warn', type=dict, dest='warn')
warn.add('1', '2', '3', '4', 'all', dest='level')
warn.add('X', type=bool, dest='error')
warn.add('X-', type=bool, dest='error', value=False)
warn.add('v', type=str, dest='version')
self.assertEqual(parser.parse_known([]), ({
'warn': {'level': None, 'error': None, 'version': None}
}, []))
self.assertEqual(parser.parse_known(['/Warn:2']), ({
'warn': {'level': '2', 'error': None, 'version': None}
}, []))
self.assertEqual(parser.parse_known(['/Warn:2', '/Warn:4']), ({
'warn': {'level': '4', 'error': None, 'version': None}
}, []))
self.assertEqual(parser.parse_known(['/Warn:2', '/Warn:X']), ({
'warn': {'level': '2', 'error': True, 'version': None}
}, []))
self.assertEqual(parser.parse_known(['/Warn:v17']), ({
'warn': {'level': None, 'error': None, 'version': '17'}
}, []))
self.assertEqual(parser.parse_known(['/Warn:foo']), ({
'warn': {'level': None, 'error': None, 'version': None}
}, ['/Warn:foo']))
self.assertEqual(parser.parse_known(
['/Warn:X', '/Warn:2', '/Warn:X-', '/Warn:all', '/Warn:v17',
'/Warn:foo']
), ({'warn': {'level': 'all', 'error': False, 'version': '17'}},
['/Warn:foo']))
def test_alias(self):
parser = ArgumentParser()
nologo = parser.add('/nologo')
warn = parser.add('/W', type=dict, dest='warn')
warn.add('0', '1', '2', '3', '4', 'all', dest='level')
parser.add('/N', type='alias', base=nologo)
parser.add('/w', type='alias', base=warn, value='0')
self.assertEqual(parser.parse_known([]),
({'nologo': None, 'warn': {'level': None}}, []))
self.assertEqual(parser.parse_known(['/N']),
({'nologo': True, 'warn': {'level': None}}, []))
self.assertEqual(parser.parse_known(['/w']),
({'nologo': None, 'warn': {'level': '0'}}, []))
def test_unnamed(self):
parser = ArgumentParser()
parser.add('/a')
parser.add_unnamed('libs')
self.assertEqual(parser.parse_known([]),
({'a': None, 'libs': []}, []))
self.assertEqual(parser.parse_known(['foo']),
({'a': None, 'libs': ['foo']}, []))
self.assertEqual(parser.parse_known(['foo', '/a', 'bar']),
({'a': True, 'libs': ['foo', 'bar']}, []))
def test_case(self):
parser = ArgumentParser()
parser.add('/s')
parser.add('/long')
self.assertEqual(parser.parse_known(['/s', '/long']),
({'s': True, 'long': True}, []))
self.assertEqual(parser.parse_known(['/S', '/LONG']),
({'s': None, 'long': None}, ['/S', '/LONG']))
parser = ArgumentParser(case_sensitive=False)
parser.add('/s')
parser.add('/long')
self.assertEqual(parser.parse_known(['/s', '/long']),
({'s': True, 'long': True}, []))
self.assertEqual(parser.parse_known(['/S', '/LONG']),
({'s': None, 'long': True}, ['/S']))
def test_collision(self):
parser = ArgumentParser()
parser.add('/a', '/longa')
with self.assertRaises(ValueError):
parser.add('/a')
with self.assertRaises(ValueError):
parser.add('/abc')
with self.assertRaises(ValueError):
parser.add('/longa')
def test_invalid_prefix_char(self):
parser = ArgumentParser()
with self.assertRaises(ValueError):
parser.add('warn')
def test_unexpected_value(self):
parser = ArgumentParser()
parser.add('/a', '/longa')
with self.assertRaises(ValueError):
parser.parse_known(['/afoo'])
with self.assertRaises(ValueError):
parser.parse_known(['/longa:foo'])
def test_expected_value(self):
parser = ArgumentParser()
parser.add('/a', '/longa', type=str)
parser.add('/list', type=list)
warn = parser.add('/warn', type=dict, dest='warn')
warn.add('1', '2', '3', '4', 'all', dest='level')
with self.assertRaises(ValueError):
parser.parse_known(['/a'])
with self.assertRaises(ValueError):
parser.parse_known(['/longa'])
with self.assertRaises(ValueError):
parser.parse_known(['/list'])
with self.assertRaises(ValueError):
parser.parse_known(['/warn'])
def test_invalid_dict_child(self):
parser = ArgumentParser()
warn = parser.add('/W', type=dict, dest='warn')
with self.assertRaises(ValueError):
warn.add('version', type=str)
def test_unexpected_dict_value(self):
parser = ArgumentParser()
warn = parser.add('/W', type=dict, dest='warn', strict=True)
warn.add('1', '2', '3', '4', 'all', dest='level')
with self.assertRaises(ValueError):
parser.parse_known(['/WX'])
def test_invalid_alias_base(self):
parser = ArgumentParser()
warn = parser.add('/W')
with self.assertRaises(TypeError):
parser.add('/w', type='alias', base=warn, value='0')
| jimporter/bfg9000 | test/unit/arguments/test_windows.py | Python | bsd-3-clause | 12,365 |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
from coremltools._deps import _HAS_ONNX, MSG_ONNX_NOT_FOUND
if _HAS_ONNX:
import onnx
from onnx import helper, numpy_helper, TensorProto
from coremltools.converters.onnx._graph import Node, Graph
from ._test_utils import (
_onnx_create_single_node_model,
_onnx_create_model,
_conv_pool_output_size,
_random_array,
)
@unittest.skipUnless(_HAS_ONNX, MSG_ONNX_NOT_FOUND)
class NodeTest(unittest.TestCase):
def test_create_node(self): # type: () -> None
model = _onnx_create_single_node_model(
"Elu", [(1, 3, 224, 224)], [(1, 3, 224, 224)], alpha=0.5
)
graph = model.graph
node = graph.node[0]
node_ = Node.from_onnx(node)
self.assertTrue(len(node_.inputs) == 1)
self.assertTrue(len(node_.outputs) == 1)
self.assertTrue(len(node_.attrs) == 1)
self.assertTrue(node_.attrs["alpha"] == 0.5)
@unittest.skipUnless(_HAS_ONNX, MSG_ONNX_NOT_FOUND)
class GraphTest(unittest.TestCase):
def test_create_graph(self): # type: () -> None
kernel_shape = (3, 2)
strides = (2, 3)
pads = (4, 2, 4, 2)
dilations = (1, 2)
group = 1
weight = numpy_helper.from_array(_random_array((16, 3, 3, 2)), name="weight")
input_shape = (1, 3, 224, 224)
output_size = _conv_pool_output_size(
input_shape, dilations, kernel_shape, pads, strides
)
output_shape = (1, int(weight.dims[0]), output_size[0], output_size[1])
inputs = [("input0", input_shape)]
outputs = [("output0", output_shape, TensorProto.FLOAT)]
conv = helper.make_node(
"Conv",
inputs=[inputs[0][0], "weight"],
outputs=["conv_output"],
dilations=dilations,
group=group,
kernel_shape=kernel_shape,
pads=pads,
strides=strides,
)
relu = helper.make_node(
"Relu", inputs=[conv.output[0]], outputs=[outputs[0][0]]
)
model = _onnx_create_model([conv, relu], inputs, outputs, [weight])
graph_ = Graph.from_onnx(model.graph, onnx_ir_version=5)
self.assertTrue(len(graph_.inputs) == 1)
self.assertEqual(graph_.inputs[0][2], input_shape)
self.assertTrue(len(graph_.outputs) == 1)
self.assertEqual(graph_.outputs[0][2], output_shape)
self.assertTrue(len(graph_.nodes) == 2)
self.assertEqual(len(graph_.nodes[0].parents), 0)
self.assertEqual(len(graph_.nodes[1].parents), 1)
self.assertEqual(len(graph_.nodes[0].children), 1)
self.assertEqual(len(graph_.nodes[1].children), 0)
| apple/coremltools | coremltools/converters/onnx/_tests/test_graph.py | Python | bsd-3-clause | 2,852 |
from __future__ import unicode_literals
from io import TextIOBase
try:
TextIOBase = file
except NameError:
pass # Forward compatibility with Py3k
from bs4 import BeautifulSoup
import re
from hocrgeo.models import HOCRDocument
import logging
logger = logging.getLogger(__name__)
logger.addHandler(logging.NullHandler())
logger.setLevel(logging.INFO)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
logger.addHandler(ch)
class HOCRParser:
"""
Parse hOCR documents
Takes either a file-like object or a filename
"""
def __init__(self, fs=None):
'''
Initializes a HOCRParser
:param input: Optional file-like object to read or hOCR as a string.
'''
self._rawdata = None
self._bboxreg = re.compile(r'bbox (?P<x0>\d+) (?P<y0>\d+) (?P<x1>\d+) (?P<y1>\d+)')
self._imagereg = re.compile(r'image (\'|\")(.*)\1')
self._pagenoreg = re.compile(r'ppageno (\d+)')
self._doc = None
self._parseddata = None
if fs:
self._rawdata = self._get_data_string(fs)
def _get_data_string(self, fs):
if isinstance(fs, TextIOBase):
return fs.read()
else:
try:
if isinstance(fs, unicode):
return fs
else:
clean_fs = unicode(fs, encoding='utf-8')
if isinstance(clean_fs, unicode):
return clean_fs
except NameError:
if isinstance(fs, str):
return fs
raise TypeError('Input is not a readable string or file object')
def load(self, fsfile):
'''Load a file from a filepath or a file-like instance'''
fp = None
if isinstance(fsfile, str):
try:
fp = open(fsfile, 'rb')
except IOError as e:
raise e
        elif isinstance(fsfile, TextIOBase):
fp = fsfile
else:
raise TypeError('argument must be a file object or a valid filepath')
self._rawdata = self._get_data_string(fp)
def loads(self, fs):
if isinstance(fs, str):
self._rawdata = self._get_data_string(fs)
else:
raise TypeError('argument must be a string or unicode instance')
@property
def document(self):
'''Parsed HOCR document'''
return self._doc
def parse(self):
'''Parse hOCR document into a python object.'''
def _extract_objects_from_element(root, el_name, el_class):
nodes = root.find_all(el_name, el_class)
objects = []
for n in nodes:
obj = _extract_features(n)
objects.append(obj)
return (nodes, objects)
def _extract_bbox(fs_str):
'''Regular expression matching on a fs_str that should contain hOCR bbox coordinates.'''
match = self._bboxreg.search(fs_str)
if match:
match_tup = match.groups()
match_list = []
for value in match_tup:
match_list.append(int(value))
return tuple(match_list)
return None
def _extract_features(element):
'''Extract basic hOCR features from a given element.'''
features = {}
features['id'] = element.get('id')
title_el = element.get('title', '')
image_match = self._imagereg.search(title_el)
if image_match:
features['image'] = image_match.group(2)
pageno_match = self._pagenoreg.search(title_el)
if pageno_match:
features['pageno'] = int(pageno_match.group(1))
features['bbox'] = _extract_bbox(title_el)
return features
if not self._rawdata:
raise Exception('No fsfile specified. You must specify an fs file when instantiating or as an argument to the parse method')
soup = BeautifulSoup(self._rawdata, "lxml")
self._parseddata = {}
# Extract ocr system metadata
ocr_system = soup.find('meta', attrs={'name': 'ocr-system'})
self._parseddata['system'] = ocr_system.get('content', None) if ocr_system else None
# Extract capabilities
ocr_capabilities = soup.find('meta', attrs={'name': 'ocr-capabilities'})
self._parseddata['capabilities'] = ocr_capabilities.get('content', ' ').split(' ') if ocr_capabilities else None
page_nodes, page_objects = _extract_objects_from_element(soup, 'div', 'ocr_page')
page_tup = list(zip(page_nodes, page_objects))
logger.info('Found {0} page(s)'.format(len(page_tup)))
for page_node, page_obj in page_tup:
carea_nodes, carea_objects = _extract_objects_from_element(page_node, 'div', 'ocr_carea')
careas_tup = list(zip(carea_nodes, carea_objects))
for c_node, c_obj in careas_tup:
para_nodes, para_objects = _extract_objects_from_element(c_node, 'p', 'ocr_par')
paras_tup = list(zip(para_nodes, para_objects))
for para_node, para_obj in paras_tup:
line_nodes, line_objects = _extract_objects_from_element(para_node, 'span', 'ocr_line')
lines_tup = list(zip(line_nodes, line_objects))
for l_node, l_obj in lines_tup:
word_nodes, word_objects = _extract_objects_from_element(l_node, 'span', 'ocrx_word')
words_tup = list(zip(word_nodes, word_objects))
for w_node, w_obj in words_tup:
word_str = w_node.get_text(strip=True)
if word_str:
# logger.info(word_str)
w_obj['text'] = w_node.get_text()
l_obj['words'] = word_objects
para_obj['lines'] = line_objects
c_obj['paragraphs'] = para_objects
page_obj['careas'] = carea_objects
self._parseddata['pages'] = page_objects
self._doc = HOCRDocument(self._parseddata)
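# A minimal usage sketch (the file path is illustrative):
#
#     parser = HOCRParser()
#     parser.load('scan.hocr')
#     parser.parse()
#     doc = parser.document  # HOCRDocument with pages/careas/paragraphs/lines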
| pdfliberation/python-hocrgeo | hocrgeo/parsers/hocr.py | Python | bsd-3-clause | 6,224 |
import socket
import fcntl
import struct
import zipfile
import tarfile
import readline
from subprocess import Popen, PIPE
from conpaas.core.https.server import FileUploadField
def file_get_contents(filepath):
f = open(filepath, 'r')
filecontent = f.read()
f.close()
return filecontent
def file_write_contents(filepath, filecontent):
f = open(filepath, 'w')
f.write(filecontent)
f.close()
def verify_port(port):
    '''Raise TypeError if port is not an integer.
    Raise ValueError if port is an invalid integer value.
'''
if type(port) != int: raise TypeError('port should be an integer')
if port < 1 or port > 65535: raise ValueError('port should be a valid port number')
def verify_ip_or_domain(ip):
    '''Raise TypeError if ip is not a string.
Raise ValueError if ip is an invalid IP address in dot notation.
'''
if (type(ip) != str and type(ip) != unicode):
        raise TypeError('IP should be a string')
try:
socket.gethostbyname(ip)
except Exception as e:
raise ValueError('Invalid IP string "%s" -- %s' % (ip, e))
def verify_ip_port_list(l):
'''Check l is a list of [IP, PORT]. Raise appropriate Error if invalid types
or values were found
'''
if type(l) != list:
raise TypeError('Expected a list of [IP, PORT]')
for pair in l:
### FIXME HECTOR ...
#if len(pair) != 2:
if len(pair) < 2:
raise TypeError('List should contain IP,PORT values')
if 'ip' not in pair or 'port' not in pair:
raise TypeError('List should contain IP,PORT values')
verify_ip_or_domain(pair['ip'])
verify_port(pair['port'])
def archive_get_type(name):
if tarfile.is_tarfile(name):
return 'tar'
elif zipfile.is_zipfile(name):
return 'zip'
else: return None
def archive_open(name):
if tarfile.is_tarfile(name):
return tarfile.open(name)
elif zipfile.is_zipfile(name):
return zipfile.ZipFile(name)
else: return None
def archive_get_members(arch):
if isinstance(arch, zipfile.ZipFile):
members = arch.namelist()
elif isinstance(arch, tarfile.TarFile):
members = [ i.name for i in arch.getmembers() ]
return members
def archive_extract(arch, path):
if isinstance(arch, zipfile.ZipFile):
arch.extractall(path)
elif isinstance(arch, tarfile.TarFile):
arch.extractall(path=path)
def archive_close(arch):
if isinstance(arch, zipfile.ZipFile)\
or isinstance(arch, tarfile.TarFile):
arch.close()
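# A short sketch of the archive helpers above (the file name is illustrative):
#
#     arch = archive_open('bundle.tar.gz')   # tar or zip, detected for us
#     members = archive_get_members(arch)
#     archive_extract(arch, '/tmp/bundle')
#     archive_close(arch)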
def get_ip_address(ifname):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
return socket.inet_ntoa(fcntl.ioctl(
s.fileno(),
0x8915, # SIOCGIFADDR
struct.pack('256s', ifname[:15])
)[20:24])
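# e.g. get_ip_address('eth0') -> '10.0.2.15' (Linux-only; value illustrative).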
def run_cmd(cmd, directory='/'):
pipe = Popen(cmd, shell=True, cwd=directory, stdout=PIPE, stderr=PIPE)
out, error = pipe.communicate()
_return_code = pipe.wait()
return out, error
def run_cmd_code(cmd, directory='/'):
"""Same as run_cmd but it returns also the return code.
Parameters
----------
cmd : string
command to run in a shell
directory : string, default to '/'
directory where to run the command
Returns
-------
std_out, std_err, return_code
a triplet with standard output, standard error output and return code.
std_out : string
std_err : string
return_code : int
"""
pipe = Popen(cmd, shell=True, cwd=directory, stdout=PIPE, stderr=PIPE)
out, error = pipe.communicate()
return_code = pipe.wait()
return out, error, return_code
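# e.g. out, err, rc = run_cmd_code('ls -l', '/tmp')  # rc == 0 on success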
def rlinput(prompt, prefill=''):
readline.set_startup_hook(lambda: readline.insert_text(prefill))
try:
return raw_input(prompt)
finally:
readline.set_startup_hook()
def list_lines(lines):
"""Returns the list of trimmed lines.
@param lines Multi-line string
"""
return list(filter(None, (x.strip() for x in lines.splitlines())))
def is_constraint(constraint, filter_res, errmsg):
def filter_constraint(arg):
if constraint(arg):
return filter_res(arg)
else:
raise Exception(errmsg(arg))
return filter_constraint
def represents_int(s):
try:
int(s)
return True
except ValueError:
return False
def is_int(argument):
return is_constraint(lambda arg: represents_int(arg),
lambda arg: int(arg),
lambda arg: "'%s' has type '%s', should be integer" % (arg, type(arg).__name__))(argument)
def represents_bool(s):
return str(s).lower() in ("yes", "y", "true", "t", "1",
"no", "n", "false", "f", "0")
def is_bool(argument):
return is_constraint(lambda arg: represents_bool(arg),
lambda arg: str(arg).lower() in ("yes", "y", "true", "t", "1"),
lambda arg: "'%s' has type '%s', should be bool" % (arg, type(arg).__name__))(argument)
def is_more_than(minval):
return is_constraint(lambda arg: arg > minval,
lambda arg: arg,
lambda arg: "%s is not more than %s" % (arg, minval))
def is_more_or_eq_than(minval):
return is_constraint(lambda arg: arg >= minval,
lambda arg: arg,
lambda arg: "%s is not more or equal than %s" % (arg, minval))
def is_between(minval, maxval):
return is_constraint(lambda arg: arg >= minval and arg <= maxval,
lambda arg: arg,
lambda arg: "%s is not between %s and %s" % (arg, minval, maxval))
def is_pos_int(argument):
argint = is_int(argument)
return is_more_than(0)(argint)
def is_pos_nul_int(argument):
argint = is_int(argument)
return is_more_or_eq_than(0)(argint)
def is_in_list(exp_list):
return is_constraint(lambda arg: arg in exp_list,
lambda arg: arg,
lambda arg: "'%s' must be one of %s" % (arg, exp_list))
def is_not_in_list(exp_list):
return is_constraint(lambda arg: arg not in exp_list,
lambda arg: arg,
lambda arg: "'%s' must not be one of %s" % (arg, exp_list))
def is_string(argument):
return is_constraint(lambda arg: isinstance(arg, str) or isinstance(arg, unicode),
lambda arg: arg,
lambda arg: "'%s' has type '%s', should be string" % (arg, type(arg).__name__))(argument)
def is_non_empty_list(argument):
return is_constraint(lambda arg: isinstance(arg, list) and len(arg) > 0,
lambda arg: arg,
lambda arg: "'%s' has type '%s', should be non-empty list" % (arg, type(arg).__name__))(argument)
def is_list(argument):
return is_constraint(lambda arg: isinstance(arg, list),
lambda arg: arg,
lambda arg: "'%s' has type '%s', should be list" % (arg, type(arg).__name__))(argument)
def is_dict(argument):
return is_constraint(lambda arg: isinstance(arg, dict),
lambda arg: arg,
lambda arg: "'%s' has type '%s', should be dict" % (arg, type(arg).__name__))(argument)
def is_uploaded_file(argument):
return is_constraint(lambda arg: isinstance(arg, FileUploadField),
lambda arg: arg,
lambda arg: "'%s' has type '%s', should be uploaded file" % (arg, type(arg).__name__))(argument)
def is_dict2(mandatory_keys, optional_keys=None):
def _dict2(argument):
argdict = is_dict(argument)
keys = argument.keys()
for mand_key in mandatory_keys:
try:
keys.remove(mand_key)
except:
raise Exception("Was expecting key '%s' in dict '%s'" \
% (mand_key, argdict))
if optional_keys is None:
_optional_keys = []
else:
_optional_keys = optional_keys
for opt_key in _optional_keys:
try:
keys.remove(opt_key)
except:
continue
if len(keys) > 0:
raise Exception("Unexpected key in dict '%s': '%s'" % (argdict, keys))
return argdict
return _dict2
def is_list_dict(argument):
mylist = is_list(argument)
for arg in mylist:
_dict = is_dict(arg)
return mylist
def is_list_dict2(mandatory_keys, optional_keys=None):
def _list_dict2(argument):
mylist = is_list_dict(argument)
for arg in mylist:
_dict = is_dict2(mandatory_keys, optional_keys)(arg)
return mylist
return _list_dict2
def check_arguments(expected_params, args):
""" Check, convert POST arguments provided as dict.
Parameter
---------
expected_params: list
list of expected parameters where a parameter is a tuple
(name, constraint) for mandatory argument
(name, constraint, default_value) for optional parameter
where constraint is 'string', 'int', 'posint', 'posnulint', 'list'
args: dict
args[name] = value
Returns
-------
A list of all correct and converted parameters in the same order
as the expected_params argument.
Or raise an exception if one of the expected arguments is not there,
or does not respect the corresponding constraint, or was not expected.
"""
parsed_args = []
for param in expected_params:
if len(param) >= 2:
name = param[0]
constraint = param[1]
if name in args:
value = args.pop(name)
try:
parsed_value = constraint(value)
parsed_args.append(parsed_value)
except Exception as ex:
raise Exception("Parameter '%s': %s." % (name, ex))
else:
if len(param) >= 3:
default_value = param[2]
# TODO: decide whether the default value should satisfy the constraint
parsed_args.append(default_value)
else:
raise Exception("Missing the mandatory parameter '%s'." % name)
else:
raise Exception("Unexpected number of arguments describing a parameter: %s" % param)
if len(args) > 0:
raise Exception("Unexpected parameters: %s." % args)
if len(parsed_args) == 1:
return parsed_args[0]
else:
return parsed_args
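# A hedged sketch of check_arguments() (parameter names are invented):
#
#     expected = [('name', is_string), ('count', is_pos_int, 1)]
#     name, count = check_arguments(expected, {'name': 'web', 'count': '3'})
#     # -> 'web', 3   ('count' would fall back to 1 if absent)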
| ConPaaS-team/conpaas | conpaas-services/src/conpaas/core/misc.py | Python | bsd-3-clause | 10,674 |
"""
This module houses the ctypes function prototypes for OGR DataSource
related data structures. OGR_Dr_*, OGR_DS_*, OGR_L_*, OGR_F_*,
OGR_Fld_* routines are relevant here.
"""
from ctypes import c_char_p, c_int, c_long, c_void_p, POINTER
from django.contrib.gis.gdal.envelope import OGREnvelope
from django.contrib.gis.gdal.libgdal import lgdal
from django.contrib.gis.gdal.prototypes.generation import \
const_string_output, double_output, geom_output, int_output, \
srs_output, void_output, voidptr_output
c_int_p = POINTER(c_int) # shortcut type
### Driver Routines ###
register_all = void_output(lgdal.OGRRegisterAll, [], errcheck=False)
cleanup_all = void_output(lgdal.OGRCleanupAll, [], errcheck=False)
get_driver = voidptr_output(lgdal.OGRGetDriver, [c_int])
get_driver_by_name = voidptr_output(lgdal.OGRGetDriverByName, [c_char_p])
get_driver_count = int_output(lgdal.OGRGetDriverCount, [])
get_driver_name = const_string_output(lgdal.OGR_Dr_GetName, [c_void_p])
### DataSource ###
open_ds = voidptr_output(lgdal.OGROpen, [c_char_p, c_int, POINTER(c_void_p)])
destroy_ds = void_output(lgdal.OGR_DS_Destroy, [c_void_p], errcheck=False)
release_ds = void_output(lgdal.OGRReleaseDataSource, [c_void_p])
get_ds_name = const_string_output(lgdal.OGR_DS_GetName, [c_void_p])
get_layer = voidptr_output(lgdal.OGR_DS_GetLayer, [c_void_p, c_int])
get_layer_by_name = voidptr_output(lgdal.OGR_DS_GetLayerByName, [c_void_p, c_char_p])
get_layer_count = int_output(lgdal.OGR_DS_GetLayerCount, [c_void_p])
### Layer Routines ###
get_extent = void_output(lgdal.OGR_L_GetExtent, [c_void_p, POINTER(OGREnvelope), c_int])
get_feature = voidptr_output(lgdal.OGR_L_GetFeature, [c_void_p, c_long])
get_feature_count = int_output(lgdal.OGR_L_GetFeatureCount, [c_void_p, c_int])
get_layer_defn = voidptr_output(lgdal.OGR_L_GetLayerDefn, [c_void_p])
get_layer_srs = srs_output(lgdal.OGR_L_GetSpatialRef, [c_void_p])
get_next_feature = voidptr_output(lgdal.OGR_L_GetNextFeature, [c_void_p])
reset_reading = void_output(lgdal.OGR_L_ResetReading, [c_void_p], errcheck=False)
### Feature Definition Routines ###
get_fd_geom_type = int_output(lgdal.OGR_FD_GetGeomType, [c_void_p])
get_fd_name = const_string_output(lgdal.OGR_FD_GetName, [c_void_p])
get_feat_name = const_string_output(lgdal.OGR_FD_GetName, [c_void_p])
get_field_count = int_output(lgdal.OGR_FD_GetFieldCount, [c_void_p])
get_field_defn = voidptr_output(lgdal.OGR_FD_GetFieldDefn, [c_void_p, c_int])
### Feature Routines ###
clone_feature = voidptr_output(lgdal.OGR_F_Clone, [c_void_p])
destroy_feature = void_output(lgdal.OGR_F_Destroy, [c_void_p], errcheck=False)
feature_equal = int_output(lgdal.OGR_F_Equal, [c_void_p, c_void_p])
get_feat_geom_ref = geom_output(lgdal.OGR_F_GetGeometryRef, [c_void_p])
get_feat_field_count = int_output(lgdal.OGR_F_GetFieldCount, [c_void_p])
get_feat_field_defn = voidptr_output(lgdal.OGR_F_GetFieldDefnRef, [c_void_p, c_int])
get_fid = int_output(lgdal.OGR_F_GetFID, [c_void_p])
get_field_as_datetime = int_output(lgdal.OGR_F_GetFieldAsDateTime, [c_void_p, c_int, c_int_p, c_int_p, c_int_p, c_int_p, c_int_p, c_int_p])
get_field_as_double = double_output(lgdal.OGR_F_GetFieldAsDouble, [c_void_p, c_int])
get_field_as_integer = int_output(lgdal.OGR_F_GetFieldAsInteger, [c_void_p, c_int])
get_field_as_string = const_string_output(lgdal.OGR_F_GetFieldAsString, [c_void_p, c_int])
get_field_index = int_output(lgdal.OGR_F_GetFieldIndex, [c_void_p, c_char_p])
### Field Routines ###
get_field_name = const_string_output(lgdal.OGR_Fld_GetNameRef, [c_void_p])
get_field_precision = int_output(lgdal.OGR_Fld_GetPrecision, [c_void_p])
get_field_type = int_output(lgdal.OGR_Fld_GetType, [c_void_p])
get_field_type_name = const_string_output(lgdal.OGR_GetFieldTypeName, [c_int])
get_field_width = int_output(lgdal.OGR_Fld_GetWidth, [c_void_p])
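# A hedged sketch of how these prototypes chain together (the shapefile path
# is illustrative; in practice the gdal.DataSource class wraps these calls):
#
#     from ctypes import byref, c_void_p
#     register_all()
#     driver = c_void_p()
#     ds = open_ds('/data/cities.shp', 0, byref(driver))  # 0 = read-only
#     num_layers = get_layer_count(ds)
#     destroy_ds(ds)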
| paulsmith/geodjango | django/contrib/gis/gdal/prototypes/ds.py | Python | bsd-3-clause | 3,839 |
from .. import mq
class MQ(mq.MQ):
"""Redis Message Broker
"""
def __init__(self, backend, store):
super().__init__(backend, store)
self._client = store.client()
async def get_message(self, *queues):
'''Asynchronously retrieve a :class:`Task` from queues
:return: a :class:`.Task` or ``None``.
'''
assert queues
args = [self.prefixed(q) for q in queues]
args.append(max(1, int(self.cfg.task_pool_timeout)))
qt = await self._client.execute('brpop', *args)
if qt:
_, message = qt
return self.decode(message)
async def flush_queues(self, *queues):
'''Clear a list of task queues
'''
pipe = self._client.pipeline()
for queue in queues:
pipe.execute('del', self.prefixed(queue))
await pipe.commit()
async def queue_message(self, queue, message):
'''Asynchronously queue a task
'''
await self._client.lpush(self.prefixed(queue), message)
async def size(self, *queues):
pipe = self._client.pipeline()
for queue in queues:
pipe.execute('llen', self.prefixed(queue))
sizes = await pipe.commit()
return sizes
async def incr(self, name):
concurrent = await self._client.incr(self.prefixed(name))
return concurrent
async def decr(self, name):
concurrent = await self._client.decr(self.prefixed(name))
return concurrent
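# A hedged sketch of the round-trip (backend/store wiring omitted; the queue
# name is illustrative):
#
#     await mq.queue_message('tasks', message)  # LPUSH on prefixed('tasks')
#     task = await mq.get_message('tasks')      # BRPOP with pool timeout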
| quantmind/pulsar-queue | pq/backends/redis.py | Python | bsd-3-clause | 1,508 |
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'UsbRoles.ui'
#
# Created: Wed Jun 10 09:17:48 2015
# by: PyQt4 UI code generator 4.11.3
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
try:
_fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
def _fromUtf8(s):
return s
try:
_encoding = QtGui.QApplication.UnicodeUTF8
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig, _encoding)
except AttributeError:
def _translate(context, text, disambig):
return QtGui.QApplication.translate(context, text, disambig)
class Ui_UsbRoles(object):
def setupUi(self, UsbRoles):
UsbRoles.setObjectName(_fromUtf8("UsbRoles"))
UsbRoles.resize(210, 92)
icon = QtGui.QIcon()
icon.addPixmap(QtGui.QPixmap(_fromUtf8(":/images/icon.png")), QtGui.QIcon.Normal, QtGui.QIcon.Off)
UsbRoles.setWindowIcon(icon)
UsbRoles.setStyleSheet(_fromUtf8("QDialog {\n"
" background-color: qlineargradient(spread:pad, x1:1, y1:0.682, x2:0.966825, y2:0, stop:0 rgba(224, 224, 224, 255), stop:1 rgba(171, 171, 171, 255));\n"
"}"))
self.buttonBox = QtGui.QDialogButtonBox(UsbRoles)
self.buttonBox.setGeometry(QtCore.QRect(20, 60, 181, 32))
self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
self.buttonBox.setStandardButtons(QtGui.QDialogButtonBox.Cancel|QtGui.QDialogButtonBox.Ok)
self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
self.password = QtGui.QLineEdit(UsbRoles)
self.password.setGeometry(QtCore.QRect(80, 10, 121, 21))
self.password.setEchoMode(QtGui.QLineEdit.Password)
self.password.setObjectName(_fromUtf8("password"))
self.label = QtGui.QLabel(UsbRoles)
self.label.setGeometry(QtCore.QRect(10, 0, 81, 41))
self.label.setObjectName(_fromUtf8("label"))
self.retranslateUi(UsbRoles)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("accepted()")), UsbRoles.accept)
QtCore.QObject.connect(self.buttonBox, QtCore.SIGNAL(_fromUtf8("rejected()")), UsbRoles.reject)
QtCore.QMetaObject.connectSlotsByName(UsbRoles)
def retranslateUi(self, UsbRoles):
UsbRoles.setWindowTitle(_translate("UsbRoles", "USB Roles", None))
self.password.setPlaceholderText(_translate("UsbRoles", "Enter password", None))
self.label.setText(_translate("UsbRoles", "Password:", None))
import resource_rc
| robotican/ric | ric_board/scripts/RiCConfigurator/GUI/Schemes/UsbRoles.py | Python | bsd-3-clause | 2,558 |
# -*- coding: utf-8 -*-
"""
robo.tests.test_talk_handler
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests for robo.handlers.talk.
:copyright: (c) 2015 Shinya Ohyanagi, All rights reserved.
:license: BSD, see LICENSE for more details.
"""
import os
import logging
import requests
import simplejson as json
from mock import patch
from unittest import TestCase
from robo.robot import Robot
from robo.handlers.talk import Client, Talk
def dummy_response(m):
response = requests.Response()
response.status_code = 200
data = {
'context': 'D0yHgwljc_mhTPIGs--toQ',
'utt': '\u30ac\u30c3', 'da': '0', 'yomi': '\u30ac\u30c3',
'mode': 'dialog'
}
response._content = json.dumps(data)
m.return_value = response
class NullAdapter(object):
def __init__(self, signal):
self.signal = signal
self.responses = []
def say(self, message, **kwargs):
self.responses.append(message)
return message
class TestClient(TestCase):
@classmethod
def setUpClass(cls):
os.environ['DOCOMO_API_KEY'] = 'foo'
cls.client = Client()
@patch('doco.requests.post')
def test_generate_url(self, m):
""" Client().talk() should response docomo dialogue response. """
dummy_response(m)
ret = self.client.talk('nullpo')
self.assertEqual(ret, '\u30ac\u30c3')
class TestTalkHandler(TestCase):
@classmethod
def setUpClass(cls):
logger = logging.getLogger('robo')
logger.level = logging.ERROR
cls.robot = Robot('test', logger)
cls.robot.register_default_handlers()
os.environ['DOCOMO_API_KEY'] = 'foo'
talk = Talk()
talk.signal = cls.robot.handler_signal
method = cls.robot.parse_handler_methods(talk)
cls.robot.handlers.extend(method)
adapter = NullAdapter(cls.robot.handler_signal)
cls.robot.adapters['null'] = adapter
@patch('doco.requests.post')
def test_should_talk(self, m):
""" Talk().get() should response docomo dialogue response. """
dummy_response(m)
self.robot.handler_signal.send('test aaaa')
self.assertEqual(self.robot.adapters['null'].responses[0],
'\u30ac\u30c3')
self.robot.adapters['null'].responses = []
| heavenshell/py-robo-talk | tests/test_talk_handler.py | Python | bsd-3-clause | 2,318 |
'''artlaasya signals'''
from django.dispatch import receiver
from django.db.models.signals import pre_save, post_save, pre_delete
try:
from django.utils.text import slugify
except ImportError:
try:
from django.template.defaultfilters import slugify
except ImportError:
print("Unable to import `slugify`.")
except:
print("Unable to import `slugify`.")
from decimal import Decimal
from artlaasya.utils import is_django_version_greater_than, delete_uploaded_file
from artlaasya.models import (Artist,
ArtistRatchet,
Genre,
Artwork,
ArtworkRatchet,
Event,
EventRatchet)
DJANGO_SAVE_UPDATEABLE = is_django_version_greater_than(1, 4)
@receiver(pre_save, sender=Artist)
def slugify__artist(sender, instance, slugify=slugify, **kwargs):
"""
Manages the uniquely numbered suffix for `name`.
Artist [`first_name` + `last_name` + suffix] --> `slug`.
"""
name_fields_changed = ('first_name' in instance.changed_fields or
'last_name' in instance.changed_fields)
if (name_fields_changed or not instance.slug):
_name = instance.__str__().lower()
_ratchet, _created = ArtistRatchet.ratchets.get_or_create(name=_name)
_incremented_suffix = _ratchet.suffix + 1
_ratchet.suffix = _incremented_suffix
_ratchet.save()
_suffix = str.zfill(str(_incremented_suffix), 3)
instance.slug = slugify('-'.join([_name, _suffix]))
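# Sketch of the resulting slugs (suffix values depend on ratchet state):
# the first "Jane Doe" saved gets slug 'jane-doe-001'; a second artist with
# the same name gets 'jane-doe-002'.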
@receiver(post_save, sender=Artist)
def deactivate_artworks_of_inactive_artist(sender, instance, created, **kwargs):
"""
Ensures that all artworks of an artist are deactivated when artist is
deactivated.
"""
is_active_field_changed = ('is_active' in instance.changed_fields)
if (is_active_field_changed and not instance.is_active):
for _artwork in instance.artworks_authored.all():
if _artwork.is_active:
_artwork.is_active = False
if DJANGO_SAVE_UPDATEABLE:
_artwork.save(update_fields=['is_active'])
else:
_artwork.save()
@receiver(pre_save, sender=Artist, dispatch_uid="d__a_b")
def delete__artist_biography(sender, instance, **kwargs):
"""
If file already exists, but new file uploaded, delete existing file.
"""
biography_field_changed = ('biography' in instance.changed_fields)
if biography_field_changed:
previous_file = instance.get_field_diff('biography')[0]
if previous_file:
delete_uploaded_file(previous_file.path)
@receiver(pre_delete, sender=Artist, dispatch_uid="d__a")
def delete__artist(sender, instance, **kwargs):
"""
Deletes `biography` uploaded file when Artist is deleted.
"""
if instance.biography:
delete_uploaded_file(instance.biography.path)
@receiver(pre_save, sender=Genre)
def slugify__genre(sender, instance, slugify=slugify, **kwargs):
"""
Manages the slugifying of `name`.
Genre [`name`] --> `slug`.
"""
name_fields_changed = ('name' in instance.changed_fields)
if (name_fields_changed or not instance.slug):
_name = instance.__str__().lower()
instance.slug = slugify(_name)
@receiver(pre_save, sender=Artwork)
def name_slugify__artwork(sender, instance, slugify=slugify, **kwargs):
"""
Manages the uniquely numbered suffix for `title`.
Artwork [`title` + suffix] --> `name' --> `slug`.
UploadedImage provides `name` and `slug`.
"""
title_field_changed = ('title' in instance.changed_fields)
if (title_field_changed or not instance.name):
_title=instance.title.lower()
_ratchet, _created = ArtworkRatchet.ratchets.get_or_create(title=_title)
_incremented_suffix = _ratchet.suffix + 1
_ratchet.suffix = _incremented_suffix
_ratchet.save()
_suffix = str.zfill(str(_incremented_suffix), 3)
instance.name = '-'.join([instance.title, _suffix])
instance.slug = slugify(instance.name)
@receiver(pre_save, sender=Artwork)
def calculate_artwork_dimensions(sender, instance, **kwargs):
"""
Calculates artwork measurements in other measurement system.
"""
dimension_fields_changed = ('image_height' in instance.changed_fields or
'image_width' in instance.changed_fields or
'measurement_units' in instance.changed_fields)
if (dimension_fields_changed or
not instance.image_height and not instance.image_width):
if instance.measurement_units == 'I':
instance.height_imperial = instance.image_height
instance.width_imperial = instance.image_width
instance.imperial_units = 'I'
instance.height_metric = round((Decimal(2.54) * instance.image_height), 2)
instance.width_metric = round((Decimal(2.54) * instance.image_width), 2)
instance.metric_units = 'C'
elif instance.measurement_units == 'C':
instance.height_metric = instance.image_height
instance.width_metric = instance.image_width
instance.metric_units = 'C'
instance.height_imperial = round((Decimal(0.394) * instance.image_height), 2)
instance.width_imperial = round((Decimal(0.394) * instance.image_width), 2)
instance.imperial_units = 'I'
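# e.g. measurement_units == 'I' with a 10 x 8 image stores 10 x 8 (inches)
# plus a computed 25.40 x 20.32 (cm); the 'C' branch mirrors it at 0.394 in/cm.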
@receiver(post_save, sender=Artwork)
def ensure_artwork_uniquely_representative(sender, instance, created, **kwargs):
"""
Ensures that only one artwork is representative for any one artist.
"""
if instance.is_representative:
_artworks = Artwork.artworks.filter(artist__slug=instance.artist.slug
).exclude(slug=instance.slug)
for _artwork in _artworks:
if _artwork.is_representative:
_artwork.is_representative = False
if DJANGO_SAVE_UPDATEABLE:
_artwork.save(update_fields=['is_representative'])
else:
_artwork.save()
@receiver(pre_save, sender=Event)
def slugify__event(sender, instance, slugify=slugify, **kwargs):
"""
Manages the uniquely numbered suffix for `title`.
Event [`title` + suffix] --> `slug`.
"""
title_field_changed = ('title' in instance.changed_fields)
    if (title_field_changed or not instance.slug):
_title=instance.title.lower()
_ratchet, _created = EventRatchet.ratchets.get_or_create(title=_title)
_incremented_suffix = _ratchet.suffix + 1
_ratchet.suffix = _incremented_suffix
_ratchet.save()
        _suffix = str(_incremented_suffix).zfill(3)
instance.slug = slugify('-'.join([_title, _suffix]))
@receiver(pre_save, sender=Event, dispatch_uid="d__e_i")
def delete__event_image(sender, instance, **kwargs):
"""
If image already exists, but new image uploaded, deletes existing image file.
"""
image_field_changed = ('image' in instance.changed_fields)
if image_field_changed:
previous_image = instance.get_field_diff('image')[0]
if previous_image:
delete_uploaded_file(previous_image.path)
@receiver(pre_delete, sender=Event, dispatch_uid="d__e")
def delete__event(sender, instance, **kwargs):
"""
Deletes `image` uploaded file when Event is deleted.
"""
    if instance.image:
        delete_uploaded_file(instance.image.path)
#EOF - artlaasya signals
|
davidjcox/artlaasya
|
artlaasya/signals.py
|
Python
|
bsd-3-clause
| 7,852
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('users', '0002_user_activity'),
]
operations = [
migrations.AddField(
model_name='user',
name='age',
field=models.PositiveIntegerField(null=True, blank=True),
),
migrations.AddField(
model_name='user',
name='country',
field=models.CharField(max_length=30, verbose_name='Country', blank=True),
),
]
|
hotsyk/uapython2
|
uapython2/users/migrations/0003_auto_20150809_0918.py
|
Python
|
bsd-3-clause
| 593
|
# -*- coding: utf-8 -*-
"""
===========================
Cross-hemisphere comparison
===========================
This example illustrates how to visualize the difference between activity in
the left and the right hemisphere. The data from the right hemisphere is
mapped to the left hemisphere, and then the difference is plotted. For more
information see :func:`mne.compute_source_morph`.
"""
# Author: Christian Brodbeck <christianbrodbeck@nyu.edu>
#
# License: BSD-3-Clause
# %%
import mne
data_dir = mne.datasets.sample.data_path()
subjects_dir = data_dir / 'subjects'
stc_path = data_dir / 'MEG' / 'sample' / 'sample_audvis-meg-eeg'
stc = mne.read_source_estimate(stc_path, 'sample')
# First, morph the data to fsaverage_sym, for which we have left_right
# registrations:
stc = mne.compute_source_morph(stc, 'sample', 'fsaverage_sym', smooth=5,
warn=False,
subjects_dir=subjects_dir).apply(stc)
# Compute a morph-matrix mapping the right to the left hemisphere,
# and vice-versa.
morph = mne.compute_source_morph(stc, 'fsaverage_sym', 'fsaverage_sym',
spacing=stc.vertices, warn=False,
subjects_dir=subjects_dir, xhemi=True,
verbose='error') # creating morph map
stc_xhemi = morph.apply(stc)
# Now we can subtract them and plot the result:
diff = stc - stc_xhemi
diff.plot(hemi='lh', subjects_dir=subjects_dir, initial_time=0.07,
size=(800, 600))
|
larsoner/mne-python
|
examples/visualization/xhemi.py
|
Python
|
bsd-3-clause
| 1,530
|
"""
Collection of higher level functions to perform operational tasks.
Some day, this module could have a companion module containing the CLI logic
for these functions instead of scripts in ``<source>/bin/scripts``.
"""
import collections
import logging
import numpy
from smqtk.utils import (
bin_utils,
bit_utils,
parallel,
)
__author__ = "paul.tunison@kitware.com"
def compute_many_descriptors(file_elements, descr_generator, descr_factory,
descr_index, batch_size=None, overwrite=False,
procs=None, **kwds):
"""
Compute descriptors for each data file path, yielding
(filepath, DescriptorElement) tuple pairs in the order that they were
input.
    *Note:* **This function currently only operates over images due to the
    specific data validity check/filter performed.**
:param file_elements: Iterable of DataFileElement instances of files to
work on.
:type file_elements: collections.Iterable[smqtk.representation.data_element
.file_element.DataFileElement]
:param descr_generator: DescriptorGenerator implementation instance
to use to generate descriptor vectors.
:type descr_generator: smqtk.algorithms.DescriptorGenerator
:param descr_factory: DescriptorElement factory to use when producing
descriptor vectors.
:type descr_factory: smqtk.representation.DescriptorElementFactory
:param descr_index: DescriptorIndex instance to add generated descriptors
to. When given a non-zero batch size, we add descriptors to the given
index in batches of that size. When a batch size is not given, we add
all generated descriptors to the index after they have been generated.
:type descr_index: smqtk.representation.DescriptorIndex
:param batch_size: Optional number of elements to asynchronously compute
at a time. This is useful when it is desired for this function to yield
results before all descriptors have been computed, yet still take
advantage of any batch asynchronous computation optimizations a
particular DescriptorGenerator implementation may have. If this is 0 or
None (false-evaluating), this function blocks until all descriptors have
been generated.
:type batch_size: None | int | long
:param overwrite: If descriptors from a particular generator already exist
for particular data, re-compute the descriptor for that data and set
into the generated DescriptorElement.
:type overwrite: bool
:param procs: Tell the DescriptorGenerator to use a specific number of
threads/cores.
:type procs: None | int
:param kwds: Remaining keyword-arguments that are to be passed into the
``compute_descriptor_async`` function on the descriptor generator.
:type kwds: dict
:return: Generator that yields (filepath, DescriptorElement) for each file
path given, in the order file paths were provided.
:rtype: __generator[(str, smqtk.representation.DescriptorElement)]
"""
log = logging.getLogger(__name__)
# Capture of generated elements in order of generation
#: :type: deque[smqtk.representation.data_element.file_element.DataFileElement]
dfe_deque = collections.deque()
# Counts for logging
total = 0
unique = 0
def iter_capture_elements():
for dfe in file_elements:
dfe_deque.append(dfe)
yield dfe
if batch_size:
log.debug("Computing in batches of size %d", batch_size)
batch_i = 0
for dfe in iter_capture_elements():
            # elements are captured into ``dfe_deque`` by iter_capture_elements
if len(dfe_deque) == batch_size:
batch_i += 1
log.debug("Computing batch %d", batch_i)
total += len(dfe_deque)
m = descr_generator.compute_descriptor_async(
dfe_deque, descr_factory, overwrite, procs, **kwds
)
unique += len(m)
log.debug("-- Processed %d so far (%d total data elements "
"input)", unique, total)
log.debug("-- adding to index")
descr_index.add_many_descriptors(m.itervalues())
log.debug("-- yielding generated descriptor elements")
for e in dfe_deque:
# noinspection PyProtectedMember
yield e._filepath, m[e]
dfe_deque.clear()
if len(dfe_deque):
log.debug("Computing final batch of size %d",
len(dfe_deque))
total += len(dfe_deque)
m = descr_generator.compute_descriptor_async(
dfe_deque, descr_factory, overwrite, procs, **kwds
)
unique += len(m)
log.debug("-- Processed %d so far (%d total data elements "
"input)", unique, total)
log.debug("-- adding to index")
descr_index.add_many_descriptors(m.itervalues())
log.debug("-- yielding generated descriptor elements")
for dfe in dfe_deque:
# noinspection PyProtectedMember
yield dfe._filepath, m[dfe]
else:
log.debug("Using single async call")
# Just do everything in one call
log.debug("Computing descriptors")
m = descr_generator.compute_descriptor_async(
iter_capture_elements(), descr_factory,
overwrite, procs, **kwds
)
log.debug("Adding to index")
descr_index.add_many_descriptors(m.itervalues())
log.debug("yielding generated elements")
for dfe in dfe_deque:
# noinspection PyProtectedMember
yield dfe._filepath, m[dfe]
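# A minimal usage sketch (illustrative; assumes `gen`, `factory` and `index`
# are already-configured SMQTK plugin instances):
#
#   for filepath, descr in compute_many_descriptors(file_elements, gen,
#                                                   factory, index,
#                                                   batch_size=256):
#       print filepath, descr.uuid()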
def compute_hash_codes(uuids, index, functor, hash2uuids=None,
report_interval=1.0, use_mp=False):
"""
Given an iterable of DescriptorElement UUIDs, asynchronously access them
from the given ``index``, asynchronously compute hash codes via ``functor``
and convert to an integer, yielding (DescriptorElement, hash-int) pairs.
The dictionary input and returned is of the same format used by the
``LSHNearestNeighborIndex`` implementation (mapping pointed to by the
``hash2uuid_cache_filepath`` attribute).
:param uuids: Sequence of UUIDs to process
:type uuids: collections.Iterable[collections.Hashable]
:param index: Descriptor index to pull from.
:type index: smqtk.representation.descriptor_index.DescriptorIndex
:param functor: LSH hash code functor instance
:type functor: smqtk.algorithms.LshFunctor
:param hash2uuids: Hash code to UUID set to update, which is also returned
from this function. If not provided, we will start a new mapping, which
is returned instead.
:type hash2uuids: dict[int|long, set[collections.Hashable]]
    :param report_interval: Frequency in seconds at which we report speed and
        completion progress via logging. Reporting is disabled when logging
        is not at debug level or when this value is zero or less.
:type report_interval: float
:param use_mp: If multiprocessing should be used for parallel
computation vs. threading. Reminder: This will copy currently loaded
objects onto worker processes (e.g. the given index), which could lead
to dangerously high RAM consumption.
:type use_mp: bool
    :return: The ``hash2uuids`` mapping provided or, if None was provided, a
        new mapping.
:rtype: dict[int|long, set[collections.Hashable]]
"""
if hash2uuids is None:
hash2uuids = {}
# TODO: parallel map fetch elements from index?
# -> separately from compute
def get_hash(u):
v = index.get_descriptor(u).vector()
return u, bit_utils.bit_vector_to_int_large(functor.get_hash(v))
# Setup log and reporting function
log = logging.getLogger(__name__)
report_state = [0] * 7
# noinspection PyGlobalUndefined
if log.getEffectiveLevel() > logging.DEBUG or report_interval <= 0:
def report_progress(*_):
return
log.debug("Not logging progress")
else:
log.debug("Logging progress at %f second intervals", report_interval)
report_progress = bin_utils.report_progress
log.debug("Starting computation")
for uuid, hash_int in parallel.parallel_map(get_hash, uuids,
ordered=False,
use_multiprocessing=use_mp):
if hash_int not in hash2uuids:
hash2uuids[hash_int] = set()
hash2uuids[hash_int].add(uuid)
# Progress reporting
report_progress(log.debug, report_state, report_interval)
# Final report
report_state[1] -= 1
report_progress(log.debug, report_state, 0.0)
return hash2uuids
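# A minimal usage sketch (illustrative; assumes `index` and `functor` are
# already-configured SMQTK plugin instances):
#
#   hash2uuids = compute_hash_codes(index.iterkeys(), index, functor)
#   # hash2uuids now maps each integer hash code to the set of descriptor
#   # UUIDs whose vectors hash to that code.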
def mb_kmeans_build_apply(index, mbkm, initial_fit_size):
"""
Build the MiniBatchKMeans centroids based on the descriptors in the given
    index, then predict descriptor clusters with the resulting model.
If the given index is empty, no fitting or clustering occurs and an empty
dictionary is returned.
:param index: Index of descriptors
:type index: smqtk.representation.DescriptorIndex
    :param mbkm: Scikit-Learn MiniBatchKMeans instance to train and then use
        for prediction.
:type mbkm: sklearn.cluster.MiniBatchKMeans
:param initial_fit_size: Number of descriptors to run an initial fit with.
This brings the advantage of choosing a best initialization point from
multiple.
:type initial_fit_size: int
:return: Dictionary of the cluster label (integer) to the set of descriptor
UUIDs belonging to that cluster.
:rtype: dict[int, set[collections.Hashable]]
"""
log = logging.getLogger(__name__)
ifit_completed = False
k_deque = collections.deque()
d_fitted = 0
log.info("Getting index keys (shuffled)")
index_keys = sorted(index.iterkeys())
numpy.random.seed(mbkm.random_state)
numpy.random.shuffle(index_keys)
def parallel_iter_vectors(descriptors):
""" Get the vectors for the descriptors given.
Not caring about order returned.
"""
return parallel.parallel_map(lambda d: d.vector(), descriptors,
use_multiprocessing=False)
def get_vectors(k_iter):
""" Get numpy array of descriptor vectors (2D array returned) """
return numpy.array(list(
parallel_iter_vectors(index.get_many_descriptors(k_iter))
))
log.info("Collecting iteratively fitting model")
rps = [0] * 7
for i, k in enumerate(index_keys):
k_deque.append(k)
bin_utils.report_progress(log.debug, rps, 1.)
if initial_fit_size and not ifit_completed:
if len(k_deque) == initial_fit_size:
log.info("Initial fit using %d descriptors", len(k_deque))
log.info("- collecting vectors")
vectors = get_vectors(k_deque)
log.info("- fitting model")
mbkm.fit(vectors)
log.info("- cleaning")
d_fitted += len(vectors)
k_deque.clear()
ifit_completed = True
elif len(k_deque) == mbkm.batch_size:
log.info("Partial fit with batch size %d", len(k_deque))
log.info("- collecting vectors")
vectors = get_vectors(k_deque)
log.info("- fitting model")
mbkm.partial_fit(vectors)
log.info("- cleaning")
d_fitted += len(k_deque)
k_deque.clear()
# Final fit with any remaining descriptors
if k_deque:
log.info("Final partial fit of size %d", len(k_deque))
log.info('- collecting vectors')
vectors = get_vectors(k_deque)
log.info('- fitting model')
mbkm.partial_fit(vectors)
log.info('- cleaning')
d_fitted += len(k_deque)
k_deque.clear()
log.info("Computing descriptor classes with final KMeans model")
mbkm.verbose = False
d_classes = collections.defaultdict(set)
d_uv_iter = parallel.parallel_map(lambda d: (d.uuid(), d.vector()),
index,
use_multiprocessing=False,
name="uv-collector")
# TODO: Batch predict call inputs to something larger than one at a time.
d_uc_iter = parallel.parallel_map(
lambda (u, v): (u, mbkm.predict(v[numpy.newaxis, :])[0]),
d_uv_iter,
use_multiprocessing=False,
name="uc-collector")
rps = [0] * 7
for uuid, c in d_uc_iter:
d_classes[c].add(uuid)
bin_utils.report_progress(log.debug, rps, 1.)
rps[1] -= 1
bin_utils.report_progress(log.debug, rps, 0)
return d_classes
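# A minimal usage sketch (illustrative; assumes a populated DescriptorIndex
# `index`):
#
#   from sklearn.cluster import MiniBatchKMeans
#   mbkm = MiniBatchKMeans(n_clusters=8, batch_size=100, random_state=0)
#   label2uuids = mb_kmeans_build_apply(index, mbkm, initial_fit_size=500)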
|
Purg/SMQTK
|
python/smqtk/compute_functions.py
|
Python
|
bsd-3-clause
| 13,076
|
"""
Interface to be implemented by id generators. Some implementations
may not require all the arguments; for example, MySQL does not need a
keyInfo object, while the IDBroker implementation does not require a
Connection, as it only rarely needs one and retrieves a connection
from the Connection pool service only when needed.
"""
__version__ = '$Revision: 3194 $'[11:-2]
import logging
import util.logger.Logger as Logger
import proof.ProofException as ProofException
class IDGenerator:
__is__ = 'interface'
def __init__(self, logger=None):
self.__logger = Logger.makeLogger(logger)
self.log = self.__logger.write
def getId(self, connection=None, key_info=None):
""" Returns an id.
@param connection A Connection.
@param key_info an Object that contains additional info.
"""
        raise ProofException.ProofNotImplementedException( \
            "IDGenerator.getId: needs to be overridden." )
def isPriorToInsert(self):
""" A flag to determine the timing of the id generation.
@return a <code>boolean</code> value
"""
        raise ProofException.ProofNotImplementedException( \
            "IDGenerator.isPriorToInsert: needs to be overridden." )
def isPostInsert(self):
""" A flag to determine the timing of the id generation
@return Whether id is availble post-<code>insert</code>.
"""
        raise ProofException.ProofNotImplementedException( \
            "IDGenerator.isPostInsert: needs to be overridden." )
def isConnectionRequired(self):
""" A flag to determine whether a Connection is required to
generate an id.
@return a <code>boolean</code> value
"""
        raise ProofException.ProofNotImplementedException( \
            "IDGenerator.isConnectionRequired: needs to be overridden." )
|
mattduan/proof
|
pk/generator/IDGenerator.py
|
Python
|
bsd-3-clause
| 1,957
|
class LektorException(Exception):
def __init__(self, message=None):
Exception.__init__(self)
if isinstance(message, bytes):
message = message.decode("utf-8", "replace")
self.message = message
def to_json(self):
return {
"type": self.__class__.__name__,
"message": self.message,
}
def __str__(self):
return str(self.message)
def __repr__(self):
return "%s(%r)" % (self.__class__.__name__, self.message)
|
lektor/lektor
|
lektor/exception.py
|
Python
|
bsd-3-clause
| 513
|
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, unicode_literals, print_function
from distutils.version import LooseVersion
import sys
import re
import os
import six
from .. import environment
from ..console import log
from .. import util
WIN = (os.name == "nt")
class Virtualenv(environment.Environment):
"""
Manage an environment using virtualenv.
"""
tool_name = "virtualenv"
def __init__(self, conf, python, requirements, tagged_env_vars):
"""
Parameters
----------
conf : Config instance
python : str
Version of Python. Must be of the form "MAJOR.MINOR".
        requirements : dict
            Dictionary mapping a PyPI package name to a version
            identifier string.
        tagged_env_vars : dict
            Environment variables, tagged for build vs. non-build use.
"""
executable = Virtualenv._find_python(python)
if executable is None:
raise environment.EnvironmentUnavailable(
"No executable found for python {0}".format(python))
self._executable = executable
self._python = python
self._requirements = requirements
super(Virtualenv, self).__init__(conf,
python,
requirements,
tagged_env_vars)
try:
import virtualenv
except ImportError:
raise environment.EnvironmentUnavailable(
"virtualenv package not installed")
@staticmethod
def _find_python(python):
"""Find Python executable for the given Python version"""
is_pypy = python.startswith("pypy")
# Parse python specifier
if is_pypy:
executable = python
if python == 'pypy':
python_version = '2'
else:
python_version = python[4:]
else:
python_version = python
executable = "python{0}".format(python_version)
# Find Python executable on path
try:
return util.which(executable)
except IOError:
pass
# Maybe the current one is correct?
current_is_pypy = hasattr(sys, 'pypy_version_info')
current_versions = ['{0[0]}'.format(sys.version_info),
'{0[0]}.{0[1]}'.format(sys.version_info)]
if is_pypy == current_is_pypy and python_version in current_versions:
return sys.executable
return None
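    # Examples of the resolution above (illustrative): python="3.6" searches
    # the PATH for "python3.6"; python="pypy3" searches for "pypy3" with "3"
    # as the version; plain python="pypy" is treated as the Python 2 series.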
@property
def name(self):
"""
Get a name to uniquely identify this environment.
"""
python = self._python
if self._python.startswith('pypy'):
# get_env_name adds py-prefix
python = python[2:]
return environment.get_env_name(self.tool_name,
python,
self._requirements,
self._tagged_env_vars)
@classmethod
    def matches(cls, python):
if not (re.match(r'^[0-9].*$', python) or re.match(r'^pypy[0-9.]*$', python)):
# The python name should be a version number, or pypy+number
return False
try:
import virtualenv
except ImportError:
return False
else:
if LooseVersion(virtualenv.__version__) == LooseVersion('1.11.0'):
log.warning(
"asv is not compatible with virtualenv 1.11 due to a bug in "
"setuptools.")
if LooseVersion(virtualenv.__version__) < LooseVersion('1.10'):
log.warning(
"If using virtualenv, it much be at least version 1.10")
executable = Virtualenv._find_python(python)
return executable is not None
def _setup(self):
"""
Setup the environment on disk using virtualenv.
Then, all of the requirements are installed into
it using `pip install`.
"""
env = dict(os.environ)
env.update(self.build_env_vars)
log.info("Creating virtualenv for {0}".format(self.name))
util.check_call([
sys.executable,
"-mvirtualenv",
'--no-site-packages',
"-p",
self._executable,
self._path], env=env)
log.info("Installing requirements for {0}".format(self.name))
self._install_requirements()
def _install_requirements(self):
if sys.version_info[:2] == (3, 2):
pip_args = ['install', '-v', 'wheel<0.29.0', 'pip<8']
else:
pip_args = ['install', '-v', 'wheel', 'pip>=8']
env = dict(os.environ)
env.update(self.build_env_vars)
self._run_pip(pip_args, env=env)
if self._requirements:
args = ['install', '-v', '--upgrade']
for key, val in six.iteritems(self._requirements):
pkg = key
if key.startswith('pip+'):
pkg = key[4:]
if val:
args.append("{0}=={1}".format(pkg, val))
else:
args.append(pkg)
self._run_pip(args, timeout=self._install_timeout, env=env)
def _run_pip(self, args, **kwargs):
# Run pip via python -m pip, so that it works on Windows when
# upgrading pip itself, and avoids shebang length limit on Linux
return self.run_executable('python', ['-mpip'] + list(args), **kwargs)
def run(self, args, **kwargs):
log.debug("Running '{0}' in {1}".format(' '.join(args), self.name))
return self.run_executable('python', args, **kwargs)
|
qwhelan/asv
|
asv/plugins/virtualenv.py
|
Python
|
bsd-3-clause
| 5,844
|
"""
Inlinefunc
This is a simple inline text language for use to custom-format text
in Evennia. It is applied BEFORE ANSI/MUX parsing is applied.
To activate Inlinefunc, settings.INLINEFUNC_ENABLED must be set.
The format is straightforward:
{funcname([arg1,arg2,...]) text {/funcname
Example:
"This is {pad(50,c,-) a center-padded text{/pad of width 50."
->
"This is -------------- a center-padded text--------------- of width 50."
This can be inserted in any text, operated on by the parse_inlinefunc
function. funcname() (no space is allowed between the name and the
argument tuple) is picked from a selection of valid functions from
settings.INLINEFUNC_MODULES.
Commands can be nested, and will be applied inside-out. For correct
parsing, their end-tags must match the starting tags in reverse order.
Example:
"The time is {pad(30){time(){/time{/padright now."
->
"The time is Oct 25, 11:09 right now."
An inline function should have the following call signature:
def funcname(text, *args)
where the text is always the part between {funcname(args) and
{/funcname and the *args are taken from the appropriate part of the
call. It is important that the inline function properly clean the
incoming args, checking their type and replacing them with sane
defaults if needed. If impossible to resolve, the unmodified text
should be returned. The inlinefunc should never cause a traceback.
"""
import re
from django.conf import settings
from src.utils import utils
# inline functions
def pad(text, *args, **kwargs):
"Pad to width. pad(text, width=78, align='c', fillchar=' ')"
width = 78
align = 'c'
fillchar = ' '
for iarg, arg in enumerate(args):
if iarg == 0:
width = int(arg) if arg.isdigit() else width
elif iarg == 1:
align = arg if arg in ('c', 'l', 'r') else align
elif iarg == 2:
fillchar = arg[0]
else:
break
return utils.pad(text, width=width, align=align, fillchar=fillchar)
def crop(text, *args, **kwargs):
"Crop to width. crop(text, width=78, suffix='[...]')"
width = 78
suffix = "[...]"
for iarg, arg in enumerate(args):
if iarg == 0:
width = int(arg) if arg.isdigit() else width
elif iarg == 1:
suffix = arg
else:
break
return utils.crop(text, width=width, suffix=suffix)
def wrap(text, *args, **kwargs):
"Wrap/Fill text to width. fill(text, width=78, indent=0)"
width = 78
indent = 0
for iarg, arg in enumerate(args):
if iarg == 0:
width = int(arg) if arg.isdigit() else width
elif iarg == 1:
indent = int(arg) if arg.isdigit() else indent
return utils.wrap(text, width=width, indent=indent)
def time(text, *args, **kwargs):
"Inserts current time"
import time
strformat = "%h %d, %H:%M"
if args and args[0]:
strformat = str(args[0])
return time.strftime(strformat)
def you(text, *args, **kwargs):
"Inserts your name"
name = "You"
sess = kwargs.get("session")
if sess and sess.puppet:
name = sess.puppet.key
return name
# load functions from module (including this one, if using default settings)
_INLINE_FUNCS = {}
for module in utils.make_iter(settings.INLINEFUNC_MODULES):
_INLINE_FUNCS.update(utils.all_from_module(module))
_INLINE_FUNCS.pop("inline_func_parse", None)
# dynamically build regexes for found functions
_RE_FUNCFULL = r"\{%s\((.*?)\)(.*?){/%s"
_RE_FUNCFULL_SINGLE = r"\{%s\((.*?)\)"
_RE_FUNCSTART = r"\{((?:%s))"
_RE_FUNCEND = r"\{/((?:%s))"
_RE_FUNCSPLIT = r"(\{/*(?:%s)(?:\(.*?\))*)"
_RE_FUNCCLEAN = r"\{%s\(.*?\)|\{/%s"
_INLINE_FUNCS = dict((key, (func, re.compile(_RE_FUNCFULL % (key, key), re.DOTALL | re.MULTILINE),
                                  re.compile(_RE_FUNCFULL_SINGLE % key, re.DOTALL | re.MULTILINE)))
                     for key, func in _INLINE_FUNCS.items() if callable(func))
_FUNCSPLIT_REGEX = re.compile(_RE_FUNCSPLIT % r"|".join([key for key in _INLINE_FUNCS]), re.DOTALL | re.MULTILINE)
_FUNCSTART_REGEX = re.compile(_RE_FUNCSTART % r"|".join([key for key in _INLINE_FUNCS]), re.DOTALL | re.MULTILINE)
_FUNCEND_REGEX = re.compile(_RE_FUNCEND % r"|".join([key for key in _INLINE_FUNCS]), re.DOTALL | re.MULTILINE)
_FUNCCLEAN_REGEX = re.compile("|".join([_RE_FUNCCLEAN % (key, key) for key in _INLINE_FUNCS]), re.DOTALL | re.MULTILINE)
# inline parser functions
def _execute_inline_function(funcname, text, session):
"""
Get the enclosed text between {funcname(...) and {/funcname
and execute the inline function to replace the whole block
with the result.
Note that this lookup is "dumb" - we just grab the first end
tag we find. So to work correctly this function must be called
"inside out" on a nested function tree, so each call only works
on a "flat" tag.
"""
def subfunc(match):
"replace the entire block with the result of the function call"
args = [part.strip() for part in match.group(1).split(",")]
intext = match.group(2)
kwargs = {"session":session}
return _INLINE_FUNCS[funcname][0](intext, *args, **kwargs)
return _INLINE_FUNCS[funcname][1].sub(subfunc, text)
def _execute_inline_single_function(funcname, text, session):
"""
Get the arguments of a single function call (no matching end tag)
and execute it with an empty text input.
"""
def subfunc(match):
"replace the single call with the result of the function call"
args = [part.strip() for part in match.group(1).split(",")]
kwargs = {"session":session}
return _INLINE_FUNCS[funcname][0]("", *args, **kwargs)
return _INLINE_FUNCS[funcname][2].sub(subfunc, text)
def parse_inlinefunc(text, strip=False, session=None):
"""
Parse inline function-replacement.
strip - remove all supported inlinefuncs from text
session - session calling for the parsing
"""
if strip:
# strip all functions
return _FUNCCLEAN_REGEX.sub("", text)
stack = []
for part in _FUNCSPLIT_REGEX.split(text):
endtag = _FUNCEND_REGEX.match(part)
if endtag:
# an end tag
endname = endtag.group(1)
while stack:
new_part = stack.pop()
                part = new_part + part # add backwards -> forwards
starttag = _FUNCSTART_REGEX.match(new_part)
if starttag:
startname = starttag.group(1)
if startname == endname:
part = _execute_inline_function(startname, part, session)
break
stack.append(part)
# handle single functions without matching end tags; these are treated
# as being called with an empty string as text argument.
outstack = []
for part in _FUNCSPLIT_REGEX.split("".join(stack)):
starttag = _FUNCSTART_REGEX.match(part)
if starttag:
startname = starttag.group(1)
part = _execute_inline_single_function(startname, part, session)
outstack.append(part)
return "".join(outstack)
def _test():
# this should all be handled
s = "This is a text with a{pad(78,c,-)text {pad(5)of{/pad {pad(30)nice{/pad size{/pad inside {pad(4,l)it{/pad."
s2 = "This is a text with a----------------text of nice size---------------- inside it ."
t = parse_inlinefunc(s)
assert(t == s2)
return t
|
Pathel/deuterium
|
src/utils/inlinefunc.py
|
Python
|
bsd-3-clause
| 7,563
|
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
art.process_dataset(N=32, FREQ='D', seed=0, trendtype="Lag1Trend", cycle_length=0, transform="BoxCox", sigma=0.0, exog_count=100, ar_order=12)
|
antoinecarme/pyaf
|
tests/artificial/transf_BoxCox/trend_Lag1Trend/cycle_0/ar_12/test_artificial_32_BoxCox_Lag1Trend_0_12_100.py
|
Python
|
bsd-3-clause
| 262
|
def extractInfiniteNovelTranslations(item):
"""
# Infinite Novel Translations
"""
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or 'preview' in item['title'].lower():
return None
tagmap = [
('Ascendance of a Bookworm', 'Ascendance of a Bookworm', 'translated'),
('Yomigaeri no Maou', 'Yomigaeri no Maou', 'translated'),
('Kakei Senki wo Kakageyo!', 'Kakei Senki wo Kakageyo!', 'translated'),
('Kuro no Shoukan Samurai', 'Kuro no Shoukan Samurai', 'translated'),
('Nidoume no Jinsei wo Isekai de', 'Nidoume no Jinsei wo Isekai de', 'translated'),
('Hachi-nan', 'Hachinan tte, Sore wa Nai Deshou!', 'translated'),
('Summoned Slaughterer', 'Yobidasareta Satsuriku-sha', 'translated'),
('maou no utsuwa', 'Maou no Utsuwa', 'translated'),
('Maou no Ki', 'Maou no Ki', 'translated'),
('Imperial wars and my stratagems', 'Imperial Wars and my Stratagems', 'translated'),
('Kuro no Shoukanshi', 'Kuro no Shoukanshi', 'translated'),
('I work as Healer in Another World\'s Labyrinth City', 'I work as Healer in Another World\'s Labyrinth City', 'translated'),
('The Spearmaster and The Black Cat', 'The Spearmaster and The Black Cat', 'translated'),
('Hakai no Miko', 'Hakai no Miko', 'translated'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False
|
fake-name/ReadableWebProxy
|
WebMirror/management/rss_parser_funcs/feed_parse_extractInfiniteNovelTranslations.py
|
Python
|
bsd-3-clause
| 2,377
|
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..brains import insertMidACPCpoint
def test_insertMidACPCpoint_inputs():
input_map = dict(args=dict(argstr='%s',
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
inputLandmarkFile=dict(argstr='--inputLandmarkFile %s',
),
outputLandmarkFile=dict(argstr='--outputLandmarkFile %s',
hash_files=False,
),
terminal_output=dict(deprecated='1.0.0',
nohash=True,
),
)
inputs = insertMidACPCpoint.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_insertMidACPCpoint_outputs():
output_map = dict(outputLandmarkFile=dict(),
)
outputs = insertMidACPCpoint.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
|
mick-d/nipype
|
nipype/interfaces/semtools/utilities/tests/test_auto_insertMidACPCpoint.py
|
Python
|
bsd-3-clause
| 1,117
|
_namespace = 'http://maec.mitre.org/XMLSchema/maec-package-2'
from .action_equivalence import ActionEquivalenceList, ActionEquivalence # noqa
from .malware_subject_reference import MalwareSubjectReference # noqa
from .object_equivalence import ObjectEquivalence, ObjectEquivalenceList # noqa
from .analysis import (Analysis, AnalysisEnvironment, NetworkInfrastructure, # noqa
CapturedProtocolList, CapturedProtocol, # noqa
AnalysisSystemList, AnalysisSystem, InstalledPrograms, # noqa
HypervisorHostSystem, DynamicAnalysisMetadata, # noqa
ToolList, CommentList, Comment, Source) # noqa
from .grouping_relationship import (GroupingRelationshipList, # noqa
GroupingRelationship, ClusteringMetadata, # noqa
ClusteringAlgorithmParameters, # noqa
ClusterComposition, ClusterEdgeNodePair) # noqa
from .malware_subject import (MalwareSubjectList, MalwareSubject, # noqa
MalwareConfigurationDetails, # noqa
MalwareConfigurationObfuscationDetails, # noqa
MalwareConfigurationObfuscationAlgorithm, # noqa
MalwareConfigurationStorageDetails, # noqa
MalwareBinaryConfigurationStorageDetails, # noqa
MalwareConfigurationParameter, # noqa
MalwareDevelopmentEnvironment, # noqa
FindingsBundleList, MetaAnalysis, # noqa
MalwareSubjectRelationshipList, # noqa
MalwareSubjectRelationship, Analyses, # noqa
MinorVariants) # noqa
from .package import Package # noqa
|
MAECProject/python-maec
|
maec/package/__init__.py
|
Python
|
bsd-3-clause
| 1,881
|
from django.contrib.sites.models import Site
from django.utils.translation import ugettext_lazy as _lazy
from tidings.events import InstanceEvent, EventUnion
from kitsune.forums.models import Thread, Forum
from kitsune.sumo.email_utils import emails_with_users_and_watches
from kitsune.sumo.templatetags.jinja_helpers import add_utm
class NewPostEvent(InstanceEvent):
"""An event which fires when a thread receives a reply
Firing this also notifies watchers of the containing forum.
"""
event_type = "thread reply"
content_type = Thread
def __init__(self, reply):
super(NewPostEvent, self).__init__(reply.thread)
# Need to store the reply for _mails
self.reply = reply
def fire(self, **kwargs):
"""Notify not only watchers of this thread but of the parent forum."""
return EventUnion(self, NewThreadEvent(self.reply)).fire(**kwargs)
def _mails(self, users_and_watches):
post_url = add_utm(self.reply.get_absolute_url(), "forums-post")
c = {
"post": self.reply.content,
"post_html": self.reply.content_parsed,
"author": self.reply.author,
"host": Site.objects.get_current().domain,
"thread": self.reply.thread.title,
"forum": self.reply.thread.forum.name,
"post_url": post_url,
}
return emails_with_users_and_watches(
subject=_lazy("Re: {forum} - {thread}"),
text_template="forums/email/new_post.ltxt",
html_template="forums/email/new_post.html",
context_vars=c,
users_and_watches=users_and_watches,
)
class NewThreadEvent(InstanceEvent):
"""An event which fires when a new thread is added to a forum"""
event_type = "forum thread"
content_type = Forum
def __init__(self, post):
super(NewThreadEvent, self).__init__(post.thread.forum)
# Need to store the post for _mails
self.post = post
def _mails(self, users_and_watches):
post_url = add_utm(self.post.thread.get_absolute_url(), "forums-thread")
c = {
"post": self.post.content,
"post_html": self.post.content_parsed,
"author": self.post.author,
"host": Site.objects.get_current().domain,
"thread": self.post.thread.title,
"forum": self.post.thread.forum.name,
"post_url": post_url,
}
return emails_with_users_and_watches(
subject=_lazy("{forum} - {thread}"),
text_template="forums/email/new_thread.ltxt",
html_template="forums/email/new_thread.html",
context_vars=c,
users_and_watches=users_and_watches,
)
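# A minimal usage sketch (illustrative; assumes `reply` is a saved forum
# Post). Because NewPostEvent.fire() wraps itself in an EventUnion with
# NewThreadEvent, one call notifies both thread and forum watchers:
#
#   NewPostEvent(reply).fire(exclude=reply.author)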
|
mozilla/kitsune
|
kitsune/forums/events.py
|
Python
|
bsd-3-clause
| 2,761
|
from __future__ import absolute_import
import gocept.httpserverlayer.custom
import gocept.zcapatch
import json
import mock
import pkg_resources
import plone.testing
import zeit.cms.content.interfaces
import zeit.cms.testcontenttype.testcontenttype
import zeit.cms.testing
import zeit.content.article.testing
import zeit.content.image.testing
import zeit.content.link.testing
import zeit.content.volume.testing
import zeit.find.testing
import zeit.push.testing
import zeit.retresco.interfaces
import zeit.workflow.testing
HTTP_LAYER = zeit.cms.testing.HTTPLayer(
zeit.cms.testing.RecordingRequestHandler,
name='HTTPLayer', module=__name__)
product_config = """
<product-config zeit.retresco>
base-url http://localhost:[PORT]
elasticsearch-url http://tms-backend.staging.zeit.de:80/elasticsearch
elasticsearch-index zeit_pool
elasticsearch-connection-class zeit.retresco.search.Connection
topic-redirect-prefix http://www.zeit.de
index-principal zope.user
</product-config>
"""
class ElasticsearchMockLayer(plone.testing.Layer):
def setUp(self):
self['elasticsearch_mocker'] = mock.patch(
'elasticsearch.client.Elasticsearch.search')
self['elasticsearch'] = self['elasticsearch_mocker'].start()
filename = pkg_resources.resource_filename(
'zeit.retresco.tests', 'elasticsearch_result.json')
with open(filename) as response:
self['elasticsearch'].return_value = json.load(response)
def tearDown(self):
del self['elasticsearch']
self['elasticsearch_mocker'].stop()
del self['elasticsearch_mocker']
ELASTICSEARCH_MOCK_LAYER = ElasticsearchMockLayer()
class TMSMockLayer(plone.testing.Layer):
def setUp(self):
self['tms_mock'] = mock.Mock()
self['tms_mock'].url = 'http://tms.example.com'
self['tms_mock'].get_article_keywords.return_value = []
self['tms_zca'] = gocept.zcapatch.Patches()
self['tms_zca'].patch_utility(
self['tms_mock'], zeit.retresco.interfaces.ITMS)
def tearDown(self):
self['tms_zca'].reset()
del self['tms_zca']
del self['tms_mock']
def testTearDown(self):
self['tms_mock'].reset_mock()
TMS_MOCK_LAYER = TMSMockLayer()
class ZCMLLayer(zeit.cms.testing.ZCMLLayer):
defaultBases = zeit.cms.testing.ZCMLLayer.defaultBases + (HTTP_LAYER,)
def setUp(self):
self.product_config = self.product_config.replace(
'[PORT]', str(self['http_port']))
super(ZCMLLayer, self).setUp()
ZCML_LAYER = ZCMLLayer(
'ftesting.zcml', product_config=zeit.cms.testing.cms_product_config +
product_config +
zeit.find.testing.product_config +
zeit.push.testing.product_config +
zeit.workflow.testing.product_config +
zeit.content.article.testing.product_config +
zeit.content.link.testing.product_config +
zeit.content.volume.testing.product_config +
zeit.content.image.testing.product_config)
CELERY_LAYER = zeit.cms.testing.CeleryWorkerLayer(
name='CeleryLayer', bases=(ZCML_LAYER,))
CELERY_LAYER.queues += ('search',)
MOCK_ZCML_LAYER = plone.testing.Layer(
bases=(ZCML_LAYER, ELASTICSEARCH_MOCK_LAYER), name='MockZCMLLayer',
module=__name__)
class FunctionalTestCase(zeit.cms.testing.FunctionalTestCase):
layer = ZCML_LAYER
class TagTestHelpers(object):
"""Helper to prefill DAV-Property used for keywords of a content object."""
def set_tags(self, content, xml):
"""Prefill DAV-Property for keywords of `content` with `xml`.
It inserts `xml` into a newly created DAV-property in the
the 'tagging' namespace. `xml` is a string containing XML
representing `Tag` objects, which requires `type` and `text`::
<tag type="Person">Karen Duve</tag>
<tag type="Location">Berlin</tag>
"""
dav = zeit.connector.interfaces.IWebDAVProperties(content)
name, ns = dav_key = zeit.retresco.tagger.KEYWORD_PROPERTY
dav[dav_key] = """<ns:rankedTags xmlns:ns="{ns}">
<rankedTags>{0}</rankedTags></ns:rankedTags>""".format(
xml, ns=ns, tag=name)
def create_testcontent():
content = zeit.cms.testcontenttype.testcontenttype.ExampleContentType()
content.uniqueId = 'http://xml.zeit.de/testcontent'
content.teaserText = 'teaser'
content.title = 'title'
zeit.cms.content.interfaces.IUUID(content).id = 'myid'
return content
|
ZeitOnline/zeit.retresco
|
src/zeit/retresco/testing.py
|
Python
|
bsd-3-clause
| 4,428
|
# Copyright 2015 The PDFium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import shlex
import shutil
# This module collects and writes output in a format expected by the
# Gold baseline tool. Based on meta data provided explicitly and by
# adding a series of test results it can be used to produce
# a JSON file that is uploaded to Google Storage and ingested by Gold.
#
# The output will look similar to this:
#
# {
# "build_number" : "2",
# "gitHash" : "a4a338179013b029d6dd55e737b5bd648a9fb68c",
# "key" : {
# "arch" : "arm64",
# "compiler" : "Clang",
# },
# "results" : [
# {
# "key" : {
# "config" : "vk",
# "name" : "yuv_nv12_to_rgb_effect",
# "source_type" : "gm"
# },
# "md5" : "7db34da246868d50ab9ddd776ce6d779",
# "options" : {
# "ext" : "png",
# "gamma_correct" : "no"
# }
# },
# {
# "key" : {
# "config" : "vk",
# "name" : "yuv_to_rgb_effect",
# "source_type" : "gm"
# },
# "md5" : "0b955f387740c66eb23bf0e253c80d64",
# "options" : {
# "ext" : "png",
# "gamma_correct" : "no"
# }
# }
# ],
# }
#
class GoldResults(object):
def __init__(self, source_type, outputDir, propertiesStr, keyStr,
ignore_hashes_file):
"""
source_type is the source_type (=corpus) field used for all results.
    outputDir is the directory where the resulting images are copied and
    the dm.json file is written.
propertiesStr is a string with space separated key/value pairs that
is used to set the top level fields in the output JSON file.
keyStr is a string with space separated key/value pairs that
is used to set the 'key' field in the output JSON file.
ignore_hashes_file is a file that contains a list of image hashes
that should be ignored.
"""
self._source_type = source_type
self._properties = self._parseKeyValuePairs(propertiesStr)
self._properties["key"] = self._parseKeyValuePairs(keyStr)
self._results = []
self._outputDir = outputDir
# make sure the output directory exists.
if not os.path.exists(outputDir):
os.makedirs(outputDir)
self._ignore_hashes = set()
if ignore_hashes_file:
with open(ignore_hashes_file, 'r') as ig_file:
        hashes = [x.strip() for x in ig_file.readlines() if x.strip()]
self._ignore_hashes = set(hashes)
def AddTestResult(self, testName, md5Hash, outputImagePath):
    # If the hash is in the list of hashes to ignore then we don't
    # make a copy, but we still add it to the result.
imgExt = os.path.splitext(outputImagePath)[1].lstrip(".")
if md5Hash not in self._ignore_hashes:
# Copy the image to <output_dir>/<md5Hash>.<image_extension>
if not imgExt:
raise ValueError("File %s does not have an extension" % outputImagePath)
newFilePath = os.path.join(self._outputDir, md5Hash + '.' + imgExt)
shutil.copy2(outputImagePath, newFilePath)
# Add an entry to the list of test results
self._results.append({
"key": {
"name": testName,
"source_type": self._source_type,
},
"md5": md5Hash,
"options": {
"ext": imgExt,
"gamma_correct": "no"
}
})
def _parseKeyValuePairs(self, kvStr):
kvPairs = shlex.split(kvStr)
if len(kvPairs) % 2:
raise ValueError("Uneven number of key/value pairs. Got %s" % kvStr)
return { kvPairs[i]:kvPairs[i+1] for i in range(0, len(kvPairs), 2) }
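  # Example of the parsing above (illustrative): 'arch arm64 compiler Clang'
  # yields {"arch": "arm64", "compiler": "Clang"}; shlex.split() keeps
  # quoted values such as "builder name" together as a single token.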
def WriteResults(self):
self._properties.update({
"results": self._results
})
outputFileName = os.path.join(self._outputDir, "dm.json")
with open(outputFileName, 'wb') as outfile:
json.dump(self._properties, outfile, indent=1)
outfile.write("\n")
# Produce example output for manual testing.
if __name__ == "__main__":
# Create a test directory with three empty 'image' files.
testDir = "./testdirectory"
if not os.path.exists(testDir):
os.makedirs(testDir)
open(os.path.join(testDir, "image1.png"), 'wb').close()
open(os.path.join(testDir, "image2.png"), 'wb').close()
open(os.path.join(testDir, "image3.png"), 'wb').close()
# Create an instance and add results.
propStr = """build_number 2 "builder name" Builder-Name gitHash a4a338179013b029d6dd55e737b5bd648a9fb68c"""
keyStr = "arch arm64 compiler Clang configuration Debug"
hash_file = os.path.join(testDir, "ignore_hashes.txt")
with open(hash_file, 'wb') as f:
f.write("\n".join(["hash-1","hash-4"]) + "\n")
gr = GoldResults("pdfium", testDir, propStr, keyStr, hash_file)
gr.AddTestResult("test-1", "hash-1", os.path.join(testDir, "image1.png"))
gr.AddTestResult("test-2", "hash-2", os.path.join(testDir, "image2.png"))
gr.AddTestResult("test-3", "hash-3", os.path.join(testDir, "image3.png"))
gr.WriteResults()
|
DrAlexx/pdfium
|
testing/tools/gold.py
|
Python
|
bsd-3-clause
| 5,115
|
import logging
from typing import Optional, Tuple
from PyQt5 import QtWidgets
from matplotlib.axes import Axes, np
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT, FigureCanvasQTAgg
from matplotlib.figure import Figure
from matplotlib.lines import Line2D
from .capillarysizer_ui import Ui_Form
from ...utils.window import WindowRequiresDevices
from ....core2.algorithms.peakfit import fitpeak, PeakType
from ....core2.dataclasses import Scan
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class CapillarySizer(QtWidgets.QWidget, WindowRequiresDevices, Ui_Form):
figure: Figure
figtoolbar: NavigationToolbar2QT
canvas: FigureCanvasQTAgg
axes: Axes
scan: Optional[Scan] = None
positive: Tuple[float, float] = (.0, .0)
negative: Tuple[float, float] = (.0, .0)
line: Line2D = None
positivepeakline: Optional[Line2D] = None
negativepeakline: Optional[Line2D] = None
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.setupUi(self)
def setupUi(self, Form):
super().setupUi(Form)
self.figure = Figure(constrained_layout=True)
self.canvas = FigureCanvasQTAgg(self.figure)
self.figtoolbar = NavigationToolbar2QT(self.canvas, self)
self.figureVerticalLayout.addWidget(self.canvas)
self.figureVerticalLayout.addWidget(self.figtoolbar)
self.axes = self.figure.add_subplot(self.figure.add_gridspec(1, 1)[:, :])
self.canvas.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
self.canvas.draw_idle()
self.scanIndexSpinBox.valueChanged.connect(self.scanIndexChanged)
self.signalNameComboBox.currentIndexChanged.connect(self.signalNameChanged)
self.fitNegativeToolButton.clicked.connect(self.fitPeak)
self.fitPositiveToolButton.clicked.connect(self.fitPeak)
self.sampleNameComboBox.currentIndexChanged.connect(self.sampleChanged)
self.sampleNameComboBox.setModel(self.instrument.samplestore.sortedmodel)
self.negativeValDoubleSpinBox.valueChanged.connect(self.setValuesFromSpinboxes)
self.positiveValDoubleSpinBox.valueChanged.connect(self.setValuesFromSpinboxes)
self.negativeErrDoubleSpinBox.valueChanged.connect(self.setValuesFromSpinboxes)
self.positiveErrDoubleSpinBox.valueChanged.connect(self.setValuesFromSpinboxes)
self.updateCenterToolButton.clicked.connect(self.saveCenter)
self.updateThicknessToolButton.clicked.connect(self.saveThickness)
self.updateCenterToolButton.setIcon(
QtWidgets.QApplication.instance().style().standardIcon(QtWidgets.QStyle.SP_ArrowRight))
self.updateThicknessToolButton.setIcon(
QtWidgets.QApplication.instance().style().standardIcon(QtWidgets.QStyle.SP_ArrowRight))
self.instrument.scan.lastscanchanged.connect(self.onLastScanChanged)
if self.instrument.scan.firstscan() is None:
self.scanIndexSpinBox.setEnabled(False)
else:
self.scanIndexSpinBox.setRange(self.instrument.scan.firstscan(), self.instrument.scan.lastscan())
self.derivativeToolButton.toggled.connect(self.signalNameChanged)
if self.instrument.scan.lastscan() is not None:
self.scanIndexSpinBox.setValue(self.instrument.scan.lastscan())
self.signalNameComboBox.setCurrentIndex(0)
self.reloadToolButton.clicked.connect(self.signalNameChanged)
self.replot()
def fitPeak(self):
if self.line is None:
return
x = self.line.get_xdata()
y = self.line.get_ydata()
xmin, xmax, ymin, ymax = self.axes.axis()
idx = np.logical_and(
np.logical_and(x >= xmin, x <= xmax),
np.logical_and(y >= ymin, y <= ymax)
)
if self.sender() == self.fitNegativeToolButton:
y = -y
try:
# do not use y error bars: if y<0, y**0.5 is NaN, which will break the fitting routine
pars, covar, peakfunc = fitpeak(x[idx], y[idx], None, None, PeakType.Lorentzian)
except ValueError as ve:
QtWidgets.QMessageBox.critical(self, 'Error while fitting',
f'Cannot fit peak, please try another range. The error message was: {ve}')
return
logger.debug(f'Peak parameters: {pars}')
logger.debug(f'Covariance matrix: {covar}')
xfit = np.linspace(x[idx].min(), x[idx].max(), 100)
yfit = peakfunc(xfit)
if self.sender() == self.fitNegativeToolButton:
if self.negativepeakline is None:
self.negativepeakline = self.axes.plot(xfit, - yfit, 'b-', lw=3)[0]
else:
self.negativepeakline.set_xdata(xfit)
self.negativepeakline.set_ydata(-yfit)
self.negative = (pars[1], covar[1, 1] ** 0.5)
self.negativeValDoubleSpinBox.blockSignals(True)
self.negativeErrDoubleSpinBox.blockSignals(True)
self.negativeValDoubleSpinBox.setValue(pars[1])
self.negativeErrDoubleSpinBox.setValue(covar[1, 1] ** 0.5)
self.negativeValDoubleSpinBox.blockSignals(False)
self.negativeErrDoubleSpinBox.blockSignals(False)
else:
if self.positivepeakline is None:
self.positivepeakline = self.axes.plot(xfit, yfit, 'r-', lw=3)[0]
else:
self.positivepeakline.set_xdata(xfit)
self.positivepeakline.set_ydata(yfit)
self.positive = (pars[1], covar[1, 1] ** 0.5)
self.positiveValDoubleSpinBox.blockSignals(True)
self.positiveErrDoubleSpinBox.blockSignals(True)
self.positiveValDoubleSpinBox.setValue(pars[1])
self.positiveErrDoubleSpinBox.setValue(covar[1, 1] ** 0.5)
self.positiveValDoubleSpinBox.blockSignals(False)
self.positiveErrDoubleSpinBox.blockSignals(False)
self.canvas.draw_idle()
self.recalculate()
def onLastScanChanged(self):
if self.instrument.scan.firstscan() is not None:
self.scanIndexSpinBox.setMaximum(self.instrument.scan.lastscan())
self.scanIndexSpinBox.setMinimum(self.instrument.scan.firstscan())
self.scanIndexSpinBox.setEnabled(True)
else:
self.scanIndexSpinBox.setEnabled(False)
def setValuesFromSpinboxes(self):
self.positive = (self.positiveValDoubleSpinBox.value(), self.positiveErrDoubleSpinBox.value())
self.negative = (self.negativeValDoubleSpinBox.value(), self.negativeErrDoubleSpinBox.value())
self.recalculate()
def recalculate(self):
positionval = 0.5 * (self.positive[0] + self.negative[0])
positionerr = 0.5 * (self.positive[1] ** 2 + self.negative[1] ** 2) ** 0.5
thicknessval = abs(self.positive[0] - self.negative[0])
thicknesserr = (self.positive[1] ** 2 + self.negative[1] ** 2) ** 0.5
self.newPositionLabel.setText(f'{positionval:.4f} \xb1 {positionerr:.4f}')
self.newThicknessLabel.setText(f'{thicknessval:.4f} \xb1 {thicknesserr:.4f} mm')
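    # Worked example for the arithmetic above (values illustrative): with the
    # positive edge fitted at +1.20 mm and the negative edge at -0.80 mm, the
    # new center is 0.5 * (1.20 + (-0.80)) = 0.20 mm and the thickness is
    # |1.20 - (-0.80)| = 2.00 mm.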
def sampleChanged(self):
if self.sampleNameComboBox.currentIndex() < 0:
return
sample = self.instrument.samplestore[self.sampleNameComboBox.currentText()]
if self.instrument.samplestore.hasMotors() and (self.scan is not None):
if self.instrument.samplestore.xmotorname() == self.scan.motorname:
self.oldPositionLabel.setText(f'{sample.positionx[0]:.4f} \xb1 {sample.positionx[1]:.4f}')
elif self.instrument.samplestore.ymotorname() == self.scan.motorname:
self.oldPositionLabel.setText(f'{sample.positiony[0]:.4f} \xb1 {sample.positiony[1]:.4f}')
self.oldThicknessLabel.setText(f'{sample.thickness[0] * 10.0:.4f} \xb1 {sample.thickness[1] * 10.0:.4f} mm')
def scanIndexChanged(self, value: int):
self.scan = self.instrument.scan[value]
self.signalNameComboBox.blockSignals(True)
oldsignal = self.signalNameComboBox.currentText()
self.signalNameComboBox.clear()
self.signalNameComboBox.addItems(self.scan.columnnames[2:])
self.signalNameComboBox.setCurrentIndex(self.signalNameComboBox.findText(oldsignal))
self.signalNameComboBox.blockSignals(False)
self.signalNameChanged()
def signalNameChanged(self):
if self.signalNameComboBox.currentIndex() >= 0:
self.replot()
self.figtoolbar.update()
def replot(self):
if self.scan is None:
return
if self.positivepeakline is not None:
self.positivepeakline.remove()
self.positivepeakline = None
if self.negativepeakline is not None:
self.negativepeakline.remove()
self.negativepeakline = None
x = self.scan[self.scan.motorname]
y = self.scan[self.signalNameComboBox.currentText()]
if self.derivativeToolButton.isChecked():
y = (y[1:] - y[:-1]) / (x[1:] - x[:-1])
x = 0.5 * (x[1:] + x[:-1])
if self.line is None:
self.line = self.axes.plot(x, y, 'k.-')[0]
else:
self.line.set_xdata(x)
self.line.set_ydata(y)
self.axes.relim()
self.axes.autoscale(True)
self.axes.set_xlabel(self.scan.motorname)
self.axes.set_ylabel(
'Derivative of ' + self.signalNameComboBox.currentText()
if self.derivativeToolButton.isChecked() else self.signalNameComboBox.currentText())
self.axes.set_title(self.scan.comment)
self.axes.grid(True, which='both')
self.canvas.draw_idle()
def saveCenter(self):
positionval = 0.5 * (self.positive[0] + self.negative[0])
positionerr = 0.5 * (self.positive[1] ** 2 + self.negative[1] ** 2) ** 0.5
if not self.instrument.samplestore.hasMotors():
# ask the user which direction this is
msgbox = QtWidgets.QMessageBox(self.window())
msgbox.setIcon(QtWidgets.QMessageBox.Question)
msgbox.setWindowTitle('Select direction')
msgbox.setText('Please select X or Y direction to save the determined sample center to:')
            btnX = msgbox.addButton('X', QtWidgets.QMessageBox.YesRole)
            btnY = msgbox.addButton('Y', QtWidgets.QMessageBox.NoRole)
msgbox.addButton(QtWidgets.QMessageBox.Cancel)
result = msgbox.exec_()
logger.debug(f'{result=}')
if msgbox.clickedButton() == btnX:
xcoordinate = True
elif msgbox.clickedButton() == btnY:
xcoordinate = False
else:
xcoordinate = None
elif self.instrument.samplestore.xmotorname() == self.scan.motorname:
xcoordinate = True
elif self.instrument.samplestore.ymotorname() == self.scan.motorname:
xcoordinate = False
else:
xcoordinate = None
if xcoordinate is None:
return
else:
try:
self.instrument.samplestore.updateSample(self.sampleNameComboBox.currentText(),
'positionx' if xcoordinate else 'positiony',
(positionval, positionerr))
logger.info(
f'Updated {"X" if xcoordinate else "Y"} '
f'position of sample {self.sampleNameComboBox.currentText()} to {positionval:.4f} \xb1 {positionerr:.4f}.')
except ValueError:
QtWidgets.QMessageBox.critical(
self, 'Parameter locked',
f'Cannot set position for sample {self.sampleNameComboBox.currentText()}: this parameter has been set read-only!')
self.sampleChanged()
def saveThickness(self):
thicknessval = abs(self.positive[0] - self.negative[0])
thicknesserr = (self.positive[1] ** 2 + self.negative[1] ** 2) ** 0.5
sample = self.instrument.samplestore[self.sampleNameComboBox.currentText()]
try:
sample.thickness = (thicknessval / 10, thicknesserr / 10)
except ValueError:
QtWidgets.QMessageBox.critical(
self, 'Parameter locked',
f'Cannot set thickness for sample {sample.title}: this parameter has been set read-only!')
return
self.instrument.samplestore.updateSample(sample.title, 'thickness', sample.thickness)
logger.info(
f'Updated thickness of sample {sample.title} to {sample.thickness[0]:.4f} \xb1 {sample.thickness[1]:.4f} cm.')
self.sampleChanged()
|
awacha/cct
|
cct/qtgui2/tools/capillarysizer/capillarysizer.py
|
Python
|
bsd-3-clause
| 12,812
|
import json
import re
from django import http
from django.db.models import Count
from django.shortcuts import redirect, render
from django.views.decorators.csrf import csrf_exempt
from tower import ugettext as _
import amo
import amo.utils
from amo.decorators import post_required
from amo.utils import urlparams
from amo.urlresolvers import reverse
from addons.models import Addon
from search.utils import floor_version
from versions.compare import version_dict as vdict, version_int as vint
from .models import CompatReport, AppCompat, CompatTotals
from .forms import AppVerForm, CompatForm
def index(request, version=None):
template = 'compat/index.html'
COMPAT = [v for v in amo.COMPAT if v['app'] == request.APP.id]
compat_dict = dict((v['main'], v) for v in COMPAT)
if not COMPAT:
return render(request, template, {'results': False})
if version not in compat_dict:
return http.HttpResponseRedirect(reverse('compat.index',
args=[COMPAT[0]['main']]))
qs = AppCompat.search()
binary = None
initial = {'appver': '%s-%s' % (request.APP.id, version), 'type': 'all'}
initial.update(request.GET.items())
form = CompatForm(initial)
if request.GET and form.is_valid():
if form.cleaned_data['appver']:
app, ver = form.cleaned_data['appver'].split('-')
if int(app) != request.APP.id or ver != version:
new = reverse('compat.index', args=[ver], add_prefix=False)
url = '/%s%s' % (amo.APP_IDS[int(app)].short, new)
type_ = form.cleaned_data['type'] or None
return http.HttpResponseRedirect(urlparams(url, type=type_))
if form.cleaned_data['type'] != 'all':
binary = form.cleaned_data['type'] == 'binary'
compat, app = compat_dict[version], str(request.APP.id)
compat_queries = (
('prev', qs.query(**{
'top_95.%s.%s' % (app, vint(compat['previous'])): True,
'support.%s.max__gte' % app: vint(compat['previous'])})),
('top_95', qs.query(**{'top_95_all.%s' % app: True})),
('all', qs),
)
compat_levels = [(key, version_compat(queryset, compat, app, binary))
for key, queryset in compat_queries]
usage_addons, usage_total = usage_stats(request, compat, app, binary)
return render(request, template,
{'version': version, 'usage_addons': usage_addons,
'usage_total': usage_total, 'compat_levels': compat_levels,
'form': form, 'results': True,
'show_previous': request.GET.get('previous')})
def version_compat(qs, compat, app, binary):
facets = []
for v, prev in zip(compat['versions'], (None,) + compat['versions']):
d = {'from': vint(v)}
if prev:
d['to'] = vint(prev)
facets.append(d)
# Pick up everything else for an Other count.
facets.append({'to': vint(compat['versions'][-1])})
facet = {'range': {'support.%s.max' % app: facets}}
if binary is not None:
qs = qs.query(binary=binary)
qs = qs.facet(by_status=facet)
result = qs[:0].raw()
total_addons = result['hits']['total']
ranges = result['facets']['by_status']['ranges']
titles = compat['versions'] + (_('Other'),)
faceted = [(v, r['count']) for v, r in zip(titles, ranges)]
return total_addons, faceted
def usage_stats(request, compat, app, binary=None):
# Get the list of add-ons for usage stats.
qs = AppCompat.search().order_by('-usage.%s' % app).values_dict()
if request.GET.get('previous'):
qs = qs.filter(**{
'support.%s.max__gte' % app: vint(compat['previous'])})
else:
qs = qs.filter(**{'support.%s.max__gte' % app: 0})
if binary is not None:
qs = qs.filter(binary=binary)
addons = amo.utils.paginate(request, qs)
for obj in addons.object_list:
obj['usage'] = obj['usage'][app]
obj['max_version'] = obj['max_version'][app]
return addons, CompatTotals.objects.get(app=app).total
@csrf_exempt
@post_required
def incoming(request):
# Turn camelCase into snake_case.
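    # e.g. "worksProperly" -> "works_properly" (illustrative).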
    snake_case = lambda s: re.sub(r'[A-Z]+', r'_\g<0>', s).lower()
try:
data = [(snake_case(k), v)
for k, v in json.loads(request.body).items()]
except Exception:
return http.HttpResponseBadRequest()
# Build up a new report.
report = CompatReport(client_ip=request.META.get('REMOTE_ADDR', ''))
fields = CompatReport._meta.get_all_field_names()
for key, value in data:
if key in fields:
setattr(report, key, value)
else:
return http.HttpResponseBadRequest()
report.save()
return http.HttpResponse(status=204)
def reporter(request):
query = request.GET.get('guid')
if query:
qs = None
if query.isdigit():
qs = Addon.objects.filter(id=query)
if not qs:
qs = Addon.objects.filter(slug=query)
if not qs:
qs = Addon.objects.filter(guid=query)
if not qs and len(query) > 4:
qs = CompatReport.objects.filter(guid__startswith=query)
if qs:
return redirect('compat.reporter_detail', qs[0].guid)
addons = (request.amo_user.addons.all()
if request.user.is_authenticated() else [])
return render(request, 'compat/reporter.html',
dict(query=query, addons=addons))
def reporter_detail(request, guid):
qs = CompatReport.objects.filter(guid=guid)
form = AppVerForm(request.GET)
if request.GET and form.is_valid() and form.cleaned_data['appver']:
# Apply filters only if we have a good app/version combination.
app, ver = form.cleaned_data['appver'].split('-')
app = amo.APP_IDS[int(app)]
ver = vdict(floor_version(ver))['major'] # 3.6 => 3
# Ideally we'd have a `version_int` column to do strict version
# comparing, but that's overkill for basic version filtering here.
qs = qs.filter(app_guid=app.guid,
app_version__startswith=str(ver) + '.')
works_ = dict(qs.values_list('works_properly').annotate(Count('id')))
works = {'success': works_.get(True, 0), 'failure': works_.get(False, 0)}
works_properly = request.GET.get('works_properly')
if works_properly:
qs = qs.filter(works_properly=works_properly)
reports = amo.utils.paginate(request, qs.order_by('-created'), 100)
addon = Addon.objects.filter(guid=guid)
name = addon[0].name if addon else guid
return render(request, 'compat/reporter_detail.html',
dict(reports=reports, works=works,
works_properly=works_properly,
name=name, guid=guid, form=form))
| SuriyaaKudoIsc/olympia | apps/compat/views.py | Python | bsd-3-clause | 6,863 |
from __future__ import division
import itertools
import warnings
import numpy as np
scipy_gaussian_filter = None  # imported lazily below; the scipy import is expensive
from .base import ndfeature, winitfeature, imgfeature
from ._gradient import gradient_cython
from .windowiterator import WindowIterator, WindowIteratorResult
def _np_gradient(pixels):
"""
This method is used in the case of multi-channel images (not 2D images).
The output ordering is identical to the gradient() method, returning
a 2 * n_channels image with gradients in order of the first axis derivative
over all the channels, then the second etc. For example, in the case of
a 3D image with 2 channels, the ordering would be:
I[:, 0, 0, 0] = [A_0, B_0, A_1, B_1, A_2, B_2]
where A and B are the 'channel' labels (synonymous with RGB for a colour
image) and 0,1,2 are the axis labels (synonymous with y,x for a 2D image).
"""
n_dims = pixels.ndim - 1
grad_per_dim_per_channel = [np.gradient(g, edge_order=1)
for g in pixels]
# Flatten out the separate dims
grad_per_channel = list(itertools.chain.from_iterable(
grad_per_dim_per_channel))
# Add a channel axis for broadcasting
grad_per_channel = [g[None, ...] for g in grad_per_channel]
# Permute the list so it is first axis, second axis, etc
grad_per_channel = [grad_per_channel[i::n_dims]
for i in range(n_dims)]
grad_per_channel = list(itertools.chain.from_iterable(grad_per_channel))
# Concatenate gradient list into an array (the new_image)
return np.concatenate(grad_per_channel, axis=0)
@ndfeature
def gradient(pixels):
r"""
Calculates the gradient of an input image. The image is assumed to have
channel information on the first axis. In the case of multiple channels,
it returns the gradient over each axis over each channel as the first axis.
The gradient is computed using second order accurate central differences in
the interior and first order accurate one-side (forward or backwards)
differences at the boundaries.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array where the first dimension
is interpreted as channels. This means an N-dimensional image is
represented by an N+1 dimensional array.
Returns
-------
gradient : `ndarray`
The gradient over each axis over each channel. Therefore, the
first axis of the gradient of a 2D, single channel image, will have
length `2`. The first axis of the gradient of a 2D, 3-channel image,
will have length `6`, the ordering being
``I[:, 0, 0] = [R0_y, G0_y, B0_y, R0_x, G0_x, B0_x]``. To be clear,
all the ``y``-gradients are returned over each channel, then all
the ``x``-gradients.
"""
if (pixels.ndim - 1) == 2: # 2D Image
return gradient_cython(pixels)
else:
return _np_gradient(pixels)
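# --- Editor's example (not part of the original module) ---
# A minimal sketch of the channel ordering documented above, using the
# multi-channel path directly; the function name is the editor's own.
def _example_gradient_ordering():
    pixels = np.random.rand(2, 8, 8)  # 2 channels (A, B) of an 8x8 image
    grad = _np_gradient(pixels)
    # 2 channels * 2 spatial axes -> 4 output channels: [A_y, B_y, A_x, B_x]
    assert grad.shape == (4, 8, 8)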
@ndfeature
def gaussian_filter(pixels, sigma):
r"""
Calculates the convolution of the input image with a multidimensional
Gaussian filter.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
sigma : `float` or `list` of `float`
        The standard deviation for the Gaussian kernel. The standard deviations of
the Gaussian filter are given for each axis as a `list`, or as a single
`float`, in which case it is equal for all axes.
Returns
-------
output_image : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
The filtered image has the same type and size as the input ``pixels``.
"""
global scipy_gaussian_filter
if scipy_gaussian_filter is None:
from scipy.ndimage import gaussian_filter as scipy_gaussian_filter
output = np.empty(pixels.shape, dtype=pixels.dtype)
for dim in range(pixels.shape[0]):
scipy_gaussian_filter(pixels[dim], sigma, output=output[dim])
return output
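# --- Editor's example (not part of the original module) ---
# Hedged usage sketch: each channel is smoothed independently, as the loop
# above does (assumes scipy is installed and that, per the docstring, raw
# ndarrays pass straight through the @ndfeature decorator).
def _example_gaussian_filter():
    pixels = np.random.rand(3, 16, 16)  # (C, Y, X)
    smoothed = gaussian_filter(pixels, sigma=1.0)
    assert smoothed.shape == pixels.shape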
@winitfeature
def hog(pixels, mode='dense', algorithm='dalaltriggs', num_bins=9,
cell_size=8, block_size=2, signed_gradient=True, l2_norm_clip=0.2,
window_height=1, window_width=1, window_unit='blocks',
window_step_vertical=1, window_step_horizontal=1,
window_step_unit='pixels', padding=True, verbose=False):
r"""
Extracts Histograms of Oriented Gradients (HOG) features from the input
image.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
mode : {``dense``, ``sparse``}, optional
The ``sparse`` case refers to the traditional usage of HOGs, so
        predefined parameter values are used.
The ``sparse`` case of ``dalaltriggs`` algorithm sets
``window_height = window_width = block_size`` and
``window_step_horizontal = window_step_vertical = cell_size``.
The ``sparse`` case of ``zhuramanan`` algorithm sets
``window_height = window_width = 3 * cell_size`` and
``window_step_horizontal = window_step_vertical = cell_size``.
In the ``dense`` case, the user can choose values for `window_height`,
`window_width`, `window_unit`, `window_step_vertical`,
`window_step_horizontal`, `window_step_unit` and `padding` to customize
the HOG calculation.
window_height : `float`, optional
Defines the height of the window. The metric unit is defined by
`window_unit`.
window_width : `float`, optional
Defines the width of the window. The metric unit is defined by
`window_unit`.
window_unit : {``blocks``, ``pixels``}, optional
Defines the metric unit of the `window_height` and `window_width`
parameters.
window_step_vertical : `float`, optional
Defines the vertical step by which the window is moved, thus it
controls the features' density. The metric unit is defined by
`window_step_unit`.
window_step_horizontal : `float`, optional
Defines the horizontal step by which the window is moved, thus it
controls the features' density. The metric unit is defined by
`window_step_unit`.
window_step_unit : {``pixels``, ``cells``}, optional
Defines the metric unit of the `window_step_vertical` and
`window_step_horizontal` parameters.
padding : `bool`, optional
If ``True``, the output image is padded with zeros to match the input
image's size.
algorithm : {``dalaltriggs``, ``zhuramanan``}, optional
Specifies the algorithm used to compute HOGs. ``dalaltriggs`` is the
implementation of [1] and ``zhuramanan`` is the implementation of [2].
cell_size : `float`, optional
Defines the cell size in pixels. This value is set to both the width
and height of the cell. This option is valid for both algorithms.
block_size : `float`, optional
Defines the block size in cells. This value is set to both the width
and height of the block. This option is valid only for the
``dalaltriggs`` algorithm.
num_bins : `float`, optional
Defines the number of orientation histogram bins. This option is
valid only for the ``dalaltriggs`` algorithm.
signed_gradient : `bool`, optional
Flag that defines whether we use signed or unsigned gradient angles.
This option is valid only for the ``dalaltriggs`` algorithm.
l2_norm_clip : `float`, optional
Defines the clipping value of the gradients' L2-norm. This option is
valid only for the ``dalaltriggs`` algorithm.
verbose : `bool`, optional
Flag to print HOG related information.
Returns
-------
hog : :map:`Image` or subclass or ``(X, Y, ..., Z, K)`` `ndarray`
The HOG features image. It has the same type as the input ``pixels``.
The output number of channels in the case of ``dalaltriggs`` is
        ``K = num_bins * block_size * block_size`` and ``K = 31`` in the case of
``zhuramanan``.
Raises
------
ValueError
HOG features mode must be either dense or sparse
ValueError
Algorithm must be either dalaltriggs or zhuramanan
ValueError
Number of orientation bins must be > 0
ValueError
Cell size (in pixels) must be > 0
ValueError
Block size (in cells) must be > 0
ValueError
Value for L2-norm clipping must be > 0.0
ValueError
Window height must be >= block size and <= image height
ValueError
Window width must be >= block size and <= image width
ValueError
Window unit must be either pixels or blocks
ValueError
Horizontal window step must be > 0
ValueError
Vertical window step must be > 0
ValueError
Window step unit must be either pixels or cells
References
----------
.. [1] N. Dalal and B. Triggs, "Histograms of oriented gradients for human
detection", Proceedings of the IEEE Conference on Computer Vision and
Pattern Recognition (CVPR), 2005.
.. [2] X. Zhu, D. Ramanan. "Face detection, pose estimation and landmark
localization in the wild", Proceedings of the IEEE Conference on
Computer Vision and Pattern Recognition (CVPR), 2012.
"""
# TODO: This is a temporary fix
# flip axis
pixels = np.rollaxis(pixels, 0, len(pixels.shape))
# Parse options
if mode not in ['dense', 'sparse']:
raise ValueError("HOG features mode must be either dense or sparse")
if algorithm not in ['dalaltriggs', 'zhuramanan']:
raise ValueError("Algorithm must be either dalaltriggs or zhuramanan")
if num_bins <= 0:
raise ValueError("Number of orientation bins must be > 0")
if cell_size <= 0:
raise ValueError("Cell size (in pixels) must be > 0")
if block_size <= 0:
raise ValueError("Block size (in cells) must be > 0")
if l2_norm_clip <= 0.0:
raise ValueError("Value for L2-norm clipping must be > 0.0")
if mode == 'dense':
if window_unit not in ['pixels', 'blocks']:
raise ValueError("Window unit must be either pixels or blocks")
window_height_temp = window_height
window_width_temp = window_width
if window_unit == 'blocks':
window_height_temp = window_height * block_size * cell_size
window_width_temp = window_width * block_size * cell_size
if (window_height_temp < block_size * cell_size or
window_height_temp > pixels.shape[0]):
raise ValueError("Window height must be >= block size and <= "
"image height")
        if (window_width_temp < block_size * cell_size or
window_width_temp > pixels.shape[1]):
raise ValueError("Window width must be >= block size and <= "
"image width")
if window_step_horizontal <= 0:
raise ValueError("Horizontal window step must be > 0")
if window_step_vertical <= 0:
raise ValueError("Vertical window step must be > 0")
if window_step_unit not in ['pixels', 'cells']:
raise ValueError("Window step unit must be either pixels or cells")
# Correct input image_data
pixels = np.asfortranarray(pixels)
pixels *= 255.
# Dense case
if mode == 'dense':
# Iterator parameters
if algorithm == 'dalaltriggs':
algorithm = 1
if window_unit == 'blocks':
block_in_pixels = cell_size * block_size
window_height = np.uint32(window_height * block_in_pixels)
window_width = np.uint32(window_width * block_in_pixels)
if window_step_unit == 'cells':
window_step_vertical = np.uint32(window_step_vertical *
cell_size)
window_step_horizontal = np.uint32(window_step_horizontal *
cell_size)
elif algorithm == 'zhuramanan':
algorithm = 2
if window_unit == 'blocks':
block_in_pixels = 3 * cell_size
window_height = np.uint32(window_height * block_in_pixels)
window_width = np.uint32(window_width * block_in_pixels)
if window_step_unit == 'cells':
window_step_vertical = np.uint32(window_step_vertical *
cell_size)
window_step_horizontal = np.uint32(window_step_horizontal *
cell_size)
iterator = WindowIterator(pixels, window_height, window_width,
window_step_horizontal,
window_step_vertical, padding)
# Sparse case
else:
# Create iterator
if algorithm == 'dalaltriggs':
algorithm = 1
window_size = cell_size * block_size
step = cell_size
else:
algorithm = 2
window_size = 3 * cell_size
step = cell_size
iterator = WindowIterator(pixels, window_size, window_size, step,
step, False)
# Print iterator's info
if verbose:
print(iterator)
# Compute HOG
hog_descriptor = iterator.HOG(algorithm, num_bins, cell_size, block_size,
signed_gradient, l2_norm_clip, verbose)
    # TODO: This is a temporary fix
# flip axis
hog_descriptor = WindowIteratorResult(
np.ascontiguousarray(np.rollaxis(hog_descriptor.pixels, -1)),
hog_descriptor.centres)
return hog_descriptor
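# --- Editor's example (not part of the original module) ---
# Hedged usage sketch for the sparse (classic Dalal & Triggs) setting; with
# the defaults above, each descriptor has
# num_bins * block_size * block_size = 9 * 2 * 2 = 36 channels.
def _example_hog_sparse():
    pixels = np.random.rand(1, 64, 64)  # single-channel 64x64 image
    return hog(pixels, mode='sparse', algorithm='dalaltriggs')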
@ndfeature
def igo(pixels, double_angles=False, verbose=False):
r"""
Extracts Image Gradient Orientation (IGO) features from the input image.
The output image has ``N * C`` number of channels, where ``N`` is the
number of channels of the original image and ``C = 2`` or ``C = 4``
depending on whether double angles are used.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
double_angles : `bool`, optional
Assume that ``phi`` represents the gradient orientations.
If this flag is ``False``, the features image is the concatenation of
``cos(phi)`` and ``sin(phi)``, thus 2 channels.
If ``True``, the features image is the concatenation of
``cos(phi)``, ``sin(phi)``, ``cos(2 * phi)``, ``sin(2 * phi)``, thus 4
channels.
verbose : `bool`, optional
Flag to print IGO related information.
Returns
-------
igo : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
The IGO features image. It has the same type and shape as the input
``pixels``. The output number of channels depends on the
``double_angles`` flag.
Raises
------
ValueError
Image has to be 2D in order to extract IGOs.
References
----------
.. [1] G. Tzimiropoulos, S. Zafeiriou and M. Pantic, "Subspace learning
from image gradient orientations", IEEE Transactions on Pattern Analysis
and Machine Intelligence, vol. 34, num. 12, p. 2454--2466, 2012.
"""
# check number of dimensions
if len(pixels.shape) != 3:
raise ValueError('IGOs only work on 2D images. Expects image data '
'to be 3D, channels + shape.')
n_img_chnls = pixels.shape[0]
# feature channels per image channel
feat_chnls = 2
if double_angles:
feat_chnls = 4
# compute gradients
grad = gradient(pixels)
# compute angles
grad_orient = np.angle(grad[:n_img_chnls] + 1j * grad[n_img_chnls:])
# compute igo image
igo_pixels = np.empty((n_img_chnls * feat_chnls,
pixels.shape[1], pixels.shape[2]),
dtype=pixels.dtype)
if double_angles:
dbl_grad_orient = 2 * grad_orient
# y angles
igo_pixels[:n_img_chnls] = np.sin(grad_orient)
igo_pixels[n_img_chnls:n_img_chnls*2] = np.sin(dbl_grad_orient)
# x angles
igo_pixels[n_img_chnls*2:n_img_chnls*3] = np.cos(grad_orient)
igo_pixels[n_img_chnls*3:] = np.cos(dbl_grad_orient)
else:
igo_pixels[:n_img_chnls] = np.sin(grad_orient) # y
igo_pixels[n_img_chnls:] = np.cos(grad_orient) # x
# print information
if verbose:
info_str = "IGO Features:\n"
info_str = "{} - Input image is {}W x {}H with {} channels.\n".format(
info_str, pixels.shape[2], pixels.shape[1], n_img_chnls)
info_str = "{} - Double angles are {}.\n".format(
info_str, 'enabled' if double_angles else 'disabled')
info_str = "{}Output image size {}W x {}H with {} channels.".format(
info_str, igo_pixels.shape[2], igo_pixels.shape[1], n_img_chnls)
print(info_str)
return igo_pixels
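# --- Editor's example (not part of the original module) ---
# Sketch of the channel-count rule above (C = 2, or 4 with double angles,
# per input channel), assuming raw ndarrays pass through @ndfeature.
def _example_igo_channels():
    pixels = np.random.rand(3, 32, 32)
    assert igo(pixels).shape[0] == 6                       # 3 channels * 2
    assert igo(pixels, double_angles=True).shape[0] == 12  # 3 channels * 4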
@ndfeature
def es(pixels, verbose=False):
r"""
Extracts Edge Structure (ES) features from the input image. The output image
has ``N * C`` number of channels, where ``N`` is the number of channels of
the original image and ``C = 2``.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either an image object itself or an array where the first axis
represents the number of channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
verbose : `bool`, optional
Flag to print ES related information.
Returns
-------
es : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
The ES features image. It has the same type and shape as the input
``pixels``. The output number of channels is ``C = 2``.
Raises
------
ValueError
Image has to be 2D in order to extract ES features.
References
----------
.. [1] T. Cootes, C. Taylor, "On representing edge structure for model
matching", Proceedings of the IEEE Conference on Computer Vision and
Pattern Recognition (CVPR), 2001.
"""
# check number of dimensions
if len(pixels.shape) != 3:
raise ValueError('ES features only work on 2D images. Expects '
'image data to be 3D, channels + shape.')
n_img_chnls = pixels.shape[0]
# feature channels per image channel
feat_channels = 2
# compute gradients
grad = gradient(pixels)
# compute magnitude
grad_abs = np.abs(grad[:n_img_chnls] + 1j * grad[n_img_chnls:])
# compute es image
grad_abs = grad_abs + np.median(grad_abs)
es_pixels = np.empty((pixels.shape[0] * feat_channels,
pixels.shape[1], pixels.shape[2]),
dtype=pixels.dtype)
es_pixels[:n_img_chnls] = grad[:n_img_chnls] / grad_abs
es_pixels[n_img_chnls:] = grad[n_img_chnls:] / grad_abs
# print information
if verbose:
info_str = "ES Features:\n"
info_str = "{} - Input image is {}W x {}H with {} channels.\n".format(
info_str, pixels.shape[2], pixels.shape[1], n_img_chnls)
info_str = "{}Output image size {}W x {}H with {} channels.".format(
info_str, es_pixels.shape[2], es_pixels.shape[1], n_img_chnls)
print(info_str)
return es_pixels
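# --- Editor's example (not part of the original module) ---
# Sketch of the channel-count rule above: C = 2 per input channel,
# assuming raw ndarrays pass through @ndfeature.
def _example_es_channels():
    pixels = np.random.rand(3, 32, 32)
    assert es(pixels).shape[0] == 6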
@ndfeature
def daisy(pixels, step=1, radius=15, rings=2, histograms=2, orientations=8,
normalization='l1', sigmas=None, ring_radii=None, verbose=False):
r"""
Extracts Daisy features from the input image. The output image has ``N * C``
number of channels, where ``N`` is the number of channels of the original
image and ``C`` is the feature channels determined by the input options.
Specifically, ``C = (rings * histograms + 1) * orientations``.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
step : `int`, optional
The sampling step that defines the density of the output image.
radius : `int`, optional
The radius (in pixels) of the outermost ring.
rings : `int`, optional
The number of rings to be used.
histograms : `int`, optional
The number of histograms sampled per ring.
orientations : `int`, optional
The number of orientations (bins) per histogram.
normalization : [ 'l1', 'l2', 'daisy', None ], optional
It defines how to normalize the descriptors
If 'l1' then L1-normalization is applied at each descriptor.
If 'l2' then L2-normalization is applied at each descriptor.
If 'daisy' then L2-normalization is applied at individual histograms.
If None then no normalization is employed.
sigmas : `list` of `float` or ``None``, optional
Standard deviation of spatial Gaussian smoothing for the centre
histogram and for each ring of histograms. The `list` of sigmas should
be sorted from the centre and out. I.e. the first sigma value defines
the spatial smoothing of the centre histogram and the last sigma value
defines the spatial smoothing of the outermost ring. Specifying sigmas
overrides the `rings` parameter by setting ``rings = len(sigmas) - 1``.
ring_radii : `list` of `float` or ``None``, optional
Radius (in pixels) for each ring. Specifying `ring_radii` overrides the
`rings` and `radius` parameters by setting ``rings = len(ring_radii)``
and ``radius = ring_radii[-1]``.
If both sigmas and ring_radii are given, they must satisfy ::
            len(ring_radii) == len(sigmas) - 1
since no radius is needed for the centre histogram.
verbose : `bool`
Flag to print Daisy related information.
Returns
-------
daisy : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
        The Daisy features image. It has the same type and shape as the input
``pixels``. The output number of channels is
``C = (rings * histograms + 1) * orientations``.
Raises
------
ValueError
len(sigmas)-1 != len(ring_radii)
ValueError
Invalid normalization method.
References
----------
.. [1] E. Tola, V. Lepetit and P. Fua, "Daisy: An efficient dense descriptor
applied to wide-baseline stereo", IEEE Transactions on Pattern Analysis
and Machine Intelligence, vol. 32, num. 5, p. 815-830, 2010.
"""
from menpo.external.skimage._daisy import _daisy
# Parse options
if sigmas is not None and ring_radii is not None \
and len(sigmas) - 1 != len(ring_radii):
raise ValueError('`len(sigmas)-1 != len(ring_radii)`')
if ring_radii is not None:
rings = len(ring_radii)
radius = ring_radii[-1]
if sigmas is not None:
rings = len(sigmas) - 1
if sigmas is None:
sigmas = [radius * (i + 1) / float(2 * rings) for i in range(rings)]
if ring_radii is None:
ring_radii = [radius * (i + 1) / float(rings) for i in range(rings)]
if normalization is None:
normalization = 'off'
if normalization not in ['l1', 'l2', 'daisy', 'off']:
raise ValueError('Invalid normalization method.')
# Compute daisy features
daisy_descriptor = _daisy(pixels, step=step, radius=radius, rings=rings,
histograms=histograms, orientations=orientations,
normalization=normalization, sigmas=sigmas,
ring_radii=ring_radii)
# print information
if verbose:
info_str = "Daisy Features:\n"
info_str = "{} - Input image is {}W x {}H with {} channels.\n".format(
info_str, pixels.shape[2], pixels.shape[1], pixels.shape[0])
info_str = "{} - Sampling step is {}.\n".format(info_str, step)
info_str = "{} - Radius of {} pixels, {} rings and {} histograms " \
"with {} orientations.\n".format(
info_str, radius, rings, histograms, orientations)
        if normalization != 'off':
            info_str = "{} - Using {} normalization.\n".format(info_str,
                                                               normalization)
        else:
            info_str = "{} - No normalization employed.\n".format(info_str)
info_str = "{}Output image size {}W x {}H x {}.".format(
info_str, daisy_descriptor.shape[2], daisy_descriptor.shape[1],
daisy_descriptor.shape[0])
print(info_str)
return daisy_descriptor
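# --- Editor's example (not part of the original module) ---
# Hedged usage sketch: with the defaults above, the output has
# C = (rings * histograms + 1) * orientations = (2 * 2 + 1) * 8 = 40 channels.
def _example_daisy_defaults():
    pixels = np.random.rand(1, 64, 64)
    return daisy(pixels)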
# TODO: Needs fixing ...
@winitfeature
def lbp(pixels, radius=None, samples=None, mapping_type='riu2',
window_step_vertical=1, window_step_horizontal=1,
window_step_unit='pixels', padding=True, verbose=False,
skip_checks=False):
r"""
Extracts Local Binary Pattern (LBP) features from the input image. The
output image has ``N * C`` number of channels, where ``N`` is the number of
channels of the original image and ``C`` is the number of radius/samples
values combinations that are used in the LBP computation.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
radius : `int` or `list` of `int` or ``None``, optional
It defines the radius of the circle (or circles) at which the sampling
points will be extracted. The radius (or radii) values must be greater
than zero. There must be a radius value for each samples value, thus
they both need to have the same length. If ``None``, then
``[1, 2, 3, 4]`` is used.
samples : `int` or `list` of `int` or ``None``, optional
It defines the number of sampling points that will be extracted at each
circle. The samples value (or values) must be greater than zero. There
must be a samples value for each radius value, thus they both need to
have the same length. If ``None``, then ``[8, 8, 8, 8]`` is used.
mapping_type : {``u2``, ``ri``, ``riu2``, ``none``}, optional
It defines the mapping type of the LBP codes. Select ``u2`` for
uniform-2 mapping, ``ri`` for rotation-invariant mapping, ``riu2`` for
uniform-2 and rotation-invariant mapping and ``none`` to use no mapping
and only the decimal values instead.
window_step_vertical : `float`, optional
Defines the vertical step by which the window is moved, thus it controls
the features density. The metric unit is defined by `window_step_unit`.
window_step_horizontal : `float`, optional
Defines the horizontal step by which the window is moved, thus it
controls the features density. The metric unit is defined by
`window_step_unit`.
window_step_unit : {``pixels``, ``window``}, optional
Defines the metric unit of the `window_step_vertical` and
`window_step_horizontal` parameters.
padding : `bool`, optional
If ``True``, the output image is padded with zeros to match the input
image's size.
verbose : `bool`, optional
Flag to print LBP related information.
skip_checks : `bool`, optional
If ``True``, do not perform any validation of the parameters.
Returns
-------
lbp : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
        The LBP features image. It has the same type and shape as the input
``pixels``. The output number of channels is
``C = len(radius) * len(samples)``.
Raises
------
ValueError
Radius and samples must both be either integers or lists
ValueError
Radius and samples must have the same length
ValueError
Radius must be > 0
ValueError
Radii must be > 0
ValueError
Samples must be > 0
ValueError
Mapping type must be u2, ri, riu2 or none
ValueError
Horizontal window step must be > 0
ValueError
Vertical window step must be > 0
ValueError
Window step unit must be either pixels or window
References
----------
.. [1] T. Ojala, M. Pietikainen, and T. Maenpaa, "Multiresolution gray-scale
and rotation invariant texture classification with local binary
patterns", IEEE Transactions on Pattern Analysis and Machine
Intelligence, vol. 24, num. 7, p. 971-987, 2002.
"""
    if radius is None:
        # list(...) so the isinstance checks below also hold on Python 3,
        # where range() is not a list
        radius = list(range(1, 5))
    if samples is None:
        samples = [8] * 4
    # TODO: This is a temporary fix
# flip axis
pixels = np.rollaxis(pixels, 0, len(pixels.shape))
if not skip_checks:
# Check parameters
if ((isinstance(radius, int) and isinstance(samples, list)) or
(isinstance(radius, list) and isinstance(samples, int))):
raise ValueError("Radius and samples must both be either integers "
"or lists")
elif isinstance(radius, list) and isinstance(samples, list):
if len(radius) != len(samples):
raise ValueError("Radius and samples must have the same "
"length")
if isinstance(radius, int) and radius < 1:
raise ValueError("Radius must be > 0")
elif isinstance(radius, list) and sum(r < 1 for r in radius) > 0:
raise ValueError("Radii must be > 0")
if isinstance(samples, int) and samples < 1:
raise ValueError("Samples must be > 0")
elif isinstance(samples, list) and sum(s < 1 for s in samples) > 0:
raise ValueError("Samples must be > 0")
if mapping_type not in ['u2', 'ri', 'riu2', 'none']:
raise ValueError("Mapping type must be u2, ri, riu2 or "
"none")
if window_step_horizontal <= 0:
raise ValueError("Horizontal window step must be > 0")
if window_step_vertical <= 0:
raise ValueError("Vertical window step must be > 0")
if window_step_unit not in ['pixels', 'window']:
raise ValueError("Window step unit must be either pixels or "
"window")
# Correct input image_data
pixels = np.asfortranarray(pixels)
# Parse options
radius = np.asfortranarray(radius)
samples = np.asfortranarray(samples)
window_height = np.uint32(2 * radius.max() + 1)
window_width = window_height
if window_step_unit == 'window':
window_step_vertical = np.uint32(window_step_vertical * window_height)
window_step_horizontal = np.uint32(window_step_horizontal *
window_width)
if mapping_type == 'u2':
mapping_type = 1
elif mapping_type == 'ri':
mapping_type = 2
elif mapping_type == 'riu2':
mapping_type = 3
else:
mapping_type = 0
# Create iterator object
iterator = WindowIterator(pixels, window_height, window_width,
window_step_horizontal, window_step_vertical,
padding)
# Print iterator's info
if verbose:
print(iterator)
# Compute LBP
lbp_descriptor = iterator.LBP(radius, samples, mapping_type, verbose)
# TODO: This is a temporary fix
# flip axis
lbp_descriptor = WindowIteratorResult(
np.ascontiguousarray(np.rollaxis(lbp_descriptor.pixels, -1)),
lbp_descriptor.centres)
return lbp_descriptor
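# --- Editor's example (not part of the original module) ---
# Hedged usage sketch: the defaults compute codes at radii [1, 2, 3, 4] with
# 8 samples each, giving 4 output channels per input channel.
def _example_lbp_defaults():
    pixels = np.random.rand(1, 64, 64)
    return lbp(pixels)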
@imgfeature
def normalize(img, scale_func=None, mode='all',
error_on_divide_by_zero=True):
r"""
Normalize the pixel values via mean centering and an optional scaling. By
default the scaling will be ``1.0``. The ``mode`` parameter selects
whether the normalisation is computed across all pixels in the image or
per-channel.
Parameters
----------
img : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
scale_func : `callable`, optional
Compute the scaling factor. Expects a single parameter and an optional
`axis` keyword argument and will be passed the entire pixel array.
Should return a 1D numpy array of one or more values.
mode : ``{all, per_channel}``, optional
If ``all``, the normalization is over all channels. If
``per_channel``, each channel individually is mean centred and
normalized in variance.
error_on_divide_by_zero : `bool`, optional
If ``True``, will raise a ``ValueError`` on dividing by zero.
If ``False``, will merely raise a warning and only those values
with non-zero denominators will be normalized.
Returns
-------
pixels : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
A normalized copy of the image that was passed in.
Raises
------
ValueError
If any of the denominators are 0 and ``error_on_divide_by_zero`` is
``True``.
"""
if scale_func is None:
def scale_func(_, axis=None):
return np.array([1.0])
pixels = img.as_vector(keep_channels=True)
if mode == 'all':
centered_pixels = pixels - np.mean(pixels)
scale_factor = scale_func(centered_pixels)
elif mode == 'per_channel':
centered_pixels = pixels - np.mean(pixels, axis=1, keepdims=1)
scale_factor = scale_func(centered_pixels, axis=1).reshape([-1, 1])
else:
raise ValueError("Supported modes are {{'all', 'per_channel'}} - '{}' "
"is not known".format(mode))
zero_denom = (scale_factor == 0).ravel()
    any_zero_denom = np.any(zero_denom)
    if error_on_divide_by_zero and any_zero_denom:
        raise ValueError("Computed scale factor cannot be 0.0")
    elif any_zero_denom:
        warnings.warn('One or more of the scale factors are 0.0 and thus '
                      'these entries will be skipped during normalization.')
non_zero_denom = ~zero_denom
centered_pixels[non_zero_denom] = (centered_pixels[non_zero_denom] /
scale_factor[non_zero_denom])
return img.from_vector(centered_pixels)
else:
return img.from_vector(centered_pixels / scale_factor)
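# --- Editor's example (not part of the original module) ---
# Sketch of per-channel normalization via a custom scale_func (this mirrors
# normalize_std below); `menpo.image.Image` is the assumed import path.
def _example_normalize_per_channel():
    from menpo.image import Image
    img = Image(np.random.rand(3, 32, 32))
    return normalize(img, scale_func=np.std, mode='per_channel')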
@ndfeature
def normalize_norm(pixels, mode='all', error_on_divide_by_zero=True):
r"""
Normalize the pixels to be mean centred and have unit norm. The ``mode``
parameter selects whether the normalisation is computed across all pixels in
the image or per-channel.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
mode : ``{all, per_channel}``, optional
If ``all``, the normalization is over all channels. If
``per_channel``, each channel individually is mean centred and
normalized in variance.
error_on_divide_by_zero : `bool`, optional
If ``True``, will raise a ``ValueError`` on dividing by zero.
If ``False``, will merely raise a warning and only those values
with non-zero denominators will be normalized.
Returns
-------
pixels : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
A normalized copy of the image that was passed in.
Raises
------
ValueError
If any of the denominators are 0 and ``error_on_divide_by_zero`` is
``True``.
"""
def unit_norm(x, axis=None):
return np.linalg.norm(x, axis=axis)
return normalize(pixels, scale_func=unit_norm, mode=mode,
error_on_divide_by_zero=error_on_divide_by_zero)
@ndfeature
def normalize_std(pixels, mode='all', error_on_divide_by_zero=True):
r"""
Normalize the pixels to be mean centred and have unit standard deviation.
The ``mode`` parameter selects whether the normalisation is computed across
all pixels in the image or per-channel.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
mode : ``{all, per_channel}``, optional
If ``all``, the normalization is over all channels. If
``per_channel``, each channel individually is mean centred and
normalized in variance.
error_on_divide_by_zero : `bool`, optional
If ``True``, will raise a ``ValueError`` on dividing by zero.
If ``False``, will merely raise a warning and only those values
with non-zero denominators will be normalized.
Returns
-------
pixels : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
A normalized copy of the image that was passed in.
Raises
------
ValueError
If any of the denominators are 0 and ``error_on_divide_by_zero`` is
``True``.
"""
def unit_std(x, axis=None):
return np.std(x, axis=axis)
return normalize(pixels, scale_func=unit_std, mode=mode,
error_on_divide_by_zero=error_on_divide_by_zero)
@ndfeature
def normalize_var(pixels, mode='all', error_on_divide_by_zero=True):
r"""
Normalize the pixels to be mean centred and normalize according
to the variance.
The ``mode`` parameter selects whether the normalisation is computed across
all pixels in the image or per-channel.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
mode : ``{all, per_channel}``, optional
If ``all``, the normalization is over all channels. If
``per_channel``, each channel individually is mean centred and
normalized in variance.
error_on_divide_by_zero : `bool`, optional
If ``True``, will raise a ``ValueError`` on dividing by zero.
If ``False``, will merely raise a warning and only those values
with non-zero denominators will be normalized.
Returns
-------
pixels : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
A normalized copy of the image that was passed in.
Raises
------
ValueError
If any of the denominators are 0 and ``error_on_divide_by_zero`` is
``True``.
"""
def unit_var(x, axis=None):
return np.var(x, axis=axis)
return normalize(pixels, scale_func=unit_var, mode=mode,
error_on_divide_by_zero=error_on_divide_by_zero)
@ndfeature
def no_op(pixels):
r"""
A no operation feature - does nothing but return a copy of the pixels
passed in.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
Returns
-------
pixels : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
A copy of the image that was passed in.
"""
return pixels.copy()
def features_selection_widget():
r"""
Widget that allows for easy selection of a features function and its
options. It also has a 'preview' tab for visual inspection. It returns a
`list` of length 1 with the selected features function closure.
Returns
-------
features_function : `list` of length ``1``
The function closure of the features function using `functools.partial`.
So the function can be called as: ::
features_image = features_function[0](image)
Examples
--------
The widget can be invoked as ::
from menpo.feature import features_selection_widget
features_fun = features_selection_widget()
And the returned function can be used as ::
import menpo.io as mio
image = mio.import_builtin_asset.lenna_png()
features_image = features_fun[0](image)
"""
from menpowidgets import features_selection
return features_selection()
| yuxiang-zhou/menpo | menpo/feature/features.py | Python | bsd-3-clause | 41,854 |
# %load ../../src/log_utils.py
# %%writefile ../../src/log_utils.py
"""
Author: Jim Clauwaert
Created in the scope of my PhD
"""
import json
import datetime as dt
def LogInit(function, model, parameters, localarg):
dataLabel=''
pw = ''
if localarg['pw'] is True:
pw = 'pw'
if localarg['dataLabel'] is not None:
dataLabel = localarg['dataLabel']
time = dt.datetime.now().strftime('%Y-%m-%d_%H-%M')
    parString = ''.join(str(num) for num in parameters)
    LOGFILENAME = '{}_{}_{}-{}-{}_{}_-{}_{}'.format(
        time, function, model[0].upper(), model[1], model[2], parString,
        dataLabel, pw)
RESULTLOG = '../logs/result_logger/'+LOGFILENAME
MAINLOG = '../logs/log.txt'
output = '\n\nSTARTED '+LOGFILENAME + '\n\targuments: '+str(localarg)
    with open(MAINLOG, 'a') as f:
        f.write(output)
    print(output)
return LOGFILENAME, MAINLOG, RESULTLOG
def LogWrap(MAINLOG, RESULTLOG, results):
output=''
    if isinstance(results, list):
for result in results:
output=output+'\n'+result
else:
output = results
    with open(RESULTLOG + '.txt', 'w') as f:
        f.write(output)
    outputWrap = '\n...FINISHED'
    with open(MAINLOG, 'a') as f:
        f.write(outputWrap)
    print(outputWrap)
| Kleurenprinter/prompred | src/log_utils.py | Python | bsd-3-clause | 1,404 |
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import unittest
from telemetry.core import discover
import gpu_test_base
def _GetGpuDir(*subdirs):
gpu_dir = os.path.dirname(os.path.dirname(__file__))
return os.path.join(gpu_dir, *subdirs)
#
# Unit tests verifying invariants of classes in GpuTestBase.
#
class NoOverridesTest(unittest.TestCase):
def testValidatorBase(self):
all_validators = discover.DiscoverClasses(
_GetGpuDir('gpu_tests'), _GetGpuDir(),
gpu_test_base.ValidatorBase,
index_by_class_name=True).values()
self.assertGreater(len(all_validators), 0)
for validator in all_validators:
self.assertEquals(gpu_test_base.ValidatorBase.ValidateAndMeasurePage,
validator.ValidateAndMeasurePage,
'Class %s should not override ValidateAndMeasurePage'
% validator.__name__)
def testPageBase(self):
all_pages = discover.DiscoverClasses(
_GetGpuDir(), _GetGpuDir(),
gpu_test_base.PageBase,
index_by_class_name=True).values()
self.assertGreater(len(all_pages), 0)
for page in all_pages:
      self.assertEquals(gpu_test_base.PageBase.RunNavigateSteps,
                        page.RunNavigateSteps,
                        'Class %s should not override RunNavigateSteps'
                        % page.__name__)
self.assertEquals(gpu_test_base.PageBase.RunPageInteractions,
page.RunPageInteractions,
'Class %s should not override RunPageInteractions'
% page.__name__)
| lihui7115/ChromiumGStreamerBackend | content/test/gpu/gpu_tests/gpu_test_base_unittest.py | Python | bsd-3-clause | 1,728 |
"""Indexer objects for computing start/end window bounds for rolling operations"""
from datetime import timedelta
from typing import (
Dict,
Optional,
Tuple,
Type,
)
import numpy as np
from pandas._libs.window.indexers import calculate_variable_window_bounds
from pandas.util._decorators import Appender
from pandas.core.dtypes.common import ensure_platform_int
from pandas.tseries.offsets import Nano
get_window_bounds_doc = """
Computes the bounds of a window.
Parameters
----------
num_values : int, default 0
number of values that will be aggregated over
window_size : int, default 0
the number of rows in a window
min_periods : int, default None
min_periods passed from the top level rolling API
center : bool, default None
center passed from the top level rolling API
closed : str, default None
closed passed from the top level rolling API
win_type : str, default None
win_type passed from the top level rolling API
Returns
-------
A tuple of ndarray[int64]s, indicating the boundaries of each
window
"""
class BaseIndexer:
"""Base class for window bounds calculations."""
def __init__(
self, index_array: Optional[np.ndarray] = None, window_size: int = 0, **kwargs
):
"""
Parameters
----------
**kwargs :
keyword arguments that will be available when get_window_bounds is called
"""
self.index_array = index_array
self.window_size = window_size
# Set user defined kwargs as attributes that can be used in get_window_bounds
for key, value in kwargs.items():
setattr(self, key, value)
@Appender(get_window_bounds_doc)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: Optional[int] = None,
center: Optional[bool] = None,
closed: Optional[str] = None,
) -> Tuple[np.ndarray, np.ndarray]:
raise NotImplementedError
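# --- Editor's example (not part of the original module) ---
# A minimal sketch of a custom indexer: each window covers the previous
# `window_size` rows, excluding the current row (so the first window is
# empty). Usage, assuming a DataFrame `df`:
#     df.rolling(TrailingWindowIndexer(window_size=2), min_periods=1).sum()
class TrailingWindowIndexer(BaseIndexer):
    """Editor's toy indexer: trailing windows that exclude the current row."""

    def get_window_bounds(
        self,
        num_values: int = 0,
        min_periods: Optional[int] = None,
        center: Optional[bool] = None,
        closed: Optional[str] = None,
    ) -> Tuple[np.ndarray, np.ndarray]:
        end = np.arange(num_values, dtype=np.int64)       # exclusive end = current row
        start = np.clip(end - self.window_size, 0, None)  # at most window_size rows back
        return start, end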
class FixedWindowIndexer(BaseIndexer):
"""Creates window boundaries that are of fixed length."""
@Appender(get_window_bounds_doc)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: Optional[int] = None,
center: Optional[bool] = None,
closed: Optional[str] = None,
) -> Tuple[np.ndarray, np.ndarray]:
if center:
offset = (self.window_size - 1) // 2
else:
offset = 0
end = np.arange(1 + offset, num_values + 1 + offset, dtype="int64")
start = end - self.window_size
if closed in ["left", "both"]:
start -= 1
if closed in ["left", "neither"]:
end -= 1
end = np.clip(end, 0, num_values)
start = np.clip(start, 0, num_values)
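        # Editor's note: e.g. num_values=4, window_size=2, center=False,
        # closed=None gives end = [1, 2, 3, 4] and start = [0, 0, 1, 2] after
        # clipping, i.e. half-open windows [0, 1), [0, 2), [1, 3), [2, 4).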
return start, end
class VariableWindowIndexer(BaseIndexer):
"""Creates window boundaries that are of variable length, namely for time series."""
@Appender(get_window_bounds_doc)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: Optional[int] = None,
center: Optional[bool] = None,
closed: Optional[str] = None,
) -> Tuple[np.ndarray, np.ndarray]:
# error: Argument 4 to "calculate_variable_window_bounds" has incompatible
# type "Optional[bool]"; expected "bool"
# error: Argument 6 to "calculate_variable_window_bounds" has incompatible
# type "Optional[ndarray]"; expected "ndarray"
return calculate_variable_window_bounds(
num_values,
self.window_size,
min_periods,
center, # type: ignore[arg-type]
closed,
self.index_array, # type: ignore[arg-type]
)
class VariableOffsetWindowIndexer(BaseIndexer):
"""Calculate window boundaries based on a non-fixed offset such as a BusinessDay"""
def __init__(
self,
index_array: Optional[np.ndarray] = None,
window_size: int = 0,
index=None,
offset=None,
**kwargs,
):
super().__init__(index_array, window_size, **kwargs)
self.index = index
self.offset = offset
@Appender(get_window_bounds_doc)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: Optional[int] = None,
center: Optional[bool] = None,
closed: Optional[str] = None,
) -> Tuple[np.ndarray, np.ndarray]:
        # if the window is variable (an index is given), default is 'right';
        # otherwise default is 'both'
if closed is None:
closed = "right" if self.index is not None else "both"
right_closed = closed in ["right", "both"]
left_closed = closed in ["left", "both"]
if self.index[num_values - 1] < self.index[0]:
index_growth_sign = -1
else:
index_growth_sign = 1
start = np.empty(num_values, dtype="int64")
start.fill(-1)
end = np.empty(num_values, dtype="int64")
end.fill(-1)
start[0] = 0
# right endpoint is closed
if right_closed:
end[0] = 1
# right endpoint is open
else:
end[0] = 0
# start is start of slice interval (including)
# end is end of slice interval (not including)
for i in range(1, num_values):
end_bound = self.index[i]
start_bound = self.index[i] - index_growth_sign * self.offset
# left endpoint is closed
if left_closed:
start_bound -= Nano(1)
# advance the start bound until we are
# within the constraint
start[i] = i
for j in range(start[i - 1], i):
if (self.index[j] - start_bound) * index_growth_sign > timedelta(0):
start[i] = j
break
# end bound is previous end
# or current index
if (self.index[end[i - 1]] - end_bound) * index_growth_sign <= timedelta(0):
end[i] = i + 1
else:
end[i] = end[i - 1]
# right endpoint is open
if not right_closed:
end[i] -= 1
return start, end
class ExpandingIndexer(BaseIndexer):
"""Calculate expanding window bounds, mimicking df.expanding()"""
@Appender(get_window_bounds_doc)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: Optional[int] = None,
center: Optional[bool] = None,
closed: Optional[str] = None,
) -> Tuple[np.ndarray, np.ndarray]:
return (
np.zeros(num_values, dtype=np.int64),
np.arange(1, num_values + 1, dtype=np.int64),
)
class FixedForwardWindowIndexer(BaseIndexer):
"""
Creates window boundaries for fixed-length windows that include the
current row.
Examples
--------
>>> df = pd.DataFrame({'B': [0, 1, 2, np.nan, 4]})
>>> df
B
0 0.0
1 1.0
2 2.0
3 NaN
4 4.0
>>> indexer = pd.api.indexers.FixedForwardWindowIndexer(window_size=2)
>>> df.rolling(window=indexer, min_periods=1).sum()
B
0 1.0
1 3.0
2 2.0
3 4.0
4 4.0
"""
@Appender(get_window_bounds_doc)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: Optional[int] = None,
center: Optional[bool] = None,
closed: Optional[str] = None,
) -> Tuple[np.ndarray, np.ndarray]:
if center:
raise ValueError("Forward-looking windows can't have center=True")
if closed is not None:
raise ValueError(
"Forward-looking windows don't support setting the closed argument"
)
start = np.arange(num_values, dtype="int64")
end_s = start[: -self.window_size] + self.window_size
end_e = np.full(self.window_size, num_values, dtype="int64")
end = np.concatenate([end_s, end_e])
return start, end
class GroupbyIndexer(BaseIndexer):
"""Calculate bounds to compute groupby rolling, mimicking df.groupby().rolling()"""
def __init__(
self,
index_array: Optional[np.ndarray] = None,
window_size: int = 0,
groupby_indicies: Optional[Dict] = None,
window_indexer: Type[BaseIndexer] = BaseIndexer,
indexer_kwargs: Optional[Dict] = None,
**kwargs,
):
"""
Parameters
----------
index_array : np.ndarray or None
np.ndarray of the index of the original object that we are performing
a chained groupby operation over. This index has been pre-sorted relative to
the groups
window_size : int
window size during the windowing operation
groupby_indicies : dict or None
dict of {group label: [positional index of rows belonging to the group]}
window_indexer : BaseIndexer
BaseIndexer class determining the start and end bounds of each group
indexer_kwargs : dict or None
Custom kwargs to be passed to window_indexer
**kwargs :
keyword arguments that will be available when get_window_bounds is called
"""
self.groupby_indicies = groupby_indicies or {}
self.window_indexer = window_indexer
self.indexer_kwargs = indexer_kwargs or {}
super().__init__(
index_array, self.indexer_kwargs.pop("window_size", window_size), **kwargs
)
@Appender(get_window_bounds_doc)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: Optional[int] = None,
center: Optional[bool] = None,
closed: Optional[str] = None,
) -> Tuple[np.ndarray, np.ndarray]:
# 1) For each group, get the indices that belong to the group
# 2) Use the indices to calculate the start & end bounds of the window
# 3) Append the window bounds in group order
start_arrays = []
end_arrays = []
window_indicies_start = 0
for key, indices in self.groupby_indicies.items():
index_array: np.ndarray | None
if self.index_array is not None:
index_array = self.index_array.take(ensure_platform_int(indices))
else:
index_array = self.index_array
indexer = self.window_indexer(
index_array=index_array,
window_size=self.window_size,
**self.indexer_kwargs,
)
start, end = indexer.get_window_bounds(
len(indices), min_periods, center, closed
)
start = start.astype(np.int64)
end = end.astype(np.int64)
# Cannot use groupby_indicies as they might not be monotonic with the object
# we're rolling over
window_indicies = np.arange(
window_indicies_start, window_indicies_start + len(indices)
)
window_indicies_start += len(indices)
# Extend as we'll be slicing window like [start, end)
window_indicies = np.append(
window_indicies, [window_indicies[-1] + 1]
).astype(np.int64)
start_arrays.append(window_indicies.take(ensure_platform_int(start)))
end_arrays.append(window_indicies.take(ensure_platform_int(end)))
start = np.concatenate(start_arrays)
end = np.concatenate(end_arrays)
return start, end
class ExponentialMovingWindowIndexer(BaseIndexer):
"""Calculate ewm window bounds (the entire window)"""
@Appender(get_window_bounds_doc)
def get_window_bounds(
self,
num_values: int = 0,
min_periods: Optional[int] = None,
center: Optional[bool] = None,
closed: Optional[str] = None,
) -> Tuple[np.ndarray, np.ndarray]:
return np.array([0], dtype=np.int64), np.array([num_values], dtype=np.int64)
| datapythonista/pandas | pandas/core/window/indexers.py | Python | bsd-3-clause | 12,001 |
from django.utils.translation import ugettext_lazy as _
from cms.models import CMSPlugin
from cms.plugin_base import CMSPluginBase
from cms.plugin_pool import plugin_pool
from .forms import MultiColumnForm
from .models import MultiColumns, Column
class MultiColumnPlugin(CMSPluginBase):
model = MultiColumns
module = _("Multi Columns")
name = _("Multi Columns")
render_template = "cms/plugins/multi_column.html"
allow_children = True
child_classes = ["ColumnPlugin"]
form = MultiColumnForm
def save_model(self, request, obj, form, change):
response = super(MultiColumnPlugin, self).save_model(
request, obj, form, change
)
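        # Create the requested number of child Column plugins, appending each
        # after the columns that already exist under this parent.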
for x in range(int(form.cleaned_data['create'])):
col = Column(
parent=obj,
placeholder=obj.placeholder,
language=obj.language,
width=form.cleaned_data['create_width'],
position=CMSPlugin.objects.filter(parent=obj).count(),
plugin_type=ColumnPlugin.__name__
)
col.save()
return response
class ColumnPlugin(CMSPluginBase):
model = Column
module = _("Multi Columns")
name = _("Column")
render_template = "cms/plugins/column.html"
parent_classes = ["MultiColumnPlugin"]
allow_children = True
plugin_pool.register_plugin(MultiColumnPlugin)
plugin_pool.register_plugin(ColumnPlugin)
| divio/djangocms-column | djangocms_column/cms_plugins.py | Python | bsd-3-clause | 1,442 |
# -*- coding: utf-8 -*-
"""
Display the battery level.
Configuration parameters:
- blocks : a string, where each character represents battery level
especially useful when using icon fonts (e.g. FontAwesome)
default is "_▁▂▃▄▅▆▇█"
- cache_timeout : a timeout to refresh the battery state
default is 30
- charging_character : a character to represent charging battery
especially useful when using icon fonts (e.g. FontAwesome)
default is "⚡"
- color_bad : a color to use when the battery level is bad
None means get it from i3status config
default is None
- color_charging : a color to use when the battery is charging
None means get it from i3status config
default is "#FCE94F"
- color_degraded : a color to use when the battery level is degraded
None means get it from i3status config
default is None
- color_good : a color to use when the battery level is good
None means get it from i3status config
default is None
- format : string that formats the output. See placeholders below.
default is "{icon}"
- hide_when_full : hide any information when battery is fully charged
default is False
- notification : show current battery state as notification on click
default is False
Format of status string placeholders:
{ascii_bar} - a string of ascii characters representing the battery level,
an alternative visualization to '{icon}' option
{icon} - a character representing the battery level,
as defined by the 'blocks' and 'charging_character' parameters
{percent} - the remaining battery percentage (previously '{}')
Obsolete configuration parameters:
- mode : an old way to define 'format' parameter. The current behavior is:
- if 'format' is specified, this parameter is completely ignored
- if the value is 'ascii_bar', the 'format' is set to "{ascii_bar}"
- if the value is 'text', the 'format' is set to "Battery: {percent}"
- all other values are ignored
- there is no default value for this parameter
- show_percent_with_blocks : an old way to define 'format' parameter:
- if 'format' is specified, this parameter is completely ignored
- if the value is True, the 'format' is set to "{icon} {percent}%"
- there is no default value for this parameter
Requires:
- the 'acpi' command line
@author shadowprince, AdamBSteele, maximbaz
@license Eclipse Public License
"""
from __future__ import division # python2 compatibility
from time import time
import math
import subprocess
BLOCKS = ["_", "▁", "▂", "▃", "▄", "▅", "▆", "▇", "█"]
CHARGING_CHARACTER = "⚡"
EMPTY_BLOCK_CHARGING = '|'
EMPTY_BLOCK_DISCHARGING = '⍀'
FULL_BLOCK = '█'
FORMAT = "{icon}"
class Py3status:
"""
"""
# available configuration parameters
blocks = BLOCKS
cache_timeout = 30
charging_character = CHARGING_CHARACTER
color_bad = None
color_charging = "#FCE94F"
color_degraded = None
color_good = None
format = FORMAT
hide_when_full = False
notification = False
# obsolete configuration parameters
mode = None
show_percent_with_blocks = None
def battery_level(self, i3s_output_list, i3s_config):
self.i3s_config = i3s_config
self.i3s_output_list = i3s_output_list
self._refresh_battery_info()
self._provide_backwards_compatibility()
self._update_icon()
self._update_ascii_bar()
self._update_full_text()
return self._build_response()
def on_click(self, i3s_output_list, i3s_config, event):
"""
Display a notification with the remaining charge time.
"""
if self.notification and self.time_remaining:
            subprocess.call(
                ['notify-send', '{}'.format(self.time_remaining),
                 '-t', '4000'],
                stdout=open('/dev/null', 'w'),
                stderr=open('/dev/null', 'w'))
def _provide_backwards_compatibility(self):
# Backwards compatibility for 'mode' parameter
if self.format == FORMAT and self.mode == 'ascii_bar':
self.format = "{ascii_bar}"
if self.format == FORMAT and self.mode == 'text':
self.format = "Battery: {percent}"
# Backwards compatibility for 'show_percent_with_blocks' parameter
if self.format == FORMAT and self.show_percent_with_blocks:
self.format = "{icon} {percent}%"
# Backwards compatibility for '{}' option in format string
self.format = self.format.replace('{}', '{percent}')
def _refresh_battery_info(self):
# Example acpi raw output: "Battery 0: Discharging, 43%, 00:59:20 remaining"
acpi_raw = subprocess.check_output(["acpi"], stderr=subprocess.STDOUT)
acpi_unicode = acpi_raw.decode("UTF-8")
# Example list: ['Battery', '0:', 'Discharging', '43%', '00:59:20', 'remaining']
        self.acpi_list = acpi_unicode.split(' ')
        self.charging = self.acpi_list[2][:8] == "Charging"
        self.percent_charged = int(self.acpi_list[3][:-2])
        # Editor's fix: on_click() reads self.time_remaining, which was never
        # set; keep the trailing "HH:MM:SS remaining" text when acpi gives one.
        self.time_remaining = (' '.join(self.acpi_list[4:]).strip()
                               if len(self.acpi_list) > 4 else None)
def _update_ascii_bar(self):
self.ascii_bar = FULL_BLOCK * int(self.percent_charged / 10)
if self.charging:
self.ascii_bar += EMPTY_BLOCK_CHARGING * (
10 - int(self.percent_charged / 10))
else:
self.ascii_bar += EMPTY_BLOCK_DISCHARGING * (
10 - int(self.percent_charged / 10))
def _update_icon(self):
if self.charging:
self.icon = self.charging_character
else:
self.icon = self.blocks[int(math.ceil(self.percent_charged / 100 *
(len(self.blocks) - 1)))]
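        # Editor's note: e.g. 43% with the 9 default blocks maps to
        # ceil(0.43 * 8) = 4, i.e. self.blocks[4] == "▄".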
def _update_full_text(self):
self.full_text = self.format.format(ascii_bar=self.ascii_bar,
icon=self.icon,
percent=self.percent_charged)
def _build_response(self):
self.response = {}
self._set_bar_text()
self._set_bar_color()
self._set_cache_timeout()
return self.response
def _set_bar_text(self):
if self.percent_charged == 100 and self.hide_when_full:
self.response['full_text'] = ''
else:
self.response['full_text'] = self.full_text
def _set_bar_color(self):
if self.charging:
self.response['color'] = self.color_charging
        elif self.percent_charged < 10:
            self.response['color'] = (self.color_bad or
                                      self.i3s_config['color_bad'])
        elif self.percent_charged < 30:
            self.response['color'] = (self.color_degraded or
                                      self.i3s_config['color_degraded'])
        elif self.percent_charged == 100:
            self.response['color'] = (self.color_good or
                                      self.i3s_config['color_good'])
def _set_cache_timeout(self):
self.response['cached_until'] = time() + self.cache_timeout
if __name__ == "__main__":
from time import sleep
x = Py3status()
config = {
'color_bad': '#FF0000',
'color_degraded': '#FFFF00',
'color_good': '#00FF00',
}
while True:
print(x.battery_level([], config))
sleep(1)
|
hburg1234/py3status
|
py3status/modules/battery_level.py
|
Python
|
bsd-3-clause
| 7,421
|
from kraken.core.maths import *
from kraken.core.maths.xfo import Xfo, xfoFromDirAndUpV, aimAt
from kraken.core.maths.rotation_order import RotationOrder
from kraken.core.maths.constants import *
from kraken.core.objects.components.base_example_component import BaseExampleComponent
from kraken.core.objects.attributes.attribute_group import AttributeGroup
from kraken.core.objects.attributes.scalar_attribute import ScalarAttribute
from kraken.core.objects.attributes.bool_attribute import BoolAttribute
from kraken.core.objects.attributes.string_attribute import StringAttribute
from kraken.core.objects.attributes.integer_attribute import IntegerAttribute
from kraken.core.objects.constraints.pose_constraint import PoseConstraint
from kraken.core.objects.component_group import ComponentGroup
from kraken.core.objects.hierarchy_group import HierarchyGroup
from kraken.core.objects.transform import Transform
from kraken.core.objects.joint import Joint
from kraken.core.objects.space import Space
from kraken.core.objects.control import Control
from kraken.core.objects.locator import Locator
from kraken.core.objects.operators.kl_operator import KLOperator
from kraken.core.profiler import Profiler
from kraken.helpers.utility_methods import logHierarchy
from OSS.OSS_control import *
from OSS.OSS_component import OSS_Component
COMPONENT_NAME = "limb"
# Sweet Sweet
class OSSLimbComponent(OSS_Component):
"""Limb Component"""
def __init__(self, name=COMPONENT_NAME, parent=None):
super(OSSLimbComponent, self).__init__(name, parent)
# ===========
# Declare IO
# ===========
# Declare Inputs Xfos
        # If useOtherIKGoalInput is True, this will be the actual IK goal, as offset by another component such as the foot
self.ikgoal_cmpIn = None
# Declare Output Xfos
self.uplimb_cmpOut = self.createOutput('uplimb', dataType='Xfo', parent=self.outputHrcGrp).getTarget()
self.lolimb_cmpOut = self.createOutput('lolimb', dataType='Xfo', parent=self.outputHrcGrp).getTarget()
self.endlimb_cmpOut = self.createOutput('endlimb', dataType='Xfo', parent=self.outputHrcGrp).getTarget()
self.endTwistParent_cmpIn = self.createInput('endTwistParent', dataType='Xfo', parent=self.inputHrcGrp).getTarget()
# Declare Input Attrs
# Declare Output Attrs
self.drawDebugOutputAttr = self.createOutput('drawDebug', dataType='Boolean', value=False, parent=self.cmpOutputAttrGrp).getTarget()
class OSSLimbComponentGuide(OSSLimbComponent):
"""Limb Component Guide"""
def __init__(self, name=COMPONENT_NAME, parent=None):
Profiler.getInstance().push("Construct Limb Guide Component:" + name)
super(OSSLimbComponentGuide, self).__init__(name, parent)
# =========
# Controls
        # =========
# Guide Settings
self.untwistUplimb = BoolAttribute('untwistUplimb', value=False, parent=self.guideSettingsAttrGrp)
self.addPartialJoints = BoolAttribute('addPartialJoints', value=False, parent=self.guideSettingsAttrGrp)
self.addMidControlsInput = BoolAttribute('addMidControls', value=True, parent=self.guideSettingsAttrGrp)
self.useOtherIKGoalInput = BoolAttribute('useOtherIKGoal', value=True, parent=self.guideSettingsAttrGrp)
self.uplimbName = StringAttribute('uplimbName', value="uplimb", parent=self.guideSettingsAttrGrp)
self.lolimbName = StringAttribute('lolimbName', value="lolimb", parent=self.guideSettingsAttrGrp)
self.ikHandleName = StringAttribute('ikHandleName', value="limbIK", parent=self.guideSettingsAttrGrp)
self.FKIKComponent = StringAttribute('FKIKComponent', value="limb", parent=self.guideSettingsAttrGrp)
self.addTwistJoints = BoolAttribute('addTwistJoints', value=False, parent=self.guideSettingsAttrGrp)
self.uplimbNumTwistJoints = IntegerAttribute('uplimbNumTwistJoints', value=5, minValue=2, maxValue=20, parent=self.guideSettingsAttrGrp)
self.lolimbNumTwistJoints = IntegerAttribute('lolimbNumTwistJoints', value=5, minValue=2, maxValue=20, parent=self.guideSettingsAttrGrp)
# Guide Controls
self.uplimbCtrl = Control('uplimb', parent=self.ctrlCmpGrp, shape="sphere")
self.lolimbCtrl = Control('lolimb', parent=self.ctrlCmpGrp, shape="sphere")
self.handleCtrl = Control('handle', parent=self.ctrlCmpGrp, shape="jack")
self.useOtherIKGoalInput.setValueChangeCallback(self.updateUseOtherIKGoal)
data = {
"name": name,
"location": "L",
"uplimbXfo": Xfo(Vec3(0.9811, 9.769, -0.4572)),
"lolimbXfo": Xfo(Vec3(1.4488, 5.4418, -0.5348)),
"handleXfo": Xfo(Vec3(1.85, 1.2, -1.2)),
}
self.loadData(data)
Profiler.getInstance().pop()
# =============
# Data Methods
# =============
def saveData(self):
"""Save the data for the component to be persisted.
Return:
The JSON data object
"""
data = super(OSSLimbComponentGuide, self).saveData()
data['uplimbXfo'] = self.uplimbCtrl.xfo
data['lolimbXfo'] = self.lolimbCtrl.xfo
data['handleXfo'] = self.handleCtrl.xfo
data['globalComponentCtrlSize'] = self.globalComponentCtrlSizeInputAttr.getValue()
return data
def loadData(self, data):
"""Load a saved guide representation from persisted data.
Arguments:
data -- object, The JSON data object.
Return:
True if successful.
"""
#Reset all shapes, but really we should just recreate all controls from loadData instead of init
for ctrl in self.getHierarchyNodes(classType="Control"):
ctrl.setShape(ctrl.getShape())
#Grab the guide settings in case we want to use them here (and are not stored in data arg)
existing_data = self.saveData()
existing_data.update(data)
data = existing_data
super(OSSLimbComponentGuide, self).loadData(data)
if "uplimbXfo" in data.keys():
self.uplimbCtrl.xfo = data['uplimbXfo']
if "lolimbXfo" in data.keys():
self.lolimbCtrl.xfo = data['lolimbXfo']
if "handleXfo" in data.keys():
self.handleCtrl.xfo = data['handleXfo']
globalScale = data['globalComponentCtrlSize']
        self.globalScaleVec = Vec3(globalScale, globalScale, globalScale)
self.uplimbCtrl.scalePoints(self.globalScaleVec)
self.lolimbCtrl.scalePoints(self.globalScaleVec)
self.handleCtrl.scalePoints(self.globalScaleVec)
# maybe roll this into a createControls() function like in the Rig object?
if "uplimbName" in data.keys():
self.uplimbCtrl.setName(data['uplimbName'])
if "lolimbName" in data.keys():
self.lolimbCtrl.setName(data['lolimbName'])
if "ikHandleName" in data.keys():
self.handleCtrl.setName(data['ikHandleName'])
if "useOtherIKGoalInput" in data.keys():
self.updateUseOtherIKGoal(bool(data["useOtherIKGoalInput"]))
return True
def updateUseOtherIKGoal(self, useOtherIKGoal):
""" Callback to changing the component setting 'useOtherIKGoalInput' """
if useOtherIKGoal:
if self.ikgoal_cmpIn is None:
self.ikgoal_cmpIn = self.createInput('ikGoalInput', dataType='Xfo', parent=self.inputHrcGrp).getTarget()
self.ikBlendAttr = self.createInput('ikBlend', dataType='Float', parent=self.cmpInputAttrGrp)
self.softIKAttr = self.createInput('softIK', dataType='Float', parent=self.cmpInputAttrGrp)
                self.squashAttr = self.createInput('squash', dataType='Float', parent=self.cmpInputAttrGrp)
self.stretchAttr = self.createInput('stretch', dataType='Float', parent=self.cmpInputAttrGrp)
else:
if self.ikgoal_cmpIn is not None:
# self.deleteInput('ikGoalInput', parent=self.inputHrcGrp)
# self.deleteInput('ikBlend', parent=self.cmpInputAttrGrp)
# self.deleteInput('softIK', parent=self.cmpInputAttrGrp)
# self.deleteInput('stretch', parent=self.cmpInputAttrGrp)
self.ikgoal_cmpIn = None
def getRigBuildData(self):
"""Returns the Guide data used by the Rig Component to define the layout of the final rig..
Return:
The JSON rig data object.
"""
data = super(OSSLimbComponentGuide, self).getRigBuildData()
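        # Bones aim down +X with +Z up; both axes are mirrored for right-side
        # ('R') components so left and right limbs build symmetrically.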
self.boneAxisStr = "POSX"
if self.getLocation() == 'R':
self.boneAxisStr = "NEGX"
self.boneAxis = AXIS_NAME_TO_TUPLE_MAP[self.boneAxisStr]
self.upAxisStr = "POSZ"
if self.getLocation() == 'R':
self.upAxisStr = "NEGZ"
self.upAxis = AXIS_NAME_TO_TUPLE_MAP[self.upAxisStr]
# Values
uplimbPosition = self.uplimbCtrl.xfo.tr
lolimbPosition = self.lolimbCtrl.xfo.tr
handlePosition = self.handleCtrl.xfo.tr
# Calculate uplimb Xfo
uplimbXfo = Xfo(self.uplimbCtrl.xfo)
# self.upAxis neg Y assumes the lolimb is bent forward. To avoid this stuff, build a guide system with an actual upVector
# to get rid of any ambiguity
#aimAt(uplimbXfo, self.lolimbCtrl.xfo.tr, upPos=self.handleCtrl.xfo.tr, aimAxis=self.boneAxis, upAxis=self.upAxis.negate())
aimAt(uplimbXfo, aimPos=self.lolimbCtrl.xfo.tr, upPos=self.handleCtrl.xfo.tr, aimAxis=self.boneAxis, upAxis=tuple([-x for x in self.upAxis]))
# Calculate lolimb Xfo
lolimbXfo = Xfo(self.lolimbCtrl.xfo)
# self.upAxis neg Y assumes the lolimb is bent forward. To avoid this stuff, build a guide system with an actual upVector
# to get rid of any ambiguity
#aimAt(lolimbXfo, self.toeCtrl.xfo.tr, upPos=self.uplimbCtrl.xfo.tr, aimAxis=self.boneAxis, upAxis=self.upAxis.negate())
aimAt(lolimbXfo, aimPos=self.handleCtrl.xfo.tr, upPos=self.uplimbCtrl.xfo.tr, aimAxis=self.boneAxis, upAxis=tuple([-x for x in self.upAxis]))
# Get lengths
uplimbLen = uplimbPosition.subtract(lolimbPosition).length()
lolimbLen = lolimbPosition.subtract(handlePosition).length()
handleXfo = self.handleCtrl.xfo
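        # Place the up-vector guide two uplimb-lengths out along the up axis,
        # expressed in lolimb space, so the IK plane is unambiguous.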
upVXfo = Xfo()
offset = [x * uplimbLen * 2 for x in self.upAxis]
upVXfo.tr = lolimbXfo.transformVector(Vec3(offset[0], offset[1], offset[2]))
data['uplimbXfo'] = uplimbXfo
data['lolimbXfo'] = lolimbXfo
data['handleXfo'] = handleXfo
data['upVXfo'] = upVXfo
data['uplimbLen'] = uplimbLen
data['lolimbLen'] = lolimbLen
return data
# ==============
# Class Methods
# ==============
@classmethod
def getComponentType(cls):
"""Enables introspection of the class prior to construction to determine if it is a guide component.
Return:
        The component type ('Guide' if this is a guide component).
"""
return 'Guide'
@classmethod
def getRigComponentClass(cls):
"""Returns the corresponding rig component class for this guide component class
Return:
The rig component class.
"""
return OSSLimbComponentRig
class OSSLimbComponentRig(OSSLimbComponent):
"""Limb Component"""
def __init__(self, name=COMPONENT_NAME, parent=None):
Profiler.getInstance().push("Construct Limb Rig Component:" + name)
super(OSSLimbComponentRig, self).__init__(name, parent)
def createControls(self, data):
name = data["name"]
self.uplimbName = data['uplimbName']
self.lolimbName = data['lolimbName']
self.ikHandleName = data['ikHandleName']
self.useOtherIKGoal = bool(data['useOtherIKGoal'])
self.tagNames = data.get('tagNames', "").strip()
globalScale = data['globalComponentCtrlSize']
        self.globalScaleVec = Vec3(globalScale, globalScale, globalScale)
self.untwistUplimb = bool(data['untwistUplimb']) #This should be a simple method instead
self.addPartialJoints = bool(data['addPartialJoints']) #This should be a simple method instead
self.addTwistJoints = bool(data['addTwistJoints']) #This should be a simple method instead
self.addMidControls = bool(data['addMidControls']) #This should be a simple method instead
self.addTwistJointsORaddMidControls = bool(data['addTwistJoints']) or bool(data['addMidControls'])
# =========
# Controls
# =========
# World and Parent Space for Aligns (add more if needed)
self.uplimbWorldSpace = Space(self.uplimbName + 'WorldSpace', parent=self.ctrlCmpGrp)
self.uplimbParentSpace = Space(self.uplimbName + 'ParentSpace', parent=self.ctrlCmpGrp)
self.uplimbWorldSpace.xfo = data['uplimbXfo']
self.uplimbParentSpace.xfo = data['uplimbXfo']
# uplimb
self.uplimbFKSpace = Space(self.uplimbName, parent=self.ctrlCmpGrp)
self.uplimbFKCtrl = FKControl(self.uplimbName, parent=self.uplimbFKSpace, shape="cube")
self.uplimbFKCtrl.xfo = data['uplimbXfo']
self.uplimbFKSpace.xfo = data['uplimbXfo']
self.uplimbFKCtrl.ro = RotationOrder(ROT_ORDER_STR_TO_INT_MAP["XYZ"]) #Set with component settings later
#adding FK IK matching Attributes
        self.upLimbAttrGrp = AttributeGroup('upLimbAttrGrp', parent=self.uplimbFKCtrl)
BoolAttribute('isFKRoot', value=True, parent=self.upLimbAttrGrp)
StringAttribute('FKIKComponent', value=data['FKIKComponent'], parent=self.upLimbAttrGrp)
StringAttribute('match_FK_target', value='1', parent=self.upLimbAttrGrp)
StringAttribute('match_IK_source', value='0', parent=self.upLimbAttrGrp)
if self.untwistUplimb:
# We should be able to insert a space to any kind of 3D object, not just controls
self.uplimbUntwistBase = Space(name=self.uplimbName+"UntwistBase", parent=self.uplimbParentSpace)
self.uplimbUntwistBase.xfo = data['uplimbXfo']
# lolimb
self.lolimbFKSpace = Space(self.lolimbName, parent=self.uplimbFKCtrl)
self.lolimbFKCtrl = FKControl(self.lolimbName, parent=self.lolimbFKSpace)
self.lolimbFKSpace.xfo = data['lolimbXfo']
self.lolimbFKCtrl.xfo = data['lolimbXfo']
self.lolimbFKCtrl.ro = RotationOrder(ROT_ORDER_STR_TO_INT_MAP["XYZ"]) #Set with component settings later
#adding FK IK matching Attributes
self.loLimbAttrGrp = AttributeGroup('loLimbAttrGrp', parent=self.lolimbFKCtrl)
StringAttribute('FKIKComponent', value=data['FKIKComponent'], parent=self.loLimbAttrGrp)
StringAttribute('match_FK_target', value='1', parent=self.loLimbAttrGrp)
StringAttribute('match_IK_source', value='1', parent=self.loLimbAttrGrp)
BoolAttribute('isFKUpVec', value=True, parent=self.loLimbAttrGrp)
# lolimbIK
self.lolimbIKCtrl = IKControl(self.lolimbName, parent=self.ctrlCmpGrp, shape="circle", scale=globalScale*0.8)
self.lolimbIKSpace = self.lolimbIKCtrl.insertSpace(name=self.lolimbName+"_ik")
self.lolimbIKSpace.xfo = data['lolimbXfo']
self.lolimbIKCtrl.xfo = data['lolimbXfo']
self.lolimbIKCtrl.ro = RotationOrder(ROT_ORDER_STR_TO_INT_MAP["XZY"]) #Set with component settings later
self.lolimbIKCtrl.lockRotation(x=True, y=True, z=True)
self.lolimbIKCtrl.lockScale(x=True, y=True, z=True)
self.lolimbIKCtrl.rotatePoints(0, 0, 90)
# MidCtrls (Bend/Bow) Creation - may need to make this an option
# uplimbMid
if self.addMidControls:
self.uplimbMidSpace = Space(self.uplimbName+'Mid', parent=self.ctrlCmpGrp)
self.uplimbMidCtrl = FKControl(self.uplimbName+'Mid', parent=self.uplimbMidSpace, shape="circle", scale=globalScale*1.0)
self.lolimbMidSpace = Space(self.lolimbName+'Mid', parent=self.ctrlCmpGrp)
self.lolimbMidCtrl = FKControl(self.lolimbName+'Mid', parent=self.lolimbMidSpace, shape="circle", scale=globalScale*0.8)
for ctrl in [self.uplimbMidCtrl, self.uplimbMidSpace]:
ctrl.xfo = data['uplimbXfo']
ctrl.xfo.tr = data['uplimbXfo'].tr.linearInterpolate(data['lolimbXfo'].tr, 0.5)
# lolimbMid
for ctrl in [self.lolimbMidCtrl, self.lolimbMidSpace]:
ctrl.xfo = data['lolimbXfo']
ctrl.xfo.tr = data['lolimbXfo'].tr.linearInterpolate(data['handleXfo'].tr, 0.5)
for ctrl in [self.uplimbMidCtrl, self.lolimbMidCtrl]:
ctrl.lockRotation(x=True, y=True, z=True)
ctrl.lockScale(x=True, y=True, z=True)
ctrl.rotatePoints(0, 0, 90)
box = False
if box:
for ctrl in [self.uplimbFKCtrl, self.lolimbFKCtrl]:
ctrl.setShape("cube")
ctrl.alignOnXAxis()
self.uplimbFKCtrl.scalePointsOnAxis(data['uplimbLen'], self.boneAxisStr)
self.lolimbFKCtrl.scalePointsOnAxis(data['lolimbLen'], self.boneAxisStr)
else:
self.uplimbFKCtrl.setShape("square")
self.lolimbFKCtrl.setShape("squarePointed")
for ctrl in [self.uplimbFKCtrl, self.lolimbFKCtrl]:
if self.getLocation() == 'R':
ctrl.scalePoints(Vec3(-1,-1,-1))
ctrl.rotatePoints(0,90,90)
self.limbIKSpace = Space(self.ikHandleName, parent=self.ctrlCmpGrp)
self.limbIKSpace.xfo = data['handleXfo']
# hand
if self.useOtherIKGoal: #Do not use this as a control, hide it
self.limbIKCtrl = Transform(self.ikHandleName, parent=self.limbIKSpace)
else:
self.limbIKCtrl = IKControl(self.ikHandleName, parent=self.limbIKSpace, shape="jack")
self.limbIKCtrl.xfo = data['handleXfo']
# Add Component Params to IK control
limbSettingsAttrGrp = AttributeGroup("DisplayInfo_LimbSettings", parent=self.limbIKCtrl)
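        # When another component (e.g. a foot) drives the IK goal, expose these
        # settings as component inputs; otherwise keep them as local attributes
        # on the IK control.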
if self.useOtherIKGoal:
self.ikgoal_cmpIn = self.createInput('ikGoalInput', dataType='Xfo', parent=self.inputHrcGrp).getTarget()
self.limbIKCtrl.constrainTo(self.ikgoal_cmpIn, maintainOffset=True)
self.ikBlendAttr = self.createInput('ikBlend', dataType='Float', value=0.0, minValue=0.0, maxValue=1.0, parent=self.cmpInputAttrGrp).getTarget()
self.softIKAttr = self.createInput('softIK', dataType='Float', value=0.0, minValue=0.0, parent=self.cmpInputAttrGrp).getTarget()
self.squashAttr = self.createInput('squash', dataType='Float', value=0.0, minValue=0.0, maxValue=1.0, parent=self.cmpInputAttrGrp).getTarget()
self.stretchAttr = self.createInput('stretch', dataType='Float', value=1.0, minValue=0.0, maxValue=1.0, parent=self.cmpInputAttrGrp).getTarget()
else:
self.ikgoal_cmpIn = None
self.ikBlendAttr = ScalarAttribute('ikBlend', value=0.0, minValue=0.0, maxValue=1.0, parent=limbSettingsAttrGrp)
self.softIKAttr = ScalarAttribute('softIK', value=0.0, minValue=0.0, parent=limbSettingsAttrGrp)
self.squashAttr = ScalarAttribute('squash', value=0.0, minValue=0.0, maxValue=1.0, parent=limbSettingsAttrGrp)
self.stretchAttr = ScalarAttribute('stretch', value=1.0, minValue=0.0, maxValue=1.0, parent=limbSettingsAttrGrp)
self.limbBone0LenInputAttr = ScalarAttribute('bone0Len', value=1.0, parent=limbSettingsAttrGrp)
self.limbBone1LenInputAttr = ScalarAttribute('bone1Len', value=1.0, parent=limbSettingsAttrGrp)
self.limbDrawDebugAttr = BoolAttribute('drawDebug', value=False, parent=limbSettingsAttrGrp)
self.drawDebugInputAttr.connect(self.limbDrawDebugAttr)
# UpV
self.limbUpVCtrl = Control(name+'UpV', parent=self.ctrlCmpGrp, shape="triangle")
self.limbUpVCtrl.xfo = data['upVXfo']
self.limbUpVCtrl.alignOnZAxis()
self.limbUpVSpace = self.limbUpVCtrl.insertSpace()
#adding FK IK matching Attributes
self.limbUpVAttrGrp = AttributeGroup('limbUpAttrGrp', parent=self.limbUpVCtrl)
StringAttribute('FKIKComponent', value=data['FKIKComponent'], parent=self.limbUpVAttrGrp)
BoolAttribute('isUpVec', value=True, parent=self.limbUpVAttrGrp)
self.limbUpVCtrlIKSpace = Space(name+'UpVIK', parent=self.ctrlCmpGrp)
self.limbUpVCtrlIKSpace.xfo = data['upVXfo']
if self.useOtherIKGoal:
self.limbUpVCtrlIKSpaceConstraint = self.limbUpVCtrlIKSpace.constrainTo(self.ikgoal_cmpIn, maintainOffset=True)
else:
self.limbUpVCtrlIKSpaceConstraint = self.limbUpVCtrlIKSpace.constrainTo(self.limbIKCtrl, maintainOffset=True)
self.limbUpVCtrlMasterSpace = Space(name+'IKMaster', parent=self.ctrlCmpGrp)
self.limbUpVCtrlMasterSpace.xfo = data['upVXfo']
self.limbUpVCtrlMasterSpaceConstraint = self.limbUpVCtrlMasterSpace.constrainTo(self.globalSRTInputTgt, maintainOffset=True)
upVAttrGrp = AttributeGroup("UpVAttrs", parent=self.limbUpVCtrl)
upVSpaceBlendInputAttr = ScalarAttribute(self.ikHandleName+'Space', value=0.0, minValue=0.0, maxValue=1.0, parent=upVAttrGrp)
# ==========
# Deformers
# ==========
self.uplimbDef = Joint(self.uplimbName, parent=self.deformersParent)
self.uplimbDef.setComponent(self)
#adding FK IK matching Attributes
self.uplimbDefAttrGrp = AttributeGroup('uplimbDefAttrGrp', parent=self.uplimbDef)
        BoolAttribute('isIKRoot', value=True, parent=self.uplimbDefAttrGrp)
StringAttribute('FKIKComponent', value=data['FKIKComponent'], parent=self.uplimbDefAttrGrp)
StringAttribute('match_IK_target', value='0', parent=self.uplimbDefAttrGrp)
self.lolimbDef = Joint(self.lolimbName, parent=self.uplimbDef)
self.lolimbDef.setComponent(self)
# Don't want to change RO for fbx output right now
# self.lolimbDef.ro = RotationOrder(ROT_ORDER_STR_TO_INT_MAP["XYZ"]) #Set with component settings later
#adding FK IK matching Attributes
self.lolimbDefAttrGrp = AttributeGroup('lolimbDefAttrGrp', parent=self.lolimbDef)
StringAttribute('FKIKComponent', value=data['FKIKComponent'], parent=self.lolimbDefAttrGrp)
StringAttribute('match_FK_source', value='2', parent=self.lolimbDefAttrGrp)
StringAttribute('match_IK_target', value='1', parent=self.lolimbDefAttrGrp)
self.limbendDef = Joint(name+'end', parent=self.lolimbDef)
self.limbendDef.setComponent(self)
#adding FK IK matching Attributes
        self.limbendDefAttrGrp = AttributeGroup('limbendDefAttrGrp', parent=self.limbendDef)
StringAttribute('FKIKComponent', value=data['FKIKComponent'], parent=self.limbendDefAttrGrp)
StringAttribute('match_FK_source', value='2', parent=self.limbendDefAttrGrp)
self.parentSpaceInputTgt.childJoints = [self.uplimbDef]
# ==============
# Constrain I/O
# ==============
# Constraint inputs
# self.uplimbFKSpaceConstraint = self.uplimbFKSpace.constrainTo(self.parentSpaceInputTgt, maintainOffset=True)
self.uplimbParentSpaceConstraint = self.uplimbParentSpace.constrainTo(self.parentSpaceInputTgt, maintainOffset=True)
self.uplimbWorldSpaceConstraint = self.uplimbWorldSpace.constrainTo(self.ctrlCmpGrp, maintainOffset=True)
# Blend the Spaces (should make this a sub proc)
self.limbUpVSpaceHierBlendSolver = KLOperator(self.getName()+'UpVSpace', 'OSS_HierBlendSolver', 'OSS_Kraken')
self.addOperator(self.limbUpVSpaceHierBlendSolver)
self.limbUpVSpaceHierBlendSolver.setInput('blend', upVSpaceBlendInputAttr)
upVSpaceBlendInputAttr.setValue(0.0)
self.limbUpVSpaceHierBlendSolver.setInput('parentIndexes', [-1])
# Add Att Inputs
self.limbUpVSpaceHierBlendSolver.setInput('drawDebug', self.drawDebugInputAttr)
self.limbUpVSpaceHierBlendSolver.setInput('rigScale', self.rigScaleInputAttr)
# Add Xfo Inputs
self.limbUpVSpaceHierBlendSolver.setInput('hierA', [self.limbUpVCtrlMasterSpace])
self.limbUpVSpaceHierBlendSolver.setInput('hierB', [self.limbUpVCtrlIKSpace])
# Add Xfo Outputs
self.limbUpVSpaceHierBlendSolver.setOutput('hierOut', [self.limbUpVSpace])
# ===============
# Add KL Ops
# ===============
# WorldSpace Blend Aim
limbSettingsAttrGrp = AttributeGroup("DisplayInfo_Settings", parent=self.uplimbFKCtrl)
self.worldSpaceAttr = ScalarAttribute('alignToWorld', value=0.0, minValue=0.0, maxValue=1.0, parent=limbSettingsAttrGrp)
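        # Blend uplimbFKSpace between its parent space and world space; the
        # translate/scale blends are pinned to 0, so only rotation follows alignToWorld.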
self.armAlignOp = self.blend_two_xfos(
self.uplimbFKSpace,
self.uplimbParentSpace, self.uplimbWorldSpace,
blendTranslate=0,
blendRotate=self.worldSpaceAttr,
blendScale=0,
blend=self.worldSpaceAttr,
name= self.uplimbName + 'BlendKLOp')
# Add FK/IK Blend Limb KL Op
self.limbIKKLOp = KLOperator(self.getName()+'IKFK', 'OSS_TwoBoneIKSolver', 'OSS_Kraken')
self.addOperator(self.limbIKKLOp)
# Add Att Inputs
self.limbIKKLOp.setInput('drawDebug', self.drawDebugInputAttr)
self.limbIKKLOp.setInput('rigScale', self.rigScaleInputAttr)
self.limbIKKLOp.setInput('bone0Len', self.limbBone0LenInputAttr)
self.limbIKKLOp.setInput('bone1Len', self.limbBone1LenInputAttr)
self.limbIKKLOp.setInput('ikBlend', self.ikBlendAttr)
self.limbIKKLOp.setInput('softIK', self.softIKAttr)
self.limbIKKLOp.setInput('squash', self.squashAttr)
self.limbIKKLOp.setInput('stretch', self.stretchAttr)
# Add Xfo Inputs
self.limbIKKLOp.setInput('root', self.uplimbFKSpace)
self.limbIKKLOp.setInput('bone0FK', self.uplimbFKCtrl)
self.limbIKKLOp.setInput('bone1FK', self.lolimbFKCtrl)
self.limbIKKLOp.setInput('upV', self.limbUpVCtrl)
self.limbIKKLOp.setInput('boneAxis', AXIS_NAME_TO_INT_MAP[self.boneAxisStr])
self.limbIKKLOp.setInput('upAxis', AXIS_NAME_TO_INT_MAP[self.upAxisStr])
self.limbIKKLOp.setInput('ikHandle', self.limbIKCtrl)
# Add lolimb IK
self.limbIKKLOp.setOutput('bone0Out', self.uplimb_cmpOut)
self.limbIKKLOp.setOutput('bone1Out', self.lolimbIKSpace)
self.limbIKKLOp.setOutput('bone2Out', self.endlimb_cmpOut)
# MidCtrl (Bend/Bow) Creation - may need to make this an option
if self.addMidControls:
sourceA = self.uplimb_cmpOut
sourceB = self.lolimbIKCtrl
self.uplimbMidCtrlRigOp = KLOperator(self.uplimbName + "Mid", 'OSS_BlendTRSConstraintSolver', 'OSS_Kraken')
self.addOperator(self.uplimbMidCtrlRigOp)
self.uplimbMidCtrlRigOp.setInput('blendTranslate', 0.5)
self.uplimbMidCtrlRigOp.setInput('blendRotate', 0)
self.uplimbMidCtrlRigOp.setInput('blendScale', 0.5)
self.uplimbMidCtrlRigOp.setInput('constrainerTranslateA', sourceA)
self.uplimbMidCtrlRigOp.setInput('constrainerTranslateB', sourceB)
self.uplimbMidCtrlRigOp.setInput('constrainerRotateA', sourceA)
self.uplimbMidCtrlRigOp.setInput('constrainerRotateB', sourceB)
self.uplimbMidCtrlRigOp.setInput('constrainerScaleA', sourceA)
self.uplimbMidCtrlRigOp.setInput('constrainerScaleB', sourceB)
self.uplimbMidCtrlRigOp.setOutput('result', self.uplimbMidSpace)
sourceA = self.lolimbIKCtrl
sourceB = self.endlimb_cmpOut
self.lolimbMidCtrlRigOp = KLOperator(self.lolimbName + "Mid", 'OSS_BlendTRSConstraintSolver', 'OSS_Kraken')
self.addOperator(self.lolimbMidCtrlRigOp)
self.lolimbMidCtrlRigOp.setInput('blendTranslate', 0.5)
self.lolimbMidCtrlRigOp.setInput('blendRotate', 0)
self.lolimbMidCtrlRigOp.setInput('blendScale', 0.5)
self.lolimbMidCtrlRigOp.setInput('constrainerTranslateA', sourceA)
self.lolimbMidCtrlRigOp.setInput('constrainerTranslateB', sourceB)
self.lolimbMidCtrlRigOp.setInput('constrainerRotateA', sourceA)
self.lolimbMidCtrlRigOp.setInput('constrainerRotateB', sourceB)
self.lolimbMidCtrlRigOp.setInput('constrainerScaleA', sourceA)
self.lolimbMidCtrlRigOp.setInput('constrainerScaleB', sourceB)
self.lolimbMidCtrlRigOp.setOutput('result', self.lolimbMidSpace)
self.limbIKKLOp.setOutput('bone0Out', self.uplimb_cmpOut)
self.limbIKKLOp.setOutput('bone1Out', self.lolimbIKSpace)
self.limbIKKLOp.setOutput('bone2Out', self.endlimb_cmpOut)
if self.untwistUplimb:
uplimbSolverOut = self.createOutput(self.uplimbName+"uplimbSolverOut", dataType='Xfo', parent=self.outputHrcGrp).getTarget()
self.limbIKKLOp.setOutput('bone0Out', uplimbSolverOut)
self.untwistKLOp = KLOperator(self.getName(), 'OSS_UntwistSolver', 'OSS_Kraken')
self.addOperator(self.untwistKLOp)
self.untwistKLOp.setInput('drawDebug', self.drawDebugInputAttr)
self.untwistKLOp.setInput('rigScale', self.rigScaleInputAttr)
self.untwistKLOp.setInput('inMatrix', uplimbSolverOut)
self.untwistKLOp.setInput('inBaseMatrix', self.uplimbUntwistBase)
self.untwistKLOp.setInput('axis', AXIS_NAME_TO_INT_MAP[self.boneAxisStr])
self.untwistKLOp.setOutput('untwistedMatrix', self.uplimb_cmpOut)
Profiler.getInstance().pop()
# =============
# Data Methods
# =============
def loadData(self, data=None):
"""Load a saved guide representation from persisted data.
Arguments:
data -- object, The JSON data object.
Return:
True if successful.
"""
super(OSSLimbComponentRig, self).loadData( data )
# TODO: make this a property of the component
self.boneAxisStr = "POSX"
if self.getLocation() == 'R':
self.boneAxisStr = "NEGX"
self.boneAxis = AXIS_NAME_TO_TUPLE_MAP[self.boneAxisStr]
self.upAxisStr = "POSZ"
if self.getLocation() == 'R':
self.upAxisStr = "NEGZ"
self.upAxis = AXIS_NAME_TO_TUPLE_MAP[self.upAxisStr]
self.createControls(data)
self.drawDebugInputAttr.setValue(False)
if self.getLocation() == "R":
pass
#self.limbIKCtrl.rotatePoints(0, 90, 0)
#self.limbIKCtrl.translatePoints(Vec3(-1.0, 0.0, 0.0))
else:
pass
#self.limbIKCtrl.rotatePoints(0, -90, 0)
#self.limbIKCtrl.translatePoints(Vec3(1.0, 0.0, 0.0))
self.limbBone0LenInputAttr.setMin(0.0)
self.limbBone0LenInputAttr.setMax(data['uplimbLen'] * 3.0)
self.limbBone0LenInputAttr.setValue(data['uplimbLen'])
self.limbBone1LenInputAttr.setMin(0.0)
self.limbBone1LenInputAttr.setMax(data['lolimbLen'] * 3.0)
self.limbBone1LenInputAttr.setValue(data['lolimbLen'])
# ====================
# Evaluate Fabric Ops
# ====================
# Eval Operators # Order is important
self.evalOperators()
# Add lolimb IK Constrain
self.lolimb_cmpOut.constrainTo(self.lolimbIKCtrl).evaluate()
# Add Deformer Joint Constrain
self.uplimbDef.constrainTo(self.uplimb_cmpOut).evaluate()
self.uplimb_cmpOut.parentJoint = self.uplimbDef
self.lolimbDef.constrainTo(self.lolimb_cmpOut).evaluate()
self.lolimb_cmpOut.parentJoint = self.lolimbDef
self.limbendDef.constrainTo(self.endlimb_cmpOut).evaluate()
self.endlimb_cmpOut.parentJoint = self.limbendDef
# ====================
# Evaluate Output Constraints (needed for building input/output connection constraints in next pass)
# ====================
# Evaluate the *output* constraints to ensure the outputs are now in the correct location.
# None
# Don't eval *input* constraints because they should all have maintainOffset on and get evaluated at the end during build()
if self.addTwistJointsORaddMidControls:
bone_axis = AXIS_NAME_TO_TUPLE_MAP[self.boneAxisStr]
boneAxisVec = Vec3(bone_axis[0], bone_axis[1], bone_axis[2])
up_axis = AXIS_NAME_TO_TUPLE_MAP[self.upAxisStr]
upAxisVec = Vec3(up_axis[0], up_axis[1], up_axis[2])
#sideAxisVec = boneAxisVec.cross(upAxisVec)
#sideAxisStr = TUPLE_TO_AXIS_NAME_MAP[sideAxisVec.x, sideAxisVec.y, sideAxisVec.z]
uplimbStartTwistXfo = self.createOutput(self.uplimbName+"StartTwist", dataType='Xfo', parent=self.outputHrcGrp).getTarget()
uplimbStartTwistXfo.xfo = self.uplimb_cmpOut.xfo
uplimbStartTwistXfo.constrainTo(self.parentSpaceInputTgt, maintainOffset=True)
lolimbEndTwistXfo = self.createOutput(self.lolimbName+"EndTwist", dataType='Xfo', parent=self.outputHrcGrp).getTarget()
lolimbEndTwistXfo.xfo = self.endlimb_cmpOut.xfo
if self.useOtherIKGoal:
lolimbEndTwistXfo.constrainTo(self.endTwistParent_cmpIn, maintainOffset=True)
else:
lolimbEndTwistXfo.constrainTo(self.endlimb_cmpOut, maintainOffset=True)
if self.addMidControls:
self.uplimbTwInputs = [self.uplimb_cmpOut, self.uplimbMidCtrl, self.lolimb_cmpOut]
self.lolimbTwInputs = [self.lolimb_cmpOut, self.lolimbMidCtrl, lolimbEndTwistXfo]
else:
self.uplimbTwInputs = [self.uplimb_cmpOut, self.lolimb_cmpOut]
self.lolimbTwInputs = [self.lolimb_cmpOut, lolimbEndTwistXfo]
self.uplimbTwistKLOp = self.createTwistJoints(
self.uplimbName+"_twist",
self.uplimbDef,
self.uplimbTwInputs,
numDeformers=int(data['uplimbNumTwistJoints']),
#skipStart=True,
aimAxisStr=self.boneAxisStr, #This would be an offset to the ctrlAxis
sideAxisStr=self.upAxisStr.replace("POS", "NEG"),
#ctrlAimAxisStr=self.boneAxisStr, # Don't invert the Xaxis of the control - remember this is relative to existing ctrls
ctrlNormalAxisStr=self.upAxisStr) #We want the normal to the curve to be in Y so MAP this (Z default to Y)
self.lolimbTwistKLOp = self.createTwistJoints(
self.lolimbName+"_twist",
self.lolimbDef,
self.lolimbTwInputs,
numDeformers=int(data['lolimbNumTwistJoints']),
#skipStart=True,
aimAxisStr=self.boneAxisStr, #This would be an offset to the ctrlAxis
sideAxisStr=self.upAxisStr.replace("POS", "NEG"),
#ctrlAimAxisStr=self.boneAxisStr, # Don't invert the Xaxis of the control - remember this is relative to existing ctrls
ctrlNormalAxisStr=self.upAxisStr)
            self.uplimbTwistKLOp.evaluate()
            self.lolimbTwistKLOp.evaluate()
if self.addPartialJoints:
if self.untwistUplimb:
uplimbBaseRotate = self.uplimbUntwistBase
else:
uplimbBaseRotate = self.uplimbFKSpace
uplimbPartialDef = self.createPartialJoint(self.uplimbDef, baseTranslate=self.uplimbDef, baseRotate=uplimbBaseRotate, parent=self.uplimbDef.getParent())
lolimbPartialConstrainer = self.uplimb_cmpOut
if self.addTwistJointsORaddMidControls:
lolimbPartialConstrainer = self.uplimbTwistKLOp.getOutput("outputs")[-1] #Use the last twist joint to blend ori from
#Re-write the PartialJointBlend solver and python function to better accommodate random inputs.
# We actually want to blend between the end of the uplimb twist and the start of the lolimb twist - because of scale interp
lolimbTargetDef = self.lolimbDef
if self.addTwistJointsORaddMidControls:
                lolimbTargetDef = self.lolimbTwistKLOp.getOutput("outputs")[0] #Use the first lolimb twist joint to blend ori from
lolimb_ik_base = Locator(self.lolimbDef.getName()+"_ik_base_null" , parent=self.ctrlCmpGrp)
lolimb_ik_base.setShapeVisibility(False)
lolimb_ik_base.xfo = self.lolimb_cmpOut.xfo #should be up to date by now, keep orientation of lolimb in relation to uplimb
lolimb_ik_base.constrainTo(lolimbPartialConstrainer, maintainOffset=True)
lolimbPartialDef = self.createPartialJoint(lolimbTargetDef,
name=self.lolimbDef.getName()+"_part",
baseTranslate=lolimbTargetDef,
baseRotate=lolimb_ik_base,
baseScale=lolimb_ik_base,
parent=self.lolimbDef.getParent())
self.parentSpaceInputTgt.childJoints.append(uplimbPartialDef)
# PSD
psdAttrGrp = AttributeGroup("PSD", parent=self.lolimbDef)
self.lolimbAngleBetweenSolver = KLOperator(self.lolimbName, 'OSS_AngleBetweenSolver', 'OSS_Kraken')
self.addOperator(self.lolimbAngleBetweenSolver)
# Add Att Inputs
self.lolimbAngleBetweenSolver.setInput('drawDebug', self.drawDebugInputAttr)
self.lolimbAngleBetweenSolver.setInput('rigScale', self.rigScaleInputAttr)
# Add Xfo Inputs
self.lolimbAngleBetweenSolver.setInput('matrixA', self.uplimbDef)
self.lolimbAngleBetweenSolver.setInput('matrixB', self.lolimbDef)
self.lolimbAngleBetweenSolver.setInput('axisA', AXIS_NAME_TO_INT_MAP[self.boneAxisStr])
self.lolimbAngleBetweenSolver.setInput('axisB', AXIS_NAME_TO_INT_MAP[self.boneAxisStr])
self.lolimbAngleBetweenSolver.setInput('radians', True)
# Add Xfo Outputs
angleAttr = ScalarAttribute("angleResult", value=0.0, parent=psdAttrGrp)
self.lolimbAngleBetweenSolver.setOutput('angle', angleAttr)
enablePSDAttr = ScalarAttribute("enablePSD", value=0.0, parent=psdAttrGrp)
self.lolimbConditionSolver = self.createConditionSolver(enablePSDAttr, angleAttr, 0, name=self.lolimbName)
psdName = "PSD_"+self.lolimbDef.getBuildName()+"_bsShape" # naming just to keep current convention
psdAttr = ScalarAttribute(psdName, value=0.0, parent=psdAttrGrp)
psdAttr.setLock(True)
psdAttr.setMetaDataItem("SCALAR_OUTPUT", psdName)
psdAttr.appendMetaDataListItem("TAGS", self.getDecoratedName())
psdAttr.appendMetaDataListItem("TAGS", "PSD")
self.lolimbConditionSolver.setOutput('result', psdAttr)
#JSON data at this point is generated by guide rig and passed to this rig, should include all defaults+loaded info
self.lolimbFKCtrl.scalePoints(Vec3(1, self.globalScaleVec.y, self.globalScaleVec.z))
self.uplimbFKCtrl.scalePoints(Vec3(1, self.globalScaleVec.y, self.globalScaleVec.z))
# self.uplimbFKCtrl.rotatePoints(0, -90, 0)
# self.lolimbFKCtrl.rotatePoints(0, -90, 0)
if not self.useOtherIKGoal:
self.limbIKCtrl.scalePoints(self.globalScaleVec)
self.limbUpVCtrl.scalePoints(self.globalScaleVec)
self.connectReverse(self.ikBlendAttr, self.uplimbFKCtrl.getVisibilityAttr())
self.connectReverse(self.ikBlendAttr, self.lolimbFKCtrl.getVisibilityAttr())
self.evalOperators()
#self.uplimbRBFWeightSolver = self.createRBFWeightsSolver(self.uplimbDef, self.uplimbDef.getParent(), self.uplimbFKCtrl, name=self.uplimbName)
#self.lolimbRBFWeightSolver = self.createRBFWeightsSolver(self.lolimbDef, self.lolimbDef.getParent(), self.lolimbFKCtrl, name=self.lolimbName)
self.evalOperators()
        # tagNames is a string; wrap it in a list so the concatenation below doesn't raise a TypeError
        self.tagAllComponentJoints([self.getDecoratedName()] + ([self.tagNames] if self.tagNames else []))
from kraken.core.kraken_system import KrakenSystem
ks = KrakenSystem.getInstance()
ks.registerComponent(OSSLimbComponentGuide)
ks.registerComponent(OSSLimbComponentRig)
|
oculusstorystudio/kraken
|
Python/OSS/OSS_limb_component.py
|
Python
|
bsd-3-clause
| 40,304
|
import warnings
import re
import csv
import mimetypes
import time
from werkzeug import secure_filename
from flask import (request, redirect, flash, abort, json, Response,
get_flashed_messages, stream_with_context)
from jinja2 import contextfunction
try:
import tablib
except ImportError:
tablib = None
from wtforms.fields import HiddenField
from wtforms.fields.core import UnboundField
from wtforms.validators import ValidationError, InputRequired
from flask_admin.babel import gettext
from flask_admin.base import BaseView, expose
from flask_admin.form import BaseForm, FormOpts, rules
from flask_admin.model import filters, typefmt, template
from flask_admin.actions import ActionsMixin
from flask_admin.helpers import (get_form_data, validate_form_on_submit,
get_redirect_target, flash_errors)
from flask_admin.tools import rec_getattr
from flask_admin._backwards import ObsoleteAttr
from flask_admin._compat import (iteritems, itervalues, OrderedDict,
as_unicode, csv_encode, text_type)
from .helpers import prettify_name, get_mdict_item_or_list
from .ajax import AjaxModelLoader
# Used to generate filter query string name
filter_char_re = re.compile('[^a-z0-9 ]')
filter_compact_re = re.compile(' +')
class ViewArgs(object):
"""
List view arguments.
"""
def __init__(self, page=None, sort=None, sort_desc=None, search=None, filters=None, extra_args=None):
self.page = page
self.sort = sort
self.sort_desc = bool(sort_desc)
self.search = search
self.filters = filters
if not self.search:
self.search = None
self.extra_args = extra_args or dict()
def clone(self, **kwargs):
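        # Return a copy of these view arguments, overriding only the values
        # passed as keyword arguments.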
if self.filters:
flt = list(self.filters)
else:
flt = None
kwargs.setdefault('page', self.page)
kwargs.setdefault('sort', self.sort)
kwargs.setdefault('sort_desc', self.sort_desc)
kwargs.setdefault('search', self.search)
kwargs.setdefault('filters', flt)
kwargs.setdefault('extra_args', dict(self.extra_args))
return ViewArgs(**kwargs)
class FilterGroup(object):
def __init__(self, label):
self.label = label
self.filters = []
def append(self, filter):
self.filters.append(filter)
def non_lazy(self):
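        # Resolve lazy translation strings to plain unicode so the filter
        # structure can be serialized (e.g. embedded in the page as JSON).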
filters = []
for item in self.filters:
copy = dict(item)
copy['operation'] = as_unicode(copy['operation'])
options = copy['options']
if options:
copy['options'] = [(k, text_type(v)) for k, v in options]
filters.append(copy)
return as_unicode(self.label), filters
def __iter__(self):
return iter(self.filters)
class BaseModelView(BaseView, ActionsMixin):
"""
Base model view.
This view does not make any assumptions on how models are stored or managed, but expects the following:
1. The provided model is an object
2. The model contains properties
3. Each model contains an attribute which uniquely identifies it (i.e. a primary key for a database model)
4. It is possible to retrieve a list of sorted models with pagination applied from a data source
5. You can get one model by its identifier from the data source
Essentially, if you want to support a new data store, all you have to do is:
1. Derive from the `BaseModelView` class
2. Implement various data-related methods (`get_list`, `get_one`, `create_model`, etc)
3. Implement automatic form generation from the model representation (`scaffold_form`)
"""
# Permissions
can_create = True
"""Is model creation allowed"""
can_edit = True
"""Is model editing allowed"""
can_delete = True
"""Is model deletion allowed"""
can_view_details = False
"""
Setting this to true will enable the details view. This is recommended
when there are too many columns to display in the list_view.
"""
can_export = False
"""Is model list export allowed"""
# Templates
list_template = 'admin/model/list.html'
"""Default list view template"""
edit_template = 'admin/model/edit.html'
"""Default edit template"""
create_template = 'admin/model/create.html'
"""Default create template"""
details_template = 'admin/model/details.html'
"""Default details view template"""
# Modal Templates
edit_modal_template = 'admin/model/modals/edit.html'
"""Default edit modal template"""
create_modal_template = 'admin/model/modals/create.html'
"""Default create modal template"""
details_modal_template = 'admin/model/modals/details.html'
"""Default details modal view template"""
# Modals
edit_modal = False
"""Setting this to true will display the edit_view as a modal dialog."""
create_modal = False
"""Setting this to true will display the create_view as a modal dialog."""
details_modal = False
"""Setting this to true will display the details_view as a modal dialog."""
# Customizations
column_list = ObsoleteAttr('column_list', 'list_columns', None)
"""
Collection of the model field names for the list view.
If set to `None`, will get them from the model.
For example::
class MyModelView(BaseModelView):
column_list = ('name', 'last_name', 'email')
(Added in 1.4.0) SQLAlchemy model attributes can be used instead of strings::
class MyModelView(BaseModelView):
column_list = ('name', User.last_name)
When using SQLAlchemy models, you can reference related columns like this::
class MyModelView(BaseModelView):
column_list = ('<relationship>.<related column name>',)
"""
column_exclude_list = ObsoleteAttr('column_exclude_list',
'excluded_list_columns', None)
"""
Collection of excluded list column names.
For example::
class MyModelView(BaseModelView):
column_exclude_list = ('last_name', 'email')
"""
column_details_list = None
"""
Collection of the field names included in the details view.
If set to `None`, will get them from the model.
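    For example (field names are illustrative)::
        class MyModelView(BaseModelView):
            column_details_list = ('id', 'name', 'email')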
"""
column_details_exclude_list = None
"""
Collection of fields excluded from the details view.
"""
column_export_list = None
"""
Collection of the field names included in the export.
If set to `None`, will get them from the model.
"""
column_export_exclude_list = None
"""
Collection of fields excluded from the export.
"""
column_formatters = ObsoleteAttr('column_formatters', 'list_formatters', dict())
"""
Dictionary of list view column formatters.
For example, if you want to show price multiplied by
two, you can do something like this::
class MyModelView(BaseModelView):
column_formatters = dict(price=lambda v, c, m, p: m.price*2)
or using Jinja2 `macro` in template::
from flask_admin.model.template import macro
class MyModelView(BaseModelView):
column_formatters = dict(price=macro('render_price'))
# in template
{% macro render_price(model, column) %}
{{ model.price * 2 }}
{% endmacro %}
    The callback function has the prototype::
def formatter(view, context, model, name):
# `view` is current administrative view
# `context` is instance of jinja2.runtime.Context
# `model` is model instance
# `name` is property name
pass
"""
column_formatters_export = None
"""
Dictionary of list view column formatters to be used for export.
Defaults to column_formatters when set to None.
Functions the same way as column_formatters except
that macros are not supported.
"""
column_type_formatters = ObsoleteAttr('column_type_formatters', 'list_type_formatters', None)
"""
Dictionary of value type formatters to be used in the list view.
By default, three types are formatted:
1. ``None`` will be displayed as an empty string
2. ``bool`` will be displayed as a checkmark if it is ``True``
3. ``list`` will be joined using ', '
If you don't like the default behavior and don't want any type formatters
applied, just override this property with an empty dictionary::
class MyModelView(BaseModelView):
column_type_formatters = dict()
    If you want to display `NULL` instead of an empty string, you can do
    something like this. It also comes with a bonus `date` formatter::
from datetime import date
from flask_admin.model import typefmt
def date_format(view, value):
return value.strftime('%d.%m.%Y')
MY_DEFAULT_FORMATTERS = dict(typefmt.BASE_FORMATTERS)
MY_DEFAULT_FORMATTERS.update({
type(None): typefmt.null_formatter,
date: date_format
})
class MyModelView(BaseModelView):
column_type_formatters = MY_DEFAULT_FORMATTERS
Type formatters have lower priority than list column formatters.
    The callback function has the following prototype::
def type_formatter(view, value):
# `view` is current administrative view
# `value` value to format
pass
"""
column_type_formatters_export = None
"""
Dictionary of value type formatters to be used in the export.
By default, two types are formatted:
1. ``None`` will be displayed as an empty string
2. ``list`` will be joined using ', '
Functions the same way as column_type_formatters.
"""
column_labels = ObsoleteAttr('column_labels', 'rename_columns', None)
"""
Dictionary where key is column name and value is string to display.
For example::
class MyModelView(BaseModelView):
column_labels = dict(name='Name', last_name='Last Name')
"""
column_descriptions = None
"""
Dictionary where key is column name and
value is description for `list view` column or add/edit form field.
For example::
class MyModelView(BaseModelView):
column_descriptions = dict(
full_name='First and Last name'
)
"""
column_sortable_list = ObsoleteAttr('column_sortable_list',
'sortable_columns',
None)
"""
Collection of the sortable columns for the list view.
If set to `None`, will get them from the model.
For example::
class MyModelView(BaseModelView):
column_sortable_list = ('name', 'last_name')
If you want to explicitly specify field/column to be used while
sorting, you can use a tuple::
class MyModelView(BaseModelView):
column_sortable_list = ('name', ('user', 'user.username'))
When using SQLAlchemy models, model attributes can be used instead
of strings::
class MyModelView(BaseModelView):
column_sortable_list = ('name', ('user', User.username))
"""
column_default_sort = None
"""
Default sort column if no sorting is applied.
Example::
class MyModelView(BaseModelView):
column_default_sort = 'user'
    You can use a tuple to control ascending/descending order. In the following
    example, items will be sorted in descending order::
class MyModelView(BaseModelView):
column_default_sort = ('user', True)
"""
column_searchable_list = ObsoleteAttr('column_searchable_list',
'searchable_columns',
None)
"""
A collection of the searchable columns. It is assumed that only
text-only fields are searchable, but it is up to the model
implementation to decide.
Example::
class MyModelView(BaseModelView):
column_searchable_list = ('name', 'email')
"""
column_editable_list = None
"""
Collection of the columns which can be edited from the list view.
For example::
class MyModelView(BaseModelView):
column_editable_list = ('name', 'last_name')
"""
column_choices = None
"""
Map choices to columns in list view
Example::
class MyModelView(BaseModelView):
column_choices = {
'my_column': [
('db_value', 'display_value'),
]
}
"""
column_filters = None
"""
Collection of the column filters.
Can contain either field names or instances of :class:`~flask_admin.model.filters.BaseFilter` classes.
Example::
class MyModelView(BaseModelView):
column_filters = ('user', 'email')
"""
named_filter_urls = False
"""
Set to True to use human-readable names for filters in URL parameters.
False by default so as to be robust across translations.
Changing this parameter will break any existing URLs that have filters.
"""
column_display_pk = ObsoleteAttr('column_display_pk',
'list_display_pk',
False)
"""
Controls if the primary key should be displayed in the list view.
"""
column_display_actions = True
"""
Controls the display of the row actions (edit, delete, details, etc.)
column in the list view.
    Useful for preventing a blank column from displaying if your view does
    not use any built-in or custom row actions.
This column is not hidden automatically due to backwards compatibility.
Note: This only affects display and does not control whether the row
actions endpoints are accessible.
"""
column_extra_row_actions = None
"""
List of row actions (instances of :class:`~flask_admin.model.template.BaseListRowAction`).
Flask-Admin will generate standard per-row actions (edit, delete, etc)
and will append custom actions from this list right after them.
For example::
from flask_admin.model.template import EndpointLinkRowAction, LinkRowAction
class MyModelView(BaseModelView):
column_extra_row_actions = [
LinkRowAction('glyphicon glyphicon-off', 'http://direct.link/?id={row_id}'),
EndpointLinkRowAction('glyphicon glyphicon-test', 'my_view.index_view')
]
"""
simple_list_pager = False
"""
Enable or disable simple list pager.
    If enabled, the model interface will not run a count query and will only show prev/next pager buttons.
"""
form = None
"""
Form class. Override if you want to use custom form for your model.
Will completely disable form scaffolding functionality.
For example::
class MyForm(Form):
name = StringField('Name')
class MyModelView(BaseModelView):
form = MyForm
"""
form_base_class = BaseForm
"""
Base form class. Will be used by form scaffolding function when creating model form.
Useful if you want to have custom constructor or override some fields.
Example::
class MyBaseForm(Form):
def do_something(self):
pass
class MyModelView(BaseModelView):
form_base_class = MyBaseForm
"""
form_args = None
"""
Dictionary of form field arguments. Refer to WTForms documentation for
list of possible options.
Example::
from wtforms.validators import DataRequired
class MyModelView(BaseModelView):
form_args = dict(
name=dict(label='First Name', validators=[DataRequired()])
)
"""
form_columns = None
"""
Collection of the model field names for the form. If set to `None` will
get them from the model.
Example::
class MyModelView(BaseModelView):
form_columns = ('name', 'email')
(Added in 1.4.0) SQLAlchemy model attributes can be used instead of
strings::
class MyModelView(BaseModelView):
form_columns = ('name', User.last_name)
SQLA Note: Model attributes must be on the same model as your ModelView
or you will need to use `inline_models`.
"""
form_excluded_columns = ObsoleteAttr('form_excluded_columns',
'excluded_form_columns',
None)
"""
Collection of excluded form field names.
For example::
class MyModelView(BaseModelView):
form_excluded_columns = ('last_name', 'email')
"""
form_overrides = None
"""
Dictionary of form column overrides.
Example::
class MyModelView(BaseModelView):
form_overrides = dict(name=wtf.FileField)
"""
form_widget_args = None
"""
Dictionary of form widget rendering arguments.
Use this to customize how widget is rendered without using custom template.
Example::
class MyModelView(BaseModelView):
form_widget_args = {
'description': {
'rows': 10,
'style': 'color: black'
},
'other_field': {
'disabled': True
}
}
Changing the format of a DateTimeField will require changes to both form_widget_args and form_args.
Example::
form_args = dict(
start=dict(format='%Y-%m-%d %I:%M %p') # changes how the input is parsed by strptime (12 hour time)
)
form_widget_args = dict(
start={'data-date-format': u'yyyy-mm-dd HH:ii P', 'data-show-meridian': 'True'} # changes how the DateTimeField displays the time
)
"""
form_extra_fields = None
"""
Dictionary of additional fields.
Example::
class MyModelView(BaseModelView):
form_extra_fields = {
'password': PasswordField('Password')
}
You can control order of form fields using ``form_columns`` property. For example::
class MyModelView(BaseModelView):
form_columns = ('name', 'email', 'password', 'secret')
form_extra_fields = {
'password': PasswordField('Password')
}
    In this case, the password field will be placed between the autogenerated email and secret fields.
"""
form_ajax_refs = None
"""
Use AJAX for foreign key model loading.
    Should contain a dictionary where the key is a field name and the value is either
    a dictionary that configures AJAX lookups or a backend-specific `AjaxModelLoader` class instance.
For example, it can look like::
class MyModelView(BaseModelView):
form_ajax_refs = {
'user': {
'fields': ('first_name', 'last_name', 'email'),
'page_size': 10
}
}
Or with SQLAlchemy backend like this::
class MyModelView(BaseModelView):
form_ajax_refs = {
'user': QueryAjaxModelLoader('user', db.session, User, fields=['email'], page_size=10)
}
If you need custom loading functionality, you can implement your custom loading behavior
in your `AjaxModelLoader` class.
"""
form_rules = None
"""
List of rendering rules for model creation form.
    This property changes the default form rendering behavior and makes it possible to rearrange
    the order of rendered fields, add text between fields, group them, etc. If not set, the
    default Flask-Admin form rendering logic is used.
    Here's a simple example which illustrates how to use it::
from flask_admin.form import rules
class MyModelView(ModelView):
form_rules = [
# Define field set with header text and four fields
rules.FieldSet(('first_name', 'last_name', 'email', 'phone'), 'User'),
            # ... and it is just a shortcut for:
rules.Header('User'),
rules.Field('first_name'),
rules.Field('last_name'),
# ...
# It is possible to create custom rule blocks:
MyBlock('Hello World'),
# It is possible to call macros from current context
rules.Macro('my_macro', foobar='baz')
]
"""
form_edit_rules = None
"""
    Customized rules for the edit form. Overrides `form_rules` if present.
"""
form_create_rules = None
"""
    Customized rules for the create form. Overrides `form_rules` if present.
"""
# Actions
action_disallowed_list = ObsoleteAttr('action_disallowed_list',
'disallowed_actions',
[])
"""
    Set of disallowed action names. For example, if you want to disable
    mass model deletion, do something like this::
class MyModelView(BaseModelView):
action_disallowed_list = ['delete']
"""
# Export settings
export_max_rows = 0
"""
Maximum number of rows allowed for export.
Unlimited by default. Uses `page_size` if set to `None`.
"""
export_types = ['csv']
"""
    A list of available export filetypes. `csv` is the default, but any
    filetype supported by tablib can be used.
    Check the tablib documentation at https://github.com/kennethreitz/tablib/blob/master/README.rst
    for supported types.
"""
# Various settings
page_size = 20
"""
Default page size for pagination.
"""
def __init__(self, model,
name=None, category=None, endpoint=None, url=None, static_folder=None,
menu_class_name=None, menu_icon_type=None, menu_icon_value=None):
"""
Constructor.
:param model:
Model class
:param name:
View name. If not provided, will use the model class name
:param category:
View category
:param endpoint:
Base endpoint. If not provided, will use the model name.
:param url:
Base URL. If not provided, will use endpoint as a URL.
:param menu_class_name:
Optional class name for the menu item.
:param menu_icon_type:
Optional icon. Possible icon types:
- `flask_admin.consts.ICON_TYPE_GLYPH` - Bootstrap glyph icon
- `flask_admin.consts.ICON_TYPE_FONT_AWESOME` - Font Awesome icon
- `flask_admin.consts.ICON_TYPE_IMAGE` - Image relative to Flask static directory
- `flask_admin.consts.ICON_TYPE_IMAGE_URL` - Image with full URL
:param menu_icon_value:
Icon glyph name or URL, depending on `menu_icon_type` setting
"""
self.model = model
# If name not provided, it is model name
if name is None:
name = '%s' % self._prettify_class_name(model.__name__)
super(BaseModelView, self).__init__(name, category, endpoint, url, static_folder,
menu_class_name=menu_class_name,
menu_icon_type=menu_icon_type,
menu_icon_value=menu_icon_value)
# Actions
self.init_actions()
# Scaffolding
self._refresh_cache()
# Endpoint
def _get_endpoint(self, endpoint):
if endpoint:
return super(BaseModelView, self)._get_endpoint(endpoint)
return self.model.__name__.lower()
# Caching
def _refresh_forms_cache(self):
# Forms
self._form_ajax_refs = self._process_ajax_references()
if self.form_widget_args is None:
self.form_widget_args = {}
self._create_form_class = self.get_create_form()
self._edit_form_class = self.get_edit_form()
self._delete_form_class = self.get_delete_form()
# List View In-Line Editing
if self.column_editable_list:
self._list_form_class = self.get_list_form()
else:
self.column_editable_list = {}
def _refresh_filters_cache(self):
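        # Group filters by display name and build an arg -> (index, filter)
        # lookup used to decode filter values from the query string.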
self._filters = self.get_filters()
if self._filters:
self._filter_groups = OrderedDict()
self._filter_args = {}
for i, flt in enumerate(self._filters):
key = as_unicode(flt.name)
if key not in self._filter_groups:
self._filter_groups[key] = FilterGroup(flt.name)
self._filter_groups[key].append({
'index': i,
'arg': self.get_filter_arg(i, flt),
'operation': flt.operation(),
'options': flt.get_options(self) or None,
'type': flt.data_type
})
self._filter_args[self.get_filter_arg(i, flt)] = (i, flt)
else:
self._filter_groups = None
self._filter_args = None
def _refresh_form_rules_cache(self):
if self.form_create_rules:
self._form_create_rules = rules.RuleSet(self, self.form_create_rules)
else:
self._form_create_rules = None
if self.form_edit_rules:
self._form_edit_rules = rules.RuleSet(self, self.form_edit_rules)
else:
self._form_edit_rules = None
if self.form_rules:
form_rules = rules.RuleSet(self, self.form_rules)
if not self._form_create_rules:
self._form_create_rules = form_rules
if not self._form_edit_rules:
self._form_edit_rules = form_rules
def _refresh_cache(self):
"""
Refresh various cached variables.
"""
# List view
self._list_columns = self.get_list_columns()
self._sortable_columns = self.get_sortable_columns()
# Details view
if self.can_view_details:
self._details_columns = self.get_details_columns()
# Export view
if self.can_export:
self._export_columns = self.get_export_columns()
# Labels
if self.column_labels is None:
self.column_labels = {}
# Forms
self._refresh_forms_cache()
# Search
self._search_supported = self.init_search()
# Choices
if self.column_choices:
self._column_choices_map = dict([
(column, dict(choices))
for column, choices in self.column_choices.items()
])
else:
self.column_choices = self._column_choices_map = dict()
# Column formatters
if self.column_formatters_export is None:
self.column_formatters_export = self.column_formatters
# Type formatters
if self.column_type_formatters is None:
self.column_type_formatters = dict(typefmt.BASE_FORMATTERS)
if self.column_type_formatters_export is None:
self.column_type_formatters_export = dict(typefmt.EXPORT_FORMATTERS)
if self.column_descriptions is None:
self.column_descriptions = dict()
# Filters
self._refresh_filters_cache()
# Form rendering rules
self._refresh_form_rules_cache()
# Process form rules
self._validate_form_class(self._form_edit_rules, self._edit_form_class)
self._validate_form_class(self._form_create_rules, self._create_form_class)
# Primary key
def get_pk_value(self, model):
"""
Return PK value from a model object.
"""
raise NotImplementedError()
# List view
def scaffold_list_columns(self):
"""
Return list of the model field names. Must be implemented in
the child class.
        Expected return format is a list of field names. For
        example::
['name', 'first_name', 'last_name']
"""
raise NotImplementedError('Please implement scaffold_list_columns method')
def get_column_name(self, field):
"""
Return a human-readable column name.
:param field:
Model field name.
"""
if self.column_labels and field in self.column_labels:
return self.column_labels[field]
else:
return self._prettify_name(field)
def get_list_columns(self):
"""
Returns a list of tuples with the model field name and formatted
field name. If `column_list` was set, returns it. Otherwise calls
`scaffold_list_columns` to generate the list from the model.
"""
columns = self.column_list
if columns is None:
columns = self.scaffold_list_columns()
# Filter excluded columns
if self.column_exclude_list:
columns = [c for c in columns if c not in self.column_exclude_list]
return [(c, self.get_column_name(c)) for c in columns]
def get_list_row_actions(self):
"""
Return list of row action objects, each is instance of :class:`~flask_admin.model.template.BaseListRowAction`
"""
actions = []
if self.can_view_details:
if self.details_modal:
actions.append(template.ViewPopupRowAction())
else:
actions.append(template.ViewRowAction())
if self.can_edit:
if self.edit_modal:
actions.append(template.EditPopupRowAction())
else:
actions.append(template.EditRowAction())
if self.can_delete:
actions.append(template.DeleteRowAction())
return actions + (self.column_extra_row_actions or [])
def get_details_columns(self):
"""
Returns a list of the model field names in the details view. If
`column_details_list` was set, returns it. Otherwise calls
`scaffold_list_columns` to generate the list from the model.
"""
columns = self.column_details_list
if columns is None:
columns = self.scaffold_list_columns()
# Filter excluded columns
if self.column_details_exclude_list:
columns = [c for c in columns
if c not in self.column_details_exclude_list]
return [(c, self.get_column_name(c)) for c in columns]
def get_export_columns(self):
"""
Returns a list of the model field names in the export view. If
`column_export_list` was set, returns it. Otherwise, if
`column_list` was set, returns it. Otherwise calls
`scaffold_list_columns` to generate the list from the model.
"""
columns = self.column_export_list
if columns is None:
columns = self.column_list
if columns is None:
columns = self.scaffold_list_columns()
# Filter excluded columns
if self.column_export_exclude_list:
columns = [c for c in columns
if c not in self.column_export_exclude_list]
return [(c, self.get_column_name(c)) for c in columns]
def scaffold_sortable_columns(self):
"""
Returns dictionary of sortable columns. Must be implemented in
the child class.
Expected return format is a dictionary, where keys are field names and
values are property names.
"""
raise NotImplementedError('Please implement scaffold_sortable_columns method')
def get_sortable_columns(self):
"""
Returns a dictionary of the sortable columns. Key is a model
        field name and value is the sort column (for example, a model attribute).
If `column_sortable_list` is set, will use it. Otherwise, will call
`scaffold_sortable_columns` to get them from the model.
"""
if self.column_sortable_list is None:
return self.scaffold_sortable_columns() or dict()
else:
result = dict()
for c in self.column_sortable_list:
if isinstance(c, tuple):
result[c[0]] = c[1]
else:
result[c] = c
return result
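    # Illustrative note (not part of the original source): entries in
    # column_sortable_list may be plain field names or (name, sort
    # expression) tuples, e.g. the hypothetical configuration below::
    #
    #     column_sortable_list = ('title', ('user', 'user.name'))
    #
    # maps the rendered "user" column onto the ``user.name`` attribute.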
def init_search(self):
"""
Initialize search. If data provider does not support search,
`init_search` will return `False`.
"""
return False
# Filter helpers
def scaffold_filters(self, name):
"""
Generate filter object for the given name
:param name:
Name of the field
"""
return None
def is_valid_filter(self, filter):
"""
Verify that the provided filter object is valid.
Override in model backend implementation to verify if
the provided filter type is allowed.
:param filter:
Filter object to verify.
"""
return isinstance(filter, filters.BaseFilter)
def handle_filter(self, filter):
"""
Postprocess (add joins, etc) for a filter.
:param filter:
Filter object to postprocess
"""
return filter
def get_filters(self):
"""
Return a list of filter objects.
If your model backend implementation does not support filters,
override this method and return `None`.
"""
if self.column_filters:
collection = []
for n in self.column_filters:
if self.is_valid_filter(n):
collection.append(self.handle_filter(n))
else:
flt = self.scaffold_filters(n)
if flt:
collection.extend(flt)
else:
raise Exception('Unsupported filter type %s' % n)
return collection
else:
return None
def get_filter_arg(self, index, flt):
"""
Given a filter `flt`, return a unique name for that filter in
this view.
Does not include the `flt[n]_` portion of the filter name.
:param index:
Filter index in _filters array
:param flt:
Filter instance
"""
if self.named_filter_urls:
name = ('%s %s' % (flt.name, as_unicode(flt.operation()))).lower()
name = filter_char_re.sub('', name)
name = filter_compact_re.sub('_', name)
return name
else:
return str(index)
def _get_filter_groups(self):
"""
Returns non-lazy version of filter strings
"""
if self._filter_groups:
results = OrderedDict()
for group in itervalues(self._filter_groups):
key, items = group.non_lazy()
results[key] = items
return results
return None
# Form helpers
def scaffold_form(self):
"""
Create `form.BaseForm` inherited class from the model. Must be
implemented in the child class.
"""
raise NotImplementedError('Please implement scaffold_form method')
def scaffold_list_form(self, widget=None, validators=None):
"""
Create form for the `index_view` using only the columns from
`self.column_editable_list`.
:param widget:
WTForms widget class. Defaults to `XEditableWidget`.
:param validators:
`form_args` dict with only validators
{'name': {'validators': [DataRequired()]}}
Must be implemented in the child class.
"""
raise NotImplementedError('Please implement scaffold_list_form method')
def get_form(self):
"""
Get form class.
        If ``self.form`` is set, returns it; otherwise calls
        ``self.scaffold_form``.
Override to implement customized behavior.
"""
if self.form is not None:
return self.form
return self.scaffold_form()
def get_list_form(self):
"""
Get form class for the editable list view.
Uses only validators from `form_args` to build the form class.
Allows overriding the editable list view field/widget. For example::
from flask_admin.model.widgets import XEditableWidget
class CustomWidget(XEditableWidget):
def get_kwargs(self, subfield, kwargs):
if subfield.type == 'TextAreaField':
kwargs['data-type'] = 'textarea'
kwargs['data-rows'] = '20'
# elif: kwargs for other fields
return kwargs
class MyModelView(BaseModelView):
def get_list_form(self):
return self.scaffold_list_form(widget=CustomWidget)
"""
if self.form_args:
# get only validators, other form_args can break FieldList wrapper
validators = dict(
(key, {'validators': value["validators"]})
for key, value in iteritems(self.form_args)
if value.get("validators")
)
else:
validators = None
return self.scaffold_list_form(validators=validators)
def get_create_form(self):
"""
Create form class for model creation view.
Override to implement customized behavior.
"""
return self.get_form()
def get_edit_form(self):
"""
Create form class for model editing view.
Override to implement customized behavior.
"""
return self.get_form()
def get_delete_form(self):
"""
Create form class for model delete view.
Override to implement customized behavior.
"""
class DeleteForm(self.form_base_class):
id = HiddenField(validators=[InputRequired()])
url = HiddenField()
return DeleteForm
def create_form(self, obj=None):
"""
Instantiate model creation form and return it.
Override to implement custom behavior.
"""
return self._create_form_class(get_form_data(), obj=obj)
def edit_form(self, obj=None):
"""
Instantiate model editing form and return it.
Override to implement custom behavior.
"""
return self._edit_form_class(get_form_data(), obj=obj)
def delete_form(self):
"""
Instantiate model delete form and return it.
Override to implement custom behavior.
The delete form originally used a GET request, so delete_form
        accepts both GET and POST requests for backwards compatibility.
"""
if request.form:
return self._delete_form_class(request.form)
elif request.args:
# allow request.args for backward compatibility
return self._delete_form_class(request.args)
else:
return self._delete_form_class()
def list_form(self, obj=None):
"""
Instantiate model editing form for list view and return it.
Override to implement custom behavior.
"""
return self._list_form_class(get_form_data(), obj=obj)
def validate_form(self, form):
"""
Validate the form on submit.
:param form:
Form to validate
"""
return validate_form_on_submit(form)
def get_save_return_url(self, model, is_created=False):
"""
Return url where user is redirected after successful form save.
:param model:
Saved object
:param is_created:
Whether new object was created or existing one was updated
        For example, to redirect the user to the object details view after form save::
class MyModelView(ModelView):
can_view_details = True
def get_save_return_url(self, model, is_created):
return self.get_url('.details_view', id=model.id)
"""
return get_redirect_target() or self.get_url('.index_view')
def _get_ruleset_missing_fields(self, ruleset, form):
missing_fields = []
if ruleset:
visible_fields = ruleset.visible_fields
for field in form:
if field.name not in visible_fields:
missing_fields.append(field.name)
return missing_fields
def _show_missing_fields_warning(self, text):
warnings.warn(text)
def _validate_form_class(self, ruleset, form_class, remove_missing=True):
form_fields = []
for name, obj in iteritems(form_class.__dict__):
if isinstance(obj, UnboundField):
form_fields.append(name)
missing_fields = []
if ruleset:
visible_fields = ruleset.visible_fields
for field_name in form_fields:
if field_name not in visible_fields:
missing_fields.append(field_name)
if missing_fields:
self._show_missing_fields_warning('Fields missing from ruleset: %s' % (','.join(missing_fields)))
if remove_missing:
self._remove_fields_from_form_class(missing_fields, form_class)
def _validate_form_instance(self, ruleset, form, remove_missing=True):
missing_fields = self._get_ruleset_missing_fields(ruleset=ruleset, form=form)
if missing_fields:
self._show_missing_fields_warning('Fields missing from ruleset: %s' % (','.join(missing_fields)))
if remove_missing:
self._remove_fields_from_form_instance(missing_fields, form)
def _remove_fields_from_form_instance(self, field_names, form):
for field_name in field_names:
form.__delitem__(field_name)
def _remove_fields_from_form_class(self, field_names, form_class):
for field_name in field_names:
delattr(form_class, field_name)
# Helpers
def is_sortable(self, name):
"""
Verify if column is sortable.
Not case-sensitive.
:param name:
Column name.
"""
return name.lower() in (x.lower() for x in self._sortable_columns)
def is_editable(self, name):
"""
Verify if column is editable.
:param name:
Column name.
"""
return name in self.column_editable_list
def _get_column_by_idx(self, idx):
"""
        Return a (field name, label) column tuple by its index in the list view.
"""
if idx is None or idx < 0 or idx >= len(self._list_columns):
return None
return self._list_columns[idx]
def _get_default_order(self):
"""
Return default sort order
"""
if self.column_default_sort:
if isinstance(self.column_default_sort, tuple):
return self.column_default_sort
else:
return self.column_default_sort, False
return None
# Database-related API
def get_list(self, page, sort_field, sort_desc, search, filters,
page_size=None):
"""
Return a paginated and sorted list of models from the data source.
Must be implemented in the child class.
:param page:
Page number, 0 based. Can be set to None if it is first page.
:param sort_field:
Sort column name or None.
:param sort_desc:
If set to True, sorting is in descending order.
:param search:
Search query
:param filters:
List of filter tuples. First value in a tuple is a search
index, second value is a search value.
:param page_size:
Number of results. Defaults to ModelView's page_size. Can be
            overridden to change the page_size limit. Removing the page_size
limit requires setting page_size to 0 or False.
"""
raise NotImplementedError('Please implement get_list method')
def get_one(self, id):
"""
Return one model by its id.
Must be implemented in the child class.
:param id:
Model id
"""
raise NotImplementedError('Please implement get_one method')
# Exception handler
def handle_view_exception(self, exc):
if isinstance(exc, ValidationError):
flash(as_unicode(exc))
return True
if self._debug:
raise
return False
# Model event handlers
def on_model_change(self, form, model, is_created):
"""
Perform some actions before a model is created or updated.
Called from create_model and update_model in the same transaction
(if it has any meaning for a store backend).
By default does nothing.
:param form:
Form used to create/update model
:param model:
Model that will be created/updated
:param is_created:
Will be set to True if model was created and to False if edited
"""
pass
def _on_model_change(self, form, model, is_created):
"""
Compatibility helper.
"""
try:
self.on_model_change(form, model, is_created)
except TypeError:
msg = ('%s.on_model_change() now accepts third ' +
'parameter is_created. Please update your code') % self.model
warnings.warn(msg)
self.on_model_change(form, model)
def after_model_change(self, form, model, is_created):
"""
Perform some actions after a model was created or updated and
committed to the database.
Called from create_model after successful database commit.
By default does nothing.
:param form:
Form used to create/update model
:param model:
Model that was created/updated
:param is_created:
True if model was created, False if model was updated
"""
pass
def on_model_delete(self, model):
"""
Perform some actions before a model is deleted.
Called from delete_model in the same transaction
(if it has any meaning for a store backend).
        By default does nothing.
"""
pass
def after_model_delete(self, model):
"""
Perform some actions after a model was deleted and
committed to the database.
Called from delete_model after successful database commit
(if it has any meaning for a store backend).
By default does nothing.
:param model:
Model that was deleted
"""
pass
    def on_form_prefill(self, form, id):
"""
Perform additional actions to pre-fill the edit form.
Called from edit_view, if the current action is rendering
the form rather than receiving client side input, after
default pre-filling has been performed.
By default does nothing.
You only need to override this if you have added custom
fields that depend on the database contents in a way that
        Flask-Admin can't figure out by itself. Fields that were
added by name of a normal column or relationship should
work out of the box.
:param form:
Form instance
:param id:
id of the object that is going to be edited
"""
pass
def create_model(self, form):
"""
Create model from the form.
Returns the model instance if operation succeeded.
Must be implemented in the child class.
:param form:
Form instance
"""
raise NotImplementedError()
def update_model(self, form, model):
"""
Update model from the form.
Returns `True` if operation succeeded.
Must be implemented in the child class.
:param form:
Form instance
:param model:
Model instance
"""
raise NotImplementedError()
def delete_model(self, model):
"""
Delete model.
Returns `True` if operation succeeded.
Must be implemented in the child class.
:param model:
Model instance
"""
raise NotImplementedError()
# Various helpers
def _prettify_name(self, name):
"""
Prettify pythonic variable name.
For example, 'hello_world' will be converted to 'Hello World'
:param name:
Name to prettify
"""
return prettify_name(name)
def get_empty_list_message(self):
return gettext('There are no items in the table.')
# URL generation helpers
def _get_list_filter_args(self):
if self._filters:
filters = []
for n in request.args:
if not n.startswith('flt'):
continue
if '_' not in n:
continue
pos, key = n[3:].split('_', 1)
if key in self._filter_args:
idx, flt = self._filter_args[key]
value = request.args[n]
if flt.validate(value):
filters.append((pos, (idx, as_unicode(flt.name), value)))
else:
flash(gettext('Invalid Filter Value: %(value)s', value=value))
# Sort filters
return [v[1] for v in sorted(filters, key=lambda n: n[0])]
return None
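    # Illustrative note (not part of the original source): filter values
    # arrive in the query string as e.g. ``?flt0_23=foo`` or, when
    # ``named_filter_urls`` is enabled, ``?flt0_name_contains=foo``; the
    # digits right after ``flt`` only control the order in which active
    # filters are applied, while the part after ``_`` selects the filter.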
def _get_list_extra_args(self):
"""
Return arguments from query string.
"""
return ViewArgs(page=request.args.get('page', 0, type=int),
sort=request.args.get('sort', None, type=int),
sort_desc=request.args.get('desc', None, type=int),
search=request.args.get('search', None),
filters=self._get_list_filter_args())
# URL generation helpers
def _get_list_url(self, view_args):
"""
Generate page URL with current page, sort column and
other parameters.
:param view:
View name
:param view_args:
ViewArgs object with page number, filters, etc.
"""
page = view_args.page or None
desc = 1 if view_args.sort_desc else None
kwargs = dict(page=page, sort=view_args.sort, desc=desc, search=view_args.search)
kwargs.update(view_args.extra_args)
if view_args.filters:
for i, pair in enumerate(view_args.filters):
idx, flt_name, value = pair
key = 'flt%d_%s' % (i, self.get_filter_arg(idx, self._filters[idx]))
kwargs[key] = value
return self.get_url('.index_view', **kwargs)
# Actions
def is_action_allowed(self, name):
"""
Override this method to allow or disallow actions based
on some condition.
The default implementation only checks if the particular action
is not in `action_disallowed_list`.
"""
return name not in self.action_disallowed_list
def _get_field_value(self, model, name):
"""
Get unformatted field value from the model
"""
return rec_getattr(model, name)
def _get_list_value(self, context, model, name, column_formatters,
column_type_formatters):
"""
Returns the value to be displayed.
:param context:
:py:class:`jinja2.runtime.Context` if available
:param model:
Model instance
:param name:
Field name
:param column_formatters:
column_formatters to be used.
:param column_type_formatters:
column_type_formatters to be used.
"""
column_fmt = column_formatters.get(name)
if column_fmt is not None:
value = column_fmt(self, context, model, name)
else:
value = self._get_field_value(model, name)
choices_map = self._column_choices_map.get(name, {})
if choices_map:
return choices_map.get(value) or value
type_fmt = None
for typeobj, formatter in column_type_formatters.items():
if isinstance(value, typeobj):
type_fmt = formatter
break
if type_fmt is not None:
value = type_fmt(self, value)
return value
@contextfunction
def get_list_value(self, context, model, name):
"""
Returns the value to be displayed in the list view
:param context:
:py:class:`jinja2.runtime.Context`
:param model:
Model instance
:param name:
Field name
"""
return self._get_list_value(
context,
model,
name,
self.column_formatters,
self.column_type_formatters,
)
def get_export_value(self, model, name):
"""
Returns the value to be displayed in export.
Allows export to use different (non HTML) formatters.
:param model:
Model instance
:param name:
Field name
"""
return self._get_list_value(
None,
model,
name,
self.column_formatters_export,
self.column_type_formatters_export,
)
def get_export_name(self, export_type='csv'):
"""
:return: The exported csv file name.
"""
filename = '%s_%s.%s' % (self.name,
time.strftime("%Y-%m-%d_%H-%M-%S"),
export_type)
return filename
# AJAX references
def _process_ajax_references(self):
"""
Process `form_ajax_refs` and generate model loaders that
will be used by the `ajax_lookup` view.
"""
result = {}
if self.form_ajax_refs:
for name, options in iteritems(self.form_ajax_refs):
if isinstance(options, dict):
result[name] = self._create_ajax_loader(name, options)
elif isinstance(options, AjaxModelLoader):
result[name] = options
else:
raise ValueError('%s.form_ajax_refs can not handle %s types' % (self, type(options)))
return result
def _create_ajax_loader(self, name, options):
"""
Model backend will override this to implement AJAX model loading.
"""
raise NotImplementedError()
# Views
@expose('/')
def index_view(self):
"""
List view
"""
if self.can_delete:
delete_form = self.delete_form()
else:
delete_form = None
# Grab parameters from URL
view_args = self._get_list_extra_args()
# Map column index to column name
sort_column = self._get_column_by_idx(view_args.sort)
if sort_column is not None:
sort_column = sort_column[0]
# Get count and data
count, data = self.get_list(view_args.page, sort_column, view_args.sort_desc,
view_args.search, view_args.filters)
list_forms = {}
if self.column_editable_list:
for row in data:
list_forms[self.get_pk_value(row)] = self.list_form(obj=row)
# Calculate number of pages
if count is not None:
num_pages = count // self.page_size
if count % self.page_size != 0:
num_pages += 1
else:
num_pages = None
# Various URL generation helpers
def pager_url(p):
# Do not add page number if it is first page
if p == 0:
p = None
return self._get_list_url(view_args.clone(page=p))
def sort_url(column, invert=False):
desc = None
if invert and not view_args.sort_desc:
desc = 1
return self._get_list_url(view_args.clone(sort=column, sort_desc=desc))
# Actions
actions, actions_confirmation = self.get_actions_list()
clear_search_url = self._get_list_url(view_args.clone(page=0,
sort=view_args.sort,
sort_desc=view_args.sort_desc,
search=None,
filters=None))
return self.render(
self.list_template,
data=data,
list_forms=list_forms,
delete_form=delete_form,
# List
list_columns=self._list_columns,
sortable_columns=self._sortable_columns,
editable_columns=self.column_editable_list,
list_row_actions=self.get_list_row_actions(),
# Pagination
count=count,
pager_url=pager_url,
num_pages=num_pages,
page=view_args.page,
page_size=self.page_size,
# Sorting
sort_column=view_args.sort,
sort_desc=view_args.sort_desc,
sort_url=sort_url,
# Search
search_supported=self._search_supported,
clear_search_url=clear_search_url,
search=view_args.search,
# Filters
filters=self._filters,
filter_groups=self._get_filter_groups(),
active_filters=view_args.filters,
# Actions
actions=actions,
actions_confirmation=actions_confirmation,
# Misc
enumerate=enumerate,
get_pk_value=self.get_pk_value,
get_value=self.get_list_value,
return_url=self._get_list_url(view_args),
)
@expose('/new/', methods=('GET', 'POST'))
def create_view(self):
"""
Create model view
"""
return_url = get_redirect_target() or self.get_url('.index_view')
if not self.can_create:
return redirect(return_url)
form = self.create_form()
if not hasattr(form, '_validated_ruleset') or not form._validated_ruleset:
self._validate_form_instance(ruleset=self._form_create_rules, form=form)
if self.validate_form(form):
# in versions 1.1.0 and before, this returns a boolean
# in later versions, this is the model itself
model = self.create_model(form)
if model:
flash(gettext('Record was successfully created.'))
if '_add_another' in request.form:
return redirect(request.url)
elif '_continue_editing' in request.form:
# if we have a valid model, try to go to the edit view
if model is not True:
url = self.get_url('.edit_view', id=self.get_pk_value(model), url=return_url)
else:
url = return_url
return redirect(url)
else:
# save button
return redirect(self.get_save_return_url(model, is_created=True))
form_opts = FormOpts(widget_args=self.form_widget_args,
form_rules=self._form_create_rules)
if self.create_modal and request.args.get('modal'):
template = self.create_modal_template
else:
template = self.create_template
return self.render(template,
form=form,
form_opts=form_opts,
return_url=return_url)
@expose('/edit/', methods=('GET', 'POST'))
def edit_view(self):
"""
Edit model view
"""
return_url = get_redirect_target() or self.get_url('.index_view')
if not self.can_edit:
return redirect(return_url)
id = get_mdict_item_or_list(request.args, 'id')
if id is None:
return redirect(return_url)
model = self.get_one(id)
if model is None:
flash(gettext('Record does not exist.'))
return redirect(return_url)
form = self.edit_form(obj=model)
if not hasattr(form, '_validated_ruleset') or not form._validated_ruleset:
self._validate_form_instance(ruleset=self._form_edit_rules, form=form)
if self.validate_form(form):
if self.update_model(form, model):
flash(gettext('Record was successfully saved.'))
if '_add_another' in request.form:
return redirect(self.get_url('.create_view', url=return_url))
elif '_continue_editing' in request.form:
return redirect(request.url)
else:
# save button
return redirect(self.get_save_return_url(model, is_created=False))
if request.method == 'GET':
self.on_form_prefill(form, id)
form_opts = FormOpts(widget_args=self.form_widget_args,
form_rules=self._form_edit_rules)
if self.edit_modal and request.args.get('modal'):
template = self.edit_modal_template
else:
template = self.edit_template
return self.render(template,
model=model,
form=form,
form_opts=form_opts,
return_url=return_url)
@expose('/details/')
def details_view(self):
"""
Details model view
"""
return_url = get_redirect_target() or self.get_url('.index_view')
if not self.can_view_details:
return redirect(return_url)
id = get_mdict_item_or_list(request.args, 'id')
if id is None:
return redirect(return_url)
model = self.get_one(id)
if model is None:
flash(gettext('Record does not exist.'))
return redirect(return_url)
if self.details_modal and request.args.get('modal'):
template = self.details_modal_template
else:
template = self.details_template
return self.render(template,
model=model,
details_columns=self._details_columns,
get_value=self.get_list_value,
return_url=return_url)
@expose('/delete/', methods=('POST',))
def delete_view(self):
"""
Delete model view. Only POST method is allowed.
"""
return_url = get_redirect_target() or self.get_url('.index_view')
if not self.can_delete:
return redirect(return_url)
form = self.delete_form()
if self.validate_form(form):
# id is InputRequired()
id = form.id.data
model = self.get_one(id)
if model is None:
flash(gettext('Record does not exist.'))
return redirect(return_url)
# message is flashed from within delete_model if it fails
if self.delete_model(model):
flash(gettext('Record was successfully deleted.'))
return redirect(return_url)
else:
flash_errors(form, message='Failed to delete record. %(error)s')
return redirect(return_url)
@expose('/action/', methods=('POST',))
def action_view(self):
"""
Mass-model action view.
"""
return self.handle_action()
def _export_data(self):
# Macros in column_formatters are not supported.
# Macros will have a function name 'inner'
        # This causes non-macro functions named 'inner' to not work.
for col, func in iteritems(self.column_formatters_export):
# skip checking columns not being exported
if col not in [col for col, _ in self._export_columns]:
continue
if func.__name__ == 'inner':
raise NotImplementedError(
'Macros are not implemented in export. Exclude column in'
' column_formatters_export, column_export_list, or '
' column_export_exclude_list. Column: %s' % (col,)
)
# Grab parameters from URL
view_args = self._get_list_extra_args()
# Map column index to column name
sort_column = self._get_column_by_idx(view_args.sort)
if sort_column is not None:
sort_column = sort_column[0]
# Get count and data
count, data = self.get_list(0, sort_column, view_args.sort_desc,
view_args.search, view_args.filters,
page_size=self.export_max_rows)
return count, data
@expose('/export/<export_type>/')
def export(self, export_type):
return_url = get_redirect_target() or self.get_url('.index_view')
if not self.can_export or (export_type not in self.export_types):
flash(gettext('Permission denied.'))
return redirect(return_url)
if export_type == 'csv':
return self._export_csv(return_url)
else:
return self._export_tablib(export_type, return_url)
def _export_csv(self, return_url):
"""
Export a CSV of records as a stream.
"""
count, data = self._export_data()
# https://docs.djangoproject.com/en/1.8/howto/outputting-csv/
class Echo(object):
"""
An object that implements just the write method of the file-like
interface.
"""
def write(self, value):
"""
Write the value by returning it, instead of storing
in a buffer.
"""
return value
writer = csv.writer(Echo())
def generate():
# Append the column titles at the beginning
titles = [csv_encode(c[1]) for c in self._export_columns]
yield writer.writerow(titles)
for row in data:
vals = [csv_encode(self.get_export_value(row, c[0]))
for c in self._export_columns]
yield writer.writerow(vals)
filename = self.get_export_name(export_type='csv')
disposition = 'attachment;filename=%s' % (secure_filename(filename),)
return Response(
stream_with_context(generate()),
headers={'Content-Disposition': disposition},
mimetype='text/csv'
)
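    # Illustrative sketch (not part of the original source): the Echo/writer
    # pairing above is the standard streaming-CSV pattern. ``csv.writer``
    # pushes each formatted row into its file-like target, and because
    # ``Echo.write()`` simply returns the row, ``writerow()`` hands the
    # formatted string back so the generator can yield it to the client
    # instead of buffering the whole export in memory:
    #
    #     import csv
    #     writer = csv.writer(Echo())
    #     writer.writerow(['a', 'b'])    # -> 'a,b\r\n' (returned, not stored)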
def _export_tablib(self, export_type, return_url):
"""
Exports a variety of formats using the tablib library.
"""
if tablib is None:
flash(gettext('Tablib dependency not installed.'))
return redirect(return_url)
filename = self.get_export_name(export_type)
disposition = 'attachment;filename=%s' % (secure_filename(filename),)
mimetype, encoding = mimetypes.guess_type(filename)
if not mimetype:
mimetype = 'application/octet-stream'
if encoding:
mimetype = '%s; charset=%s' % (mimetype, encoding)
ds = tablib.Dataset(headers=[c[1] for c in self._export_columns])
count, data = self._export_data()
for row in data:
vals = [self.get_export_value(row, c[0]) for c in self._export_columns]
ds.append(vals)
try:
try:
response_data = ds.export(format=export_type)
except AttributeError:
response_data = getattr(ds, export_type)
except (AttributeError, tablib.UnsupportedFormat):
            flash(gettext('Export type "%(type)s" not supported.',
                          type=export_type))
return redirect(return_url)
return Response(
response_data,
headers={'Content-Disposition': disposition},
mimetype=mimetype,
)
@expose('/ajax/lookup/')
def ajax_lookup(self):
name = request.args.get('name')
query = request.args.get('query')
offset = request.args.get('offset', type=int)
limit = request.args.get('limit', 10, type=int)
loader = self._form_ajax_refs.get(name)
if not loader:
abort(404)
data = [loader.format(m) for m in loader.get_list(query, offset, limit)]
return Response(json.dumps(data), mimetype='application/json')
@expose('/ajax/update/', methods=('POST',))
def ajax_update(self):
"""
Edits a single column of a record in list view.
"""
if not self.column_editable_list:
abort(404)
form = self.list_form()
        # prevent validation issues due to submitting a single field:
        # delete all fields except the submitted fields and the csrf token
        for field in list(form):
            if field.name not in request.form and field.name != 'csrf_token':
                form.__delitem__(field.name)
if self.validate_form(form):
pk = form.list_form_pk.data
record = self.get_one(pk)
if record is None:
return gettext('Record does not exist.'), 500
if self.update_model(form, record):
# Success
return gettext('Record was successfully saved.')
else:
# Error: No records changed, or problem saving to database.
msgs = ", ".join([msg for msg in get_flashed_messages()])
return gettext('Failed to update record. %(error)s',
error=msgs), 500
else:
for field in form:
for error in field.errors:
# return validation error to x-editable
if isinstance(error, list):
return gettext('Failed to update record. %(error)s',
error=", ".join(error)), 500
else:
return gettext('Failed to update record. %(error)s',
error=error), 500
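# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of this module). A minimal concrete
# subclass, assuming the SQLAlchemy backend; ``User``, ``db`` and ``admin``
# are hypothetical names used only for illustration:
#
#     from flask_admin.contrib.sqla import ModelView
#
#     class UserAdmin(ModelView):
#         can_view_details = True
#         column_list = ('name', 'email')       # feeds get_list_columns()
#         column_filters = ('email',)           # consumed by get_filters()
#         column_editable_list = ('name',)      # enables list_form()/ajax_update()
#
#         def get_save_return_url(self, model, is_created):
#             # redirect to the details view after a successful save
#             return self.get_url('.details_view', id=self.get_pk_value(model))
#
#     admin.add_view(UserAdmin(User, db.session))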
|
jschneier/flask-admin
|
flask_admin/model/base.py
|
Python
|
bsd-3-clause
| 73,203
|
# -*- coding: utf-8 -*-
# Copyright (c) 2013, Mayo Clinic
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# Neither the name of the Mayo Clinic nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
__all__ = ['check_digit', 'jsonutils', 'kwutil', 'lfu_cache', 'link', 'listutils', 'sctid', 'sctid_generator', 'urlutil', 'xmlutils']
|
cts2/rf2db
|
rf2db/utils/__init__.py
|
Python
|
bsd-3-clause
| 1,704
|
"""
:Copyright: 2006-2021 Jochen Kupperschmidt
:License: Revised BSD (see `LICENSE` file for details)
"""
from datetime import date, datetime
import pytest
from byceps.services.party import service
from byceps.services.party.transfer.models import Party
@pytest.mark.parametrize(
'starts_at, ends_at, expected',
[
(
datetime(2020, 8, 22, 9, 30, 0),
datetime(2020, 8, 22, 23, 30, 0),
[
date(2020, 8, 22),
]
),
(
datetime(2020, 3, 16, 14, 0, 0),
datetime(2020, 3, 18, 11, 0, 0),
[
date(2020, 3, 16),
date(2020, 3, 17),
date(2020, 3, 18),
]
),
(
datetime(2020, 12, 10, 17, 0, 0),
datetime(2020, 12, 13, 14, 0, 0),
[
date(2020, 12, 10),
date(2020, 12, 11),
date(2020, 12, 12),
date(2020, 12, 13),
],
),
],
)
def test_get_party_days(starts_at, ends_at, expected):
party = create_party(starts_at, ends_at)
assert service.get_party_days(party) == expected
# helpers
def create_party(starts_at: datetime, ends_at: datetime) -> Party:
return Party(
'anylan-20',
'anylan',
'AnyLAN #20',
starts_at,
ends_at,
0,
False,
False,
False,
False,
)
|
homeworkprod/byceps
|
tests/unit/services/party/test_get_party_days.py
|
Python
|
bsd-3-clause
| 1,468
|
# -*- coding: utf-8 -*-
"""
Definition of a hierarchy of classes for kernel functions to be used
in convolution, e.g., for data smoothing (low pass filtering) or
firing rate estimation.
Symmetric kernels
~~~~~~~~~~~~~~~~~
.. autosummary::
:toctree: toctree/kernels/
RectangularKernel
TriangularKernel
EpanechnikovLikeKernel
GaussianKernel
LaplacianKernel
Asymmetric kernels
~~~~~~~~~~~~~~~~~~
.. autosummary::
:toctree: toctree/kernels/
ExponentialKernel
AlphaKernel
Examples
--------
>>> import quantities as pq
>>> kernel1 = GaussianKernel(sigma=100*pq.ms)
>>> kernel2 = ExponentialKernel(sigma=8*pq.ms, invert=True)
:copyright: Copyright 2016 by the Elephant team, see `doc/authors.rst`.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function, unicode_literals
import math
import numpy as np
import quantities as pq
import scipy.optimize
import scipy.special
import scipy.stats
from elephant.utils import deprecated_alias
__all__ = [
'RectangularKernel', 'TriangularKernel', 'EpanechnikovLikeKernel',
'GaussianKernel', 'LaplacianKernel', 'ExponentialKernel', 'AlphaKernel'
]
class Kernel(object):
r"""
This is the base class for commonly used kernels.
**General definition of a kernel:**
A function :math:`K(x, y)` is called a kernel function if
:math:`\int{K(x, y) g(x) g(y) \textrm{d}x \textrm{d}y} \ \geq 0 \quad
\forall g \in L_2`
**Currently implemented kernels are:**
* rectangular
* triangular
* epanechnikovlike
* gaussian
* laplacian
* exponential (asymmetric)
* alpha function (asymmetric)
In neuroscience, a popular application of kernels is in performing
smoothing operations via convolution. In this case, the kernel has the
properties of a probability density, i.e., it is positive and normalized
to one. Popular choices are the rectangular or Gaussian kernels.
Exponential and alpha kernels may also be used to represent the
postsynaptic current/potentials in a linear (current-based) model.
Parameters
----------
sigma : pq.Quantity
Standard deviation of the kernel.
invert : bool, optional
If True, asymmetric kernels (e.g., exponential or alpha kernels) are
inverted along the time axis.
Default: False.
Raises
------
    TypeError
        If `sigma` is not `pq.Quantity`.
    ValueError
        If `sigma` is negative.
        If `invert` is not `bool`.
"""
def __init__(self, sigma, invert=False):
if not isinstance(sigma, pq.Quantity):
raise TypeError("'sigma' must be a quantity")
if sigma.magnitude < 0:
raise ValueError("'sigma' cannot be negative")
if not isinstance(invert, bool):
raise ValueError("'invert' must be bool")
self.sigma = sigma
self.invert = invert
def __repr__(self):
return "{cls}(sigma={sigma}, invert={invert})".format(
cls=self.__class__.__name__, sigma=self.sigma, invert=self.invert)
@deprecated_alias(t='times')
def __call__(self, times):
"""
Evaluates the kernel at all points in the array `times`.
Parameters
----------
times : pq.Quantity
A vector with time intervals on which the kernel is evaluated.
Returns
-------
pq.Quantity
Vector with the result of the kernel evaluations.
Raises
------
TypeError
If `times` is not `pq.Quantity`.
If the dimensionality of `times` and :attr:`sigma` are different.
"""
self._check_time_input(times)
return self._evaluate(times)
def _evaluate(self, times):
"""
Evaluates the kernel Probability Density Function, PDF.
Parameters
----------
times : pq.Quantity
Vector with the interval on which the kernel is evaluated, not
necessarily a time interval.
Returns
-------
pq.Quantity
Vector with the result of the kernel evaluation.
"""
raise NotImplementedError(
"The Kernel class should not be used directly, "
"instead the subclasses for the single kernels.")
def boundary_enclosing_area_fraction(self, fraction):
"""
Calculates the boundary :math:`b` so that the integral from
:math:`-b` to :math:`b` encloses a certain fraction of the
integral over the complete kernel.
By definition the returned value is hence non-negative, even if the
whole probability mass of the kernel is concentrated over negative
support for inverted kernels.
Parameters
----------
fraction : float
Fraction of the whole area which has to be enclosed.
Returns
-------
pq.Quantity
Boundary of the kernel containing area `fraction` under the
kernel density.
Raises
------
ValueError
If `fraction` was chosen too close to one, such that in
combination with integral approximation errors the calculation of
a boundary was not possible.
"""
raise NotImplementedError(
"The Kernel class should not be used directly, "
"instead the subclasses for the single kernels.")
def _check_fraction(self, fraction):
"""
Checks the input variable of the method
:attr:`boundary_enclosing_area_fraction` for validity of type and
value.
Parameters
----------
fraction : float or int
Fraction of the area under the kernel function.
Raises
------
        TypeError
            If `fraction` is neither a float nor an int.
        ValueError
            If `fraction` is outside the allowed interval ([0, 1] for
            rectangular and triangular kernels, [0, 1) otherwise).
"""
if not isinstance(fraction, (float, int)):
raise TypeError("`fraction` must be float or integer")
if isinstance(self, (TriangularKernel, RectangularKernel)):
valid = 0 <= fraction <= 1
bracket = ']'
else:
valid = 0 <= fraction < 1
bracket = ')'
if not valid:
raise ValueError("`fraction` must be in the interval "
"[0, 1{}".format(bracket))
def _check_time_input(self, t):
if not isinstance(t, pq.Quantity):
raise TypeError("The argument 't' of the kernel callable must be "
"of type Quantity")
if t.dimensionality.simplified != self.sigma.dimensionality.simplified:
raise TypeError("The dimensionality of sigma and the input array "
"to the callable kernel object must be the same. "
"Otherwise a normalization to 1 of the kernel "
"cannot be performed.")
@deprecated_alias(t='time')
def cdf(self, time):
r"""
Cumulative Distribution Function, CDF.
Parameters
----------
time : pq.Quantity
The input time scalar.
Returns
-------
float
CDF at `time`.
"""
raise NotImplementedError
def icdf(self, fraction):
r"""
Inverse Cumulative Distribution Function, ICDF, also known as a
quantile.
Parameters
----------
fraction : float
The fraction of CDF to compute the quantile from.
Returns
-------
pq.Quantity
            The time scalar `t` such that `CDF(t) = fraction`.
"""
raise NotImplementedError
@deprecated_alias(t='times')
def median_index(self, times):
r"""
Estimates the index of the Median of the kernel.
We define the Median index :math:`i` of a kernel as:
.. math::
t_i = \text{ICDF}\left( \frac{\text{CDF}(t_0) +
\text{CDF}(t_{N-1})}{2} \right)
where :math:`t_0` and :math:`t_{N-1}` are the first and last entries of
the input array, CDF and ICDF stand for Cumulative Distribution
Function and its Inverse, respectively.
This function is not mandatory for symmetrical kernels but it is
required when asymmetrical kernels have to be aligned at their median.
Parameters
----------
times : pq.Quantity
Vector with the interval on which the kernel is evaluated.
Returns
-------
int
Index of the estimated value of the kernel median.
Raises
------
TypeError
If the input array is not a time pq.Quantity array.
ValueError
If the input array is empty.
If the input array is not sorted.
See Also
--------
Kernel.cdf : cumulative distribution function
Kernel.icdf : inverse cumulative distribution function
"""
self._check_time_input(times)
if len(times) == 0:
raise ValueError("The input time array is empty.")
if len(times) <= 2:
# either left or right; choose left
return 0
is_sorted = (np.diff(times.magnitude) >= 0).all()
if not is_sorted:
raise ValueError("The input time array must be sorted (in "
"ascending order).")
cdf_mean = 0.5 * (self.cdf(times[0]) + self.cdf(times[-1]))
if cdf_mean == 0.:
# any index of the kernel non-support is valid; choose median
return len(times) // 2
icdf = self.icdf(fraction=cdf_mean)
icdf = icdf.rescale(times.units).magnitude
# icdf is guaranteed to be in (t_start, t_end) interval
median_index = np.nonzero(times.magnitude >= icdf)[0][0]
return median_index
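    # Illustrative sketch (not part of the original source): for a symmetric
    # kernel evaluated on a symmetric, sorted grid the median falls on the
    # centre bin; the numbers below are made up.
    #
    #     import numpy as np
    #     import quantities as pq
    #     kernel = GaussianKernel(sigma=1 * pq.s)
    #     times = np.linspace(-3, 3, num=7) * pq.s
    #     kernel.median_index(times)    # -> 3, the index of times == 0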
def is_symmetric(self):
r"""
True for symmetric kernels and False otherwise (asymmetric kernels).
A kernel is symmetric if its PDF is symmetric w.r.t. time:
.. math::
\text{pdf}(-t) = \text{pdf}(t)
Returns
-------
bool
Whether the kernels is symmetric or not.
"""
return isinstance(self, SymmetricKernel)
@property
def min_cutoff(self):
"""
Half width of the kernel.
Returns
-------
float
The returned value varies according to the kernel type.
"""
raise NotImplementedError
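# Illustrative sketch (not part of the original source): a quick numerical
# check that a concrete kernel behaves as a probability density, i.e. it is
# positive and integrates to ~1 over its support:
#
#     import numpy as np
#     import quantities as pq
#
#     kernel = GaussianKernel(sigma=100 * pq.ms)
#     times = np.linspace(-1, 1, num=2001) * pq.s
#     pdf = kernel(times)                            # units of 1/s
#     area = np.trapz(pdf.magnitude, times.magnitude)
#     assert pdf.magnitude.min() >= 0 and abs(area - 1) < 1e-3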
class SymmetricKernel(Kernel):
"""
Base class for symmetric kernels.
"""
class RectangularKernel(SymmetricKernel):
r"""
Class for rectangular kernels.
.. math::
K(t) = \left\{\begin{array}{ll} \frac{1}{2 \tau}, & |t| < \tau \\
0, & |t| \geq \tau \end{array} \right.
with :math:`\tau = \sqrt{3} \sigma` corresponding to the half width
of the kernel.
The parameter `invert` has no effect on symmetric kernels.
Examples
--------
.. plot::
:include-source:
from elephant import kernels
import quantities as pq
import numpy as np
import matplotlib.pyplot as plt
time_array = np.linspace(-3, 3, num=100) * pq.s
kernel = kernels.RectangularKernel(sigma=1*pq.s)
kernel_time = kernel(time_array)
plt.plot(time_array, kernel_time)
plt.title("RectangularKernel with sigma=1s")
plt.xlabel("time, s")
plt.ylabel("kernel, 1/s")
plt.show()
"""
@property
def min_cutoff(self):
min_cutoff = np.sqrt(3.0)
return min_cutoff
def _evaluate(self, times):
t_units = times.units
t_abs = np.abs(times.magnitude)
tau = math.sqrt(3) * self.sigma.rescale(t_units).magnitude
kernel = (t_abs < tau) * 1 / (2 * tau)
kernel = pq.Quantity(kernel, units=1 / t_units)
return kernel
@deprecated_alias(t='time')
def cdf(self, time):
self._check_time_input(time)
tau = math.sqrt(3) * self.sigma.rescale(time.units).magnitude
time = np.clip(time.magnitude, a_min=-tau, a_max=tau)
cdf = (time + tau) / (2 * tau)
return cdf
def icdf(self, fraction):
self._check_fraction(fraction)
tau = math.sqrt(3) * self.sigma
icdf = tau * (2 * fraction - 1)
return icdf
def boundary_enclosing_area_fraction(self, fraction):
self._check_fraction(fraction)
return np.sqrt(3.0) * self.sigma * fraction
class TriangularKernel(SymmetricKernel):
r"""
Class for triangular kernels.
.. math::
K(t) = \left\{ \begin{array}{ll} \frac{1}{\tau} (1
- \frac{|t|}{\tau}), & |t| < \tau \\
0, & |t| \geq \tau \end{array} \right.
with :math:`\tau = \sqrt{6} \sigma` corresponding to the half width of
the kernel.
The parameter `invert` has no effect on symmetric kernels.
Examples
--------
.. plot::
:include-source:
from elephant import kernels
import quantities as pq
import numpy as np
import matplotlib.pyplot as plt
time_array = np.linspace(-3, 3, num=1000) * pq.s
kernel = kernels.TriangularKernel(sigma=1*pq.s)
kernel_time = kernel(time_array)
plt.plot(time_array, kernel_time)
plt.title("TriangularKernel with sigma=1s")
plt.xlabel("time, s")
plt.ylabel("kernel, 1/s")
plt.show()
"""
@property
def min_cutoff(self):
min_cutoff = np.sqrt(6.0)
return min_cutoff
def _evaluate(self, times):
tau = math.sqrt(6) * self.sigma.rescale(times.units).magnitude
kernel = scipy.stats.triang.pdf(times.magnitude, c=0.5, loc=-tau,
scale=2 * tau)
kernel = pq.Quantity(kernel, units=1 / times.units)
return kernel
@deprecated_alias(t='time')
def cdf(self, time):
self._check_time_input(time)
tau = math.sqrt(6) * self.sigma.rescale(time.units).magnitude
cdf = scipy.stats.triang.cdf(time.magnitude, c=0.5, loc=-tau,
scale=2 * tau)
return cdf
def icdf(self, fraction):
self._check_fraction(fraction)
tau = math.sqrt(6) * self.sigma.magnitude
icdf = scipy.stats.triang.ppf(fraction, c=0.5, loc=-tau, scale=2 * tau)
return icdf * self.sigma.units
def boundary_enclosing_area_fraction(self, fraction):
self._check_fraction(fraction)
return np.sqrt(6.0) * self.sigma * (1 - np.sqrt(1 - fraction))
class EpanechnikovLikeKernel(SymmetricKernel):
r"""
Class for Epanechnikov-like kernels.
.. math::
K(t) = \left\{\begin{array}{ll} (3 /(4 d)) (1 - (t / d)^2),
& |t| < d \\
0, & |t| \geq d \end{array} \right.
with :math:`d = \sqrt{5} \sigma` being the half width of the kernel.
    Under full consideration of its axioms, the Epanechnikov kernel has a
    half width of :math:`\sqrt{5}`. If one axiom is ignored, the
    corresponding kernel with half width 1 may also be called an
    Epanechnikov kernel [1]_. Kernels of this type with arbitrary width are
    therefore referred to here as 'Epanechnikov-like'.
The parameter `invert` has no effect on symmetric kernels.
References
----------
.. [1] https://de.wikipedia.org/wiki/Epanechnikov-Kern
Examples
--------
.. plot::
:include-source:
from elephant import kernels
import quantities as pq
import numpy as np
import matplotlib.pyplot as plt
time_array = np.linspace(-3, 3, num=100) * pq.s
kernel = kernels.EpanechnikovLikeKernel(sigma=1*pq.s)
kernel_time = kernel(time_array)
plt.plot(time_array, kernel_time)
plt.title("EpanechnikovLikeKernel with sigma=1s")
plt.xlabel("time, s")
plt.ylabel("kernel, 1/s")
plt.show()
"""
@property
def min_cutoff(self):
min_cutoff = np.sqrt(5.0)
return min_cutoff
def _evaluate(self, times):
tau = math.sqrt(5) * self.sigma.rescale(times.units).magnitude
t_div_tau = np.clip(times.magnitude / tau, a_min=-1, a_max=1)
kernel = 3. / (4. * tau) * np.maximum(0., 1 - t_div_tau ** 2)
kernel = pq.Quantity(kernel, units=1 / times.units)
return kernel
@deprecated_alias(t='time')
def cdf(self, time):
self._check_time_input(time)
tau = math.sqrt(5) * self.sigma.rescale(time.units).magnitude
t_div_tau = np.clip(time.magnitude / tau, a_min=-1, a_max=1)
cdf = 3. / 4 * (t_div_tau - t_div_tau ** 3 / 3.) + 0.5
return cdf
def icdf(self, fraction):
self._check_fraction(fraction)
# CDF(t) = -1/4 t^3 + 3/4 t + 1/2
coefs = [-1. / 4, 0, 3. / 4, 0.5 - fraction]
roots = np.roots(coefs)
icdf = next(root for root in roots if -1 <= root <= 1)
tau = math.sqrt(5) * self.sigma
return icdf * tau
def boundary_enclosing_area_fraction(self, fraction):
r"""
Refer to :func:`Kernel.boundary_enclosing_area_fraction` for the
documentation.
Notes
-----
For Epanechnikov-like kernels, integration of its density within
the boundaries 0 and :math:`b`, and then solving for :math:`b` leads
to the problem of finding the roots of a polynomial of third order.
The implemented formulas are based on the solution of this problem
given in [1]_, where the following 3 solutions are given:
* :math:`u_1 = 1`, solution on negative side;
* :math:`u_2 = \frac{-1 + i\sqrt{3}}{2}`, solution for larger
values than zero crossing of the density;
* :math:`u_3 = \frac{-1 - i\sqrt{3}}{2}`, solution for smaller
values than zero crossing of the density.
The solution :math:`u_3` is the relevant one for the problem at hand,
since it involves only positive area contributions.
References
----------
.. [1] https://en.wikipedia.org/wiki/Cubic_function
"""
self._check_fraction(fraction)
# Python's complex-operator cannot handle quantities, hence the
# following construction on quantities is necessary:
Delta_0 = complex(1.0 / (5.0 * self.sigma.magnitude ** 2), 0) / \
self.sigma.units ** 2
Delta_1 = complex(2.0 * np.sqrt(5.0) * fraction /
(25.0 * self.sigma.magnitude ** 3), 0) / \
self.sigma.units ** 3
C = ((Delta_1 + (Delta_1 ** 2.0 - 4.0 * Delta_0 ** 3.0) ** (
1.0 / 2.0)) /
2.0) ** (1.0 / 3.0)
u_3 = complex(-1.0 / 2.0, -np.sqrt(3.0) / 2.0)
b = -5.0 * self.sigma ** 2 * (u_3 * C + Delta_0 / (u_3 * C))
return b.real
class GaussianKernel(SymmetricKernel):
r"""
    Class for Gaussian kernels.
.. math::
K(t) = (\frac{1}{\sigma \sqrt{2 \pi}}) \exp(-\frac{t^2}{2 \sigma^2})
with :math:`\sigma` being the standard deviation.
The parameter `invert` has no effect on symmetric kernels.
Examples
--------
.. plot::
:include-source:
from elephant import kernels
import quantities as pq
import numpy as np
import matplotlib.pyplot as plt
time_array = np.linspace(-3, 3, num=100) * pq.s
kernel = kernels.GaussianKernel(sigma=1*pq.s)
kernel_time = kernel(time_array)
plt.plot(time_array, kernel_time)
plt.title("GaussianKernel with sigma=1s")
plt.xlabel("time, s")
plt.ylabel("kernel, 1/s")
plt.show()
"""
@property
def min_cutoff(self):
min_cutoff = 3.0
return min_cutoff
def _evaluate(self, times):
sigma = self.sigma.rescale(times.units).magnitude
kernel = scipy.stats.norm.pdf(times.magnitude, loc=0, scale=sigma)
kernel = pq.Quantity(kernel, units=1 / times.units)
return kernel
@deprecated_alias(t='time')
def cdf(self, time):
self._check_time_input(time)
sigma = self.sigma.rescale(time.units).magnitude
        cdf = scipy.stats.norm.cdf(time.magnitude, loc=0, scale=sigma)
return cdf
def icdf(self, fraction):
self._check_fraction(fraction)
icdf = scipy.stats.norm.ppf(fraction, loc=0,
scale=self.sigma.magnitude)
return icdf * self.sigma.units
def boundary_enclosing_area_fraction(self, fraction):
self._check_fraction(fraction)
return self.sigma * np.sqrt(2.0) * scipy.special.erfinv(fraction)
class LaplacianKernel(SymmetricKernel):
r"""
    Class for Laplacian kernels.
.. math::
K(t) = \frac{1}{2 \tau} \exp\left(-\left|\frac{t}{\tau}\right|\right)
with :math:`\tau = \sigma / \sqrt{2}`.
The parameter `invert` has no effect on symmetric kernels.
Examples
--------
.. plot::
:include-source:
from elephant import kernels
import quantities as pq
import numpy as np
import matplotlib.pyplot as plt
time_array = np.linspace(-3, 3, num=1000) * pq.s
kernel = kernels.LaplacianKernel(sigma=1*pq.s)
kernel_time = kernel(time_array)
plt.plot(time_array, kernel_time)
plt.title("LaplacianKernel with sigma=1s")
plt.xlabel("time, s")
plt.ylabel("kernel, 1/s")
plt.show()
"""
@property
def min_cutoff(self):
min_cutoff = 3.0
return min_cutoff
def _evaluate(self, times):
tau = self.sigma.rescale(times.units).magnitude / math.sqrt(2)
kernel = scipy.stats.laplace.pdf(times.magnitude, loc=0, scale=tau)
kernel = pq.Quantity(kernel, units=1 / times.units)
return kernel
@deprecated_alias(t='time')
def cdf(self, time):
self._check_time_input(time)
tau = self.sigma.rescale(time.units).magnitude / math.sqrt(2)
cdf = scipy.stats.laplace.cdf(time.magnitude, loc=0, scale=tau)
return cdf
def icdf(self, fraction):
self._check_fraction(fraction)
tau = self.sigma.magnitude / math.sqrt(2)
icdf = scipy.stats.laplace.ppf(fraction, loc=0, scale=tau)
return icdf * self.sigma.units
def boundary_enclosing_area_fraction(self, fraction):
self._check_fraction(fraction)
return -self.sigma * np.log(1.0 - fraction) / np.sqrt(2.0)
# Potential further symmetric kernels from Wiki Kernels (statistics):
# Quartic (biweight), Triweight, Tricube, Cosine, Logistics, Silverman
class ExponentialKernel(Kernel):
r"""
Class for exponential kernels.
.. math::
K(t) = \left\{\begin{array}{ll} (1 / \tau) \exp{(-t / \tau)},
& t > 0 \\
0, & t \leq 0 \end{array} \right.
with :math:`\tau = \sigma`.
Examples
--------
.. plot::
:include-source:
from elephant import kernels
import quantities as pq
import numpy as np
import matplotlib.pyplot as plt
time_array = np.linspace(-1, 4, num=100) * pq.s
kernel = kernels.ExponentialKernel(sigma=1*pq.s)
kernel_time = kernel(time_array)
plt.plot(time_array, kernel_time)
plt.title("ExponentialKernel with sigma=1s")
plt.xlabel("time, s")
plt.ylabel("kernel, 1/s")
plt.show()
"""
@property
def min_cutoff(self):
min_cutoff = 3.0
return min_cutoff
def _evaluate(self, times):
tau = self.sigma.rescale(times.units).magnitude
if self.invert:
times = -times
kernel = scipy.stats.expon.pdf(times.magnitude, loc=0, scale=tau)
kernel = pq.Quantity(kernel, units=1 / times.units)
return kernel
@deprecated_alias(t='time')
def cdf(self, time):
self._check_time_input(time)
tau = self.sigma.rescale(time.units).magnitude
time = time.magnitude
if self.invert:
time = np.minimum(time, 0)
return np.exp(time / tau)
time = np.maximum(time, 0)
return 1. - np.exp(-time / tau)
def icdf(self, fraction):
self._check_fraction(fraction)
if self.invert:
return self.sigma * np.log(fraction)
return -self.sigma * np.log(1.0 - fraction)
def boundary_enclosing_area_fraction(self, fraction):
# the boundary b, which encloses a 'fraction' of CDF in [-b, b] range,
# does not depend on the invert, if the kernel is cut at zero.
# It's easier to compute 'b' for a kernel that has not been inverted.
kernel = self.__class__(sigma=self.sigma, invert=False)
return kernel.icdf(fraction)
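# Illustrative sketch (not part of the original source): asymmetric kernels
# put all probability mass on one side of zero, and ``invert=True`` mirrors
# the support; this is visible directly in the CDF at t = 0:
#
#     import quantities as pq
#
#     causal = ExponentialKernel(sigma=10 * pq.ms)
#     acausal = ExponentialKernel(sigma=10 * pq.ms, invert=True)
#     assert causal.cdf(0 * pq.ms) == 0.0     # no mass at t <= 0
#     assert acausal.cdf(0 * pq.ms) == 1.0    # all mass at t <= 0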
class AlphaKernel(Kernel):
r"""
Class for alpha kernels.
.. math::
K(t) = \left\{\begin{array}{ll} (1 / \tau^2)
\ t\ \exp{(-t / \tau)}, & t > 0 \\
0, & t \leq 0 \end{array} \right.
with :math:`\tau = \sigma / \sqrt{2}`.
Examples
--------
.. plot::
:include-source:
from elephant import kernels
import quantities as pq
import numpy as np
import matplotlib.pyplot as plt
time_array = np.linspace(-1, 4, num=100) * pq.s
kernel = kernels.AlphaKernel(sigma=1*pq.s)
kernel_time = kernel(time_array)
plt.plot(time_array, kernel_time)
plt.title("AlphaKernel with sigma=1s")
plt.xlabel("time, s")
plt.ylabel("kernel, 1/s")
plt.show()
"""
@property
def min_cutoff(self):
min_cutoff = 3.0
return min_cutoff
def _evaluate(self, times):
t_units = times.units
tau = self.sigma.rescale(t_units).magnitude / math.sqrt(2)
times = times.magnitude
if self.invert:
times = -times
kernel = (times >= 0) * 1 / tau ** 2 * times * np.exp(-times / tau)
kernel = pq.Quantity(kernel, units=1 / t_units)
return kernel
@deprecated_alias(t='time')
def cdf(self, time):
self._check_time_input(time)
tau = self.sigma.rescale(time.units).magnitude / math.sqrt(2)
cdf = self._cdf_stripped(time.magnitude, tau)
return cdf
def _cdf_stripped(self, t, tau):
# CDF without time units
if self.invert:
t = np.minimum(t, 0)
return np.exp(t / tau) * (tau - t) / tau
t = np.maximum(t, 0)
return 1 - np.exp(-t / tau) * (t + tau) / tau
def icdf(self, fraction):
self._check_fraction(fraction)
tau = self.sigma.magnitude / math.sqrt(2)
def cdf(x):
            # CDF of the AlphaKernel minus 'fraction';
# evaluates the error of the root of cdf(x) = fraction
return self._cdf_stripped(x, tau) - fraction
# fraction is a good starting point for CDF approximation
x0 = fraction if not self.invert else fraction - 1
x_quantile = scipy.optimize.fsolve(cdf, x0=x0, xtol=1e-7)[0]
x_quantile = pq.Quantity(x_quantile, units=self.sigma.units)
return x_quantile
def boundary_enclosing_area_fraction(self, fraction):
# the boundary b, which encloses a 'fraction' of CDF in [-b, b] range,
# does not depend on the invert, if the kernel is cut at zero.
# It's easier to compute 'b' for a kernel that has not been inverted.
kernel = self.__class__(sigma=self.sigma, invert=False)
return kernel.icdf(fraction)
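# ---------------------------------------------------------------------------
# Illustrative sketch (not part of this module): the firing-rate smoothing
# use case from the module docstring, written out with a plain numpy
# convolution. The spike train, bin width and sigma are made-up values:
#
#     import numpy as np
#     import quantities as pq
#
#     bin_width = 1 * pq.ms
#     spikes = np.zeros(1000)                  # 1 s of 1 ms bins
#     spikes[[100, 400, 410, 800]] = 1         # four spikes
#     kernel = GaussianKernel(sigma=20 * pq.ms)
#     cutoff = int(kernel.min_cutoff * 20)     # +/- 3 sigma, in bins
#     times = np.arange(-cutoff, cutoff + 1) * bin_width
#     rate = np.convolve(spikes,
#                        kernel(times).rescale(1 / pq.s).magnitude,
#                        mode='same')          # instantaneous rate in 1/s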
|
alperyeg/elephant
|
elephant/kernels.py
|
Python
|
bsd-3-clause
| 27,594
|
#!/usr/bin/env python
######################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2010, Rice University
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the Rice University nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
######################################################################
# Author: Mark Moll, Ioan Sucan, Luis G. Torres
from sys import argv, exit
from os.path import basename, splitext, exists
import os
import sqlite3
import datetime
plottingEnabled=True
try:
import matplotlib
matplotlib.use('pdf')
from matplotlib import __version__ as matplotlibversion
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
import numpy as np
from math import floor
except ImportError:
print('Matplotlib or Numpy was not found; disabling plotting capabilities...')
plottingEnabled=False
from optparse import OptionParser, OptionGroup
# Given a text line, split it into tokens (by space) and return the token
# at the desired index. Additionally, test that some expected tokens exist.
# Return None if they do not.
def readLogValue(filevar, desired_token_index, expected_tokens) :
start_pos = filevar.tell()
tokens = filevar.readline().split()
for token_index in expected_tokens:
if not tokens[token_index] == expected_tokens[token_index]:
# undo the read, if we failed to parse.
filevar.seek(start_pos)
return None
return tokens[desired_token_index]
def readOptionalLogValue(filevar, desired_token_index, expected_tokens = {}) :
return readLogValue(filevar, desired_token_index, expected_tokens)
def readRequiredLogValue(name, filevar, desired_token_index, expected_tokens = {}) :
result = readLogValue(filevar, desired_token_index, expected_tokens)
if result == None:
raise Exception("Unable to read " + name)
return result
def ensurePrefix(line, prefix):
if not line.startswith(prefix):
raise Exception("Expected prefix " + prefix + " was not found")
return line
def readOptionalMultilineValue(filevar):
start_pos = filevar.tell()
line = filevar.readline()
if not line.startswith("<<<|"):
filevar.seek(start_pos)
return None
value = ''
line = filevar.readline()
while not line.startswith('|>>>'):
value = value + line
line = filevar.readline()
        if not line:  # readline() returns '' at EOF, never None
            raise Exception("Expected token |>>> missing")
return value
def readRequiredMultilineValue(filevar):
ensurePrefix(filevar.readline(), "<<<|")
value = ''
line = filevar.readline()
while not line.startswith('|>>>'):
value = value + line
line = filevar.readline()
        if not line:  # readline() returns '' at EOF, never None
            raise Exception("Expected token |>>> missing")
return value
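# Hedged illustration, not in the original script: multiline values in an
# OMPL benchmark log are delimited by "<<<|" and "|>>>" marker lines, e.g.
#
#   <<<|
#   model name : Intel(R) Core(TM) i7
#   cache size : 8192 KB
#   |>>>
#
# for which readRequiredMultilineValue() returns the two inner lines
# (newlines included) as a single string.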
def readBenchmarkLog(dbname, filenames, moveitformat):
"""Parse benchmark log files and store the parsed data in a sqlite3 database."""
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('PRAGMA FOREIGN_KEYS = ON')
# create all tables if they don't already exist
c.executescript("""CREATE TABLE IF NOT EXISTS experiments
(id INTEGER PRIMARY KEY AUTOINCREMENT, name VARCHAR(512),
totaltime REAL, timelimit REAL, memorylimit REAL, runcount INTEGER,
version VARCHAR(128), hostname VARCHAR(1024), cpuinfo TEXT,
date DATETIME, seed INTEGER, setup TEXT);
CREATE TABLE IF NOT EXISTS plannerConfigs
(id INTEGER PRIMARY KEY AUTOINCREMENT,
name VARCHAR(512) NOT NULL, settings TEXT);
CREATE TABLE IF NOT EXISTS enums
(name VARCHAR(512), value INTEGER, description TEXT,
PRIMARY KEY (name, value));
CREATE TABLE IF NOT EXISTS runs
(id INTEGER PRIMARY KEY AUTOINCREMENT, experimentid INTEGER, plannerid INTEGER,
FOREIGN KEY (experimentid) REFERENCES experiments(id) ON DELETE CASCADE,
FOREIGN KEY (plannerid) REFERENCES plannerConfigs(id) ON DELETE CASCADE);
CREATE TABLE IF NOT EXISTS progress
(runid INTEGER, time REAL, PRIMARY KEY (runid, time),
FOREIGN KEY (runid) REFERENCES runs(id) ON DELETE CASCADE)""")
for filename in filenames:
print('Processing ' + filename)
logfile = open(filename,'r')
start_pos = logfile.tell()
libname = readOptionalLogValue(logfile, 0, {1 : "version"})
if libname == None:
libname = "OMPL"
logfile.seek(start_pos)
version = readOptionalLogValue(logfile, -1, {1 : "version"})
if version == None:
# set the version number to make Planner Arena happy
version = "0.0.0"
version = ' '.join([libname, version])
expname = readRequiredLogValue("experiment name", logfile, -1, {0 : "Experiment"})
# optional experiment properties
nrexpprops = int(readOptionalLogValue(logfile, 0, {-2: "experiment", -1: "properties"}) or 0)
expprops = {}
for i in range(nrexpprops):
entry = logfile.readline().strip().split('=')
nameAndType = entry[0].split(' ')
expprops[nameAndType[0]] = (entry[1], nameAndType[1])
# adding columns to experiments table
c.execute('PRAGMA table_info(experiments)')
columnNames = [col[1] for col in c.fetchall()]
for name in sorted(expprops.keys()):
# only add column if it doesn't exist
if name not in columnNames:
c.execute('ALTER TABLE experiments ADD %s %s' % (name, expprops[name][1]))
hostname = readRequiredLogValue("hostname", logfile, -1, {0 : "Running"})
date = ' '.join(ensurePrefix(logfile.readline(), "Starting").split()[2:])
if moveitformat:
expsetup = readRequiredLogValue("goal name", logfile, -1, {0: "Goal", 1: "name"})
cpuinfo = None
rseed = 0
timelimit = float(readRequiredLogValue("time limit", logfile, 0, {-3 : "seconds", -2 : "per", -1 : "run"}))
memorylimit = 0
else:
expsetup = readRequiredMultilineValue(logfile)
cpuinfo = readOptionalMultilineValue(logfile)
rseed = int(readRequiredLogValue("random seed", logfile, 0, {-2 : "random", -1 : "seed"}))
timelimit = float(readRequiredLogValue("time limit", logfile, 0, {-3 : "seconds", -2 : "per", -1 : "run"}))
memorylimit = float(readRequiredLogValue("memory limit", logfile, 0, {-3 : "MB", -2 : "per", -1 : "run"}))
nrrunsOrNone = readOptionalLogValue(logfile, 0, {-3 : "runs", -2 : "per", -1 : "planner"})
nrruns = -1
if nrrunsOrNone != None:
nrruns = int(nrrunsOrNone)
totaltime = float(readRequiredLogValue("total time", logfile, 0, {-3 : "collect", -2 : "the", -1 : "data"}))
numEnums = 0
numEnumsOrNone = readOptionalLogValue(logfile, 0, {-2 : "enum"})
if numEnumsOrNone != None:
numEnums = int(numEnumsOrNone)
for i in range(numEnums):
enum = logfile.readline()[:-1].split('|')
c.execute('SELECT * FROM enums WHERE name IS "%s"' % enum[0])
if c.fetchone() == None:
for j in range(len(enum)-1):
c.execute('INSERT INTO enums VALUES (?,?,?)',
(enum[0],j,enum[j+1]))
# Creating entry in experiments table
experimentEntries = [None, expname, totaltime, timelimit, memorylimit, nrruns, version,
hostname, cpuinfo, date, rseed, expsetup]
for name in sorted(expprops.keys()): # sort to ensure correct order
experimentEntries.append(expprops[name][0])
c.execute('INSERT INTO experiments VALUES (' + ','.join('?' for i in experimentEntries) + ')', experimentEntries)
experimentId = c.lastrowid
numPlanners = int(readRequiredLogValue("planner count", logfile, 0, {-1 : "planners"}))
for i in range(numPlanners):
plannerName = logfile.readline()[:-1]
print('Parsing data for ' + plannerName)
# read common data for planner
numCommon = int(logfile.readline().split()[0])
settings = ''
for j in range(numCommon):
settings = settings + logfile.readline() + ';'
# find planner id
c.execute('SELECT id FROM plannerConfigs WHERE (name=? AND settings=?)',
(plannerName, settings,))
p = c.fetchone()
if p==None:
c.execute('INSERT INTO plannerConfigs VALUES (?,?,?)',
(None, plannerName, settings,))
plannerId = c.lastrowid
else:
plannerId = p[0]
# get current column names
c.execute('PRAGMA table_info(runs)')
columnNames = [col[1] for col in c.fetchall()]
# read properties and add columns as necessary
numProperties = int(logfile.readline().split()[0])
propertyNames = ['experimentid', 'plannerid']
for j in range(numProperties):
field = logfile.readline().split()
propertyType = field[-1]
propertyName = '_'.join(field[:-1])
if propertyName not in columnNames:
c.execute('ALTER TABLE runs ADD %s %s' % (propertyName, propertyType))
propertyNames.append(propertyName)
# read measurements
insertFmtStr = 'INSERT INTO runs (' + ','.join(propertyNames) + \
') VALUES (' + ','.join('?'*len(propertyNames)) + ')'
numRuns = int(logfile.readline().split()[0])
runIds = []
for j in range(numRuns):
values = tuple([experimentId, plannerId] + \
[None if len(x) == 0 or x == 'nan' or x == 'inf' else x
for x in logfile.readline().split('; ')[:-1]])
c.execute(insertFmtStr, values)
# extract primary key of each run row so we can reference them
# in the planner progress data table if needed
runIds.append(c.lastrowid)
nextLine = logfile.readline().strip()
# read planner progress data if it's supplied
if nextLine != '.':
# get current column names
c.execute('PRAGMA table_info(progress)')
columnNames = [col[1] for col in c.fetchall()]
                # read progress properties and add columns as necessary
numProgressProperties = int(nextLine.split()[0])
progressPropertyNames = ['runid']
for i in range(numProgressProperties):
field = logfile.readline().split()
progressPropertyType = field[-1]
progressPropertyName = "_".join(field[:-1])
if progressPropertyName not in columnNames:
c.execute('ALTER TABLE progress ADD %s %s' %
(progressPropertyName, progressPropertyType))
progressPropertyNames.append(progressPropertyName)
# read progress measurements
insertFmtStr = 'INSERT INTO progress (' + \
','.join(progressPropertyNames) + ') VALUES (' + \
','.join('?'*len(progressPropertyNames)) + ')'
numRuns = int(logfile.readline().split()[0])
for j in range(numRuns):
dataSeries = logfile.readline().split(';')[:-1]
for dataSample in dataSeries:
values = tuple([runIds[j]] + \
[None if len(x) == 0 or x == 'nan' or x == 'inf' else x
for x in dataSample.split(',')[:-1]])
try:
c.execute(insertFmtStr, values)
except sqlite3.IntegrityError:
print('Ignoring duplicate progress data. Consider increasing ompl::tools::Benchmark::Request::timeBetweenUpdates.')
pass
logfile.readline()
logfile.close()
conn.commit()
c.close()
def plotAttribute(cur, planners, attribute, typename):
"""Create a plot for a particular attribute. It will include data for
all planners that have data for this attribute."""
labels = []
measurements = []
nanCounts = []
if typename == 'ENUM':
cur.execute('SELECT description FROM enums where name IS "%s"' % attribute)
descriptions = [ t[0] for t in cur.fetchall() ]
numValues = len(descriptions)
for planner in planners:
cur.execute('SELECT %s FROM runs WHERE plannerid = %s AND %s IS NOT NULL' \
% (attribute, planner[0], attribute))
measurement = [ t[0] for t in cur.fetchall() if t[0] != None ]
if len(measurement) > 0:
cur.execute('SELECT count(*) FROM runs WHERE plannerid = %s AND %s IS NULL' \
% (planner[0], attribute))
nanCounts.append(cur.fetchone()[0])
labels.append(planner[1])
if typename == 'ENUM':
scale = 100. / len(measurement)
measurements.append([measurement.count(i)*scale for i in range(numValues)])
else:
measurements.append(measurement)
if len(measurements)==0:
print('Skipping "%s": no available measurements' % attribute)
return
plt.clf()
ax = plt.gca()
if typename == 'ENUM':
width = .5
measurements = np.transpose(np.vstack(measurements))
colsum = np.sum(measurements, axis=1)
rows = np.where(colsum != 0)[0]
heights = np.zeros((1,measurements.shape[1]))
ind = range(measurements.shape[1])
legend_labels = []
for i in rows:
plt.bar(ind, measurements[i], width, bottom=heights[0],
color=matplotlib.cm.hot(int(floor(i*256/numValues))),
label=descriptions[i])
heights = heights + measurements[i]
xtickNames = plt.xticks([x+width/2. for x in ind], labels, rotation=30)
ax.set_ylabel(attribute.replace('_',' ') + ' (%)')
box = ax.get_position()
ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
props = matplotlib.font_manager.FontProperties()
props.set_size('small')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5), prop = props)
elif typename == 'BOOLEAN':
width = .5
measurementsPercentage = [sum(m) * 100. / len(m) for m in measurements]
ind = range(len(measurements))
plt.bar(ind, measurementsPercentage, width)
xtickNames = plt.xticks([x + width / 2. for x in ind], labels, rotation=30)
ax.set_ylabel(attribute.replace('_',' ') + ' (%)')
else:
if int(matplotlibversion.split('.')[0])<1:
plt.boxplot(measurements, notch=0, sym='k+', vert=1, whis=1.5)
else:
plt.boxplot(measurements, notch=0, sym='k+', vert=1, whis=1.5, bootstrap=1000)
ax.set_ylabel(attribute.replace('_',' '))
xtickNames = plt.setp(ax,xticklabels=labels)
plt.setp(xtickNames, rotation=25)
ax.set_xlabel('Motion planning algorithm')
ax.yaxis.grid(True, linestyle='-', which='major', color='lightgrey', alpha=0.5)
if max(nanCounts)>0:
maxy = max([max(y) for y in measurements])
for i in range(len(labels)):
x = i+width/2 if typename=='BOOLEAN' else i+1
ax.text(x, .95*maxy, str(nanCounts[i]), horizontalalignment='center', size='small')
plt.show()
def plotProgressAttribute(cur, planners, attribute):
"""Plot data for a single planner progress attribute. Will create an
average time-plot with error bars of the attribute over all runs for
each planner."""
import numpy.ma as ma
plt.clf()
ax = plt.gca()
ax.set_xlabel('time (s)')
ax.set_ylabel(attribute.replace('_',' '))
plannerNames = []
for planner in planners:
cur.execute("""SELECT count(progress.%s) FROM progress INNER JOIN runs
ON progress.runid = runs.id AND runs.plannerid=%s
AND progress.%s IS NOT NULL""" \
% (attribute, planner[0], attribute))
if cur.fetchone()[0] > 0:
plannerNames.append(planner[1])
cur.execute("""SELECT DISTINCT progress.runid FROM progress INNER JOIN runs
WHERE progress.runid=runs.id AND runs.plannerid=?""", (planner[0],))
runids = [t[0] for t in cur.fetchall()]
timeTable = []
dataTable = []
for r in runids:
# Select data for given run
cur.execute('SELECT time, %s FROM progress WHERE runid = %s ORDER BY time' % (attribute,r))
(time, data) = zip(*(cur.fetchall()))
timeTable.append(time)
dataTable.append(data)
# It's conceivable that the sampling process may have
# generated more samples for one run than another; in this
# case, truncate all data series to length of shortest
# one.
fewestSamples = min(len(time[:]) for time in timeTable)
times = np.array(timeTable[0][:fewestSamples])
dataArrays = np.array([data[:fewestSamples] for data in dataTable])
filteredData = ma.masked_array(dataArrays, np.equal(dataArrays, None), dtype=float)
means = np.mean(filteredData, axis=0)
stddevs = np.std(filteredData, axis=0, ddof=1)
# plot average with error bars
plt.errorbar(times, means, yerr=2*stddevs, errorevery=max(1, len(times) // 20))
ax.legend(plannerNames)
if len(plannerNames)>0:
plt.show()
else:
plt.clf()
def plotStatistics(dbname, fname):
"""Create a PDF file with box plots for all attributes."""
print("Generating plots...")
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('PRAGMA FOREIGN_KEYS = ON')
c.execute('SELECT id, name FROM plannerConfigs')
planners = [(t[0],t[1].replace('geometric_','').replace('control_',''))
for t in c.fetchall()]
c.execute('PRAGMA table_info(runs)')
colInfo = c.fetchall()[3:]
pp = PdfPages(fname)
for col in colInfo:
if col[2] == 'BOOLEAN' or col[2] == 'ENUM' or \
col[2] == 'INTEGER' or col[2] == 'REAL':
plotAttribute(c, planners, col[1], col[2])
pp.savefig(plt.gcf())
c.execute('PRAGMA table_info(progress)')
colInfo = c.fetchall()[2:]
for col in colInfo:
plotProgressAttribute(c, planners, col[1])
pp.savefig(plt.gcf())
plt.clf()
pagey = 0.9
pagex = 0.06
c.execute("""SELECT id, name, timelimit, memorylimit FROM experiments""")
experiments = c.fetchall()
for experiment in experiments:
c.execute("""SELECT count(*) FROM runs WHERE runs.experimentid = %d
GROUP BY runs.plannerid""" % experiment[0])
numRuns = [run[0] for run in c.fetchall()]
        numRuns = str(numRuns[0]) if len(set(numRuns)) == 1 else ','.join(str(n) for n in numRuns)
        plt.figtext(pagex, pagey, 'Experiment "%s"' % experiment[1])
        plt.figtext(pagex, pagey-0.05, 'Number of averaged runs: %s' % numRuns)
plt.figtext(pagex, pagey-0.10, "Time limit per run: %g seconds" % experiment[2])
plt.figtext(pagex, pagey-0.15, "Memory limit per run: %g MB" % experiment[3])
plt.show()
pp.savefig(plt.gcf())
pp.close()
def saveAsMysql(dbname, mysqldump):
# See http://stackoverflow.com/questions/1067060/perl-to-python
import re
print("Saving as MySQL dump file...")
conn = sqlite3.connect(dbname)
mysqldump = open(mysqldump,'w')
    # make sure all tables are dropped in an order that keeps foreign keys valid
c = conn.cursor()
c.execute("SELECT name FROM sqlite_master WHERE type='table'")
table_names = [ str(t[0]) for t in c.fetchall() ]
c.close()
last = ['experiments', 'planner_configs']
for table in table_names:
if table.startswith("sqlite"):
continue
if not table in last:
mysqldump.write("DROP TABLE IF EXISTS `%s`;\n" % table)
for table in last:
if table in table_names:
mysqldump.write("DROP TABLE IF EXISTS `%s`;\n" % table)
for line in conn.iterdump():
process = False
for nope in ('BEGIN TRANSACTION','COMMIT',
'sqlite_sequence','CREATE UNIQUE INDEX', 'CREATE VIEW'):
if nope in line: break
else:
process = True
if not process: continue
line = re.sub(r"[\n\r\t ]+", " ", line)
m = re.search('CREATE TABLE ([a-zA-Z0-9_]*)(.*)', line)
if m:
name, sub = m.groups()
sub = sub.replace('"','`')
line = '''CREATE TABLE IF NOT EXISTS %(name)s%(sub)s'''
line = line % dict(name=name, sub=sub)
# make sure we use an engine that supports foreign keys
line = line.rstrip("\n\t ;") + " ENGINE = InnoDB;\n"
else:
m = re.search('INSERT INTO "([a-zA-Z0-9_]*)"(.*)', line)
if m:
line = 'INSERT INTO %s%s\n' % m.groups()
line = line.replace('"', r'\"')
line = line.replace('"', "'")
line = re.sub(r"([^'])'t'(.)", "\\1THIS_IS_TRUE\\2", line)
line = line.replace('THIS_IS_TRUE', '1')
line = re.sub(r"([^'])'f'(.)", "\\1THIS_IS_FALSE\\2", line)
line = line.replace('THIS_IS_FALSE', '0')
line = line.replace('AUTOINCREMENT', 'AUTO_INCREMENT')
mysqldump.write(line)
mysqldump.close()
def computeViews(dbname, moveitformat):
conn = sqlite3.connect(dbname)
c = conn.cursor()
c.execute('PRAGMA FOREIGN_KEYS = ON')
c.execute('PRAGMA table_info(runs)')
if moveitformat:
s0 = """SELECT plannerid, plannerConfigs.name AS plannerName, experimentid, solved, total_time
FROM plannerConfigs INNER JOIN experiments INNER JOIN runs
ON plannerConfigs.id=runs.plannerid AND experiments.id=runs.experimentid"""
    # kinodynamic paths cannot be simplified (or at least not easily),
# so simplification_time may not exist as a database column
elif 'simplification_time' in [col[1] for col in c.fetchall()]:
s0 = """SELECT plannerid, plannerConfigs.name AS plannerName, experimentid, solved, time + simplification_time AS total_time
FROM plannerConfigs INNER JOIN experiments INNER JOIN runs
ON plannerConfigs.id=runs.plannerid AND experiments.id=runs.experimentid"""
else:
s0 = """SELECT plannerid, plannerConfigs.name AS plannerName, experimentid, solved, time AS total_time
FROM plannerConfigs INNER JOIN experiments INNER JOIN runs
ON plannerConfigs.id=runs.plannerid AND experiments.id=runs.experimentid"""
s1 = """SELECT plannerid, plannerName, experimentid, AVG(solved) AS avg_solved, AVG(total_time) AS avg_total_time
FROM (%s) GROUP BY plannerid, experimentid""" % s0
s2 = """SELECT plannerid, experimentid, MIN(avg_solved) AS avg_solved, avg_total_time
FROM (%s) GROUP BY plannerName, experimentid ORDER BY avg_solved DESC, avg_total_time ASC""" % s1
c.execute('DROP VIEW IF EXISTS bestPlannerConfigsPerExperiment')
c.execute('CREATE VIEW IF NOT EXISTS bestPlannerConfigsPerExperiment AS %s' % s2)
s1 = """SELECT plannerid, plannerName, AVG(solved) AS avg_solved, AVG(total_time) AS avg_total_time
FROM (%s) GROUP BY plannerid""" % s0
s2 = """SELECT plannerid, MIN(avg_solved) AS avg_solved, avg_total_time
FROM (%s) GROUP BY plannerName ORDER BY avg_solved DESC, avg_total_time ASC""" % s1
c.execute('DROP VIEW IF EXISTS bestPlannerConfigs')
c.execute('CREATE VIEW IF NOT EXISTS bestPlannerConfigs AS %s' % s2)
conn.commit()
c.close()
if __name__ == "__main__":
usage = """%prog [options] [<benchmark.log> ...]"""
parser = OptionParser(usage)
parser.add_option("-d", "--database", dest="dbname", default="benchmark.db",
help="Filename of benchmark database [default: %default]")
parser.add_option("-a", "--append", action="store_true", dest="append", default=False,
help="Append data to database (as opposed to overwriting an existing database)")
parser.add_option("-v", "--view", action="store_true", dest="view", default=False,
help="Compute the views for best planner configurations")
if plottingEnabled:
parser.add_option("-p", "--plot", dest="plot", default=None,
help="Create a PDF of plots")
parser.add_option("-m", "--mysql", dest="mysqldb", default=None,
help="Save SQLite3 database as a MySQL dump file")
parser.add_option("--moveit", action="store_true", dest="moveit", default=False,
help="Log files are produced by MoveIt!")
(options, args) = parser.parse_args()
    if not options.append and exists(options.dbname) and len(args) > 0:
os.remove(options.dbname)
if len(args)>0:
readBenchmarkLog(options.dbname, args, options.moveit)
# If we update the database, we recompute the views as well
options.view = True
if options.view:
computeViews(options.dbname, options.moveit)
if options.plot:
plotStatistics(options.dbname, options.plot)
if options.mysqldb:
saveAsMysql(options.dbname, options.mysqldb)
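# Hedged usage example, not part of the original script; the flags are the
# ones registered with OptionParser above:
#
#   ./ompl_benchmark_statistics.py run1.log run2.log -d benchmark.db -p out.pdf
#
# parses both logs into benchmark.db, recomputes the "best planner" views,
# and writes box plots for every collected run attribute to out.pdf.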
|
sonny-tarbouriech/ompl
|
scripts/ompl_benchmark_statistics.py
|
Python
|
bsd-3-clause
| 27,109
|
# -*- coding: utf-8 -*-
from PIL import Image, ImageDraw, ImageFont
import textwrap
from unidecode import unidecode
from reportlab.graphics import renderPM
from reportlab.graphics.barcode import code128
from reportlab.graphics.barcode import createBarcodeDrawing
from reportlab.graphics.barcode import createBarcodeImageInMemory
from reportlab.graphics.shapes import Drawing
from django.conf import settings
def get_barcode(value, width, humanReadable = True):
#value = value.encode('ascii', 'ignore')
value = unidecode(value)
barcode = createBarcodeDrawing('Code128', value = value, humanReadable = humanReadable, fontSize = 8)
drawing_width = width
barcode_scale = drawing_width / barcode.width
drawing_height = barcode.height * barcode_scale
drawing = Drawing(drawing_width, drawing_height)
drawing.scale(barcode_scale, barcode_scale)
drawing.add(barcode, name='barcode')
return drawing
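# Hedged usage sketch, not part of the original module: the Drawing returned
# by get_barcode() can be rasterized with reportlab's renderPM backend
# (already imported above), e.g.
#
#   drawing = get_barcode(u'FORO-00042', 300)
#   renderPM.drawToFile(drawing, '/tmp/barcode.png', fmt='PNG')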
def createPNG(participant, where):
event = participant.event
badge_size_x = event.badge_size_x or 390
badge_size_y = event.badge_size_y or 260
badge_color = event.badge_color or "#FFFFFF"
image_file = settings.MEDIA_ROOT + '/gafete390x260.png'
img = Image.open(image_file)
#img = Image.new('RGBA', (badge_size_x, badge_size_y), badge_color)
draw = ImageDraw.Draw(img)
draw.rectangle(((0,0),(badge_size_x-1, badge_size_y-1)), outline = "black")
if (len(participant.last_name) + len(participant.first_name) > 20):
last_name = participant.last_name.partition(' ')[0] if len(participant.last_name) > 12 else participant.last_name
first_name = participant.first_name.partition(' ')[0] if len(participant.first_name) >= 12 else participant.first_name
else:
last_name = participant.last_name
first_name = participant.first_name
match = {
'event': event.name,
#'name': "%s %s" % (participant.first_name, participant.last_name ),
#'name': "%s %s" % (participant.first_name.partition(' ')[0], participant.last_name.partition(' ')[0]),
'name': "%s %s" % (first_name, last_name),
'first_name': participant.first_name,
'last_name': participant.last_name,
'profession': participant.profession,
'organization': participant.organization,
'country': participant.country.name,
'type': participant.type,
'email': participant.email,
}
for field in event.eventbadge_set.all():
x = field.x
y = field.y
size = field.size
if field.field == 'logo':
if participant.event.logo:
logo = Image.open(participant.event.logo.file.file)
logo.thumbnail((size,size))
img.paste(logo, (x,y))
elif field.field == 'photo':
if participant.photo:
photo = Image.open(participant.photo)
photo.thumbnail((size,size))
img.paste(photo, (x,y))
else:
if field.field == 'text':
content = field.format
else:
content = match[field.field]
fnt = ImageFont.truetype(field.font.filename, size)
color = field.color
text = ("%s") % (content)
textsize = draw.textsize(text, font=fnt)
if textsize[0]+x < badge_size_x:
draw.text((x,y), ("%s") % (content), font=fnt, fill=color)
else:
# calculate maximum size in characters
                max_chars = int((badge_size_x - (x * 2)) * len(text) / textsize[0])  # textwrap needs an int width
lines = textwrap.fill(text, max_chars).splitlines()
tmp = y
for line in lines:
draw.text((x,y), line, font=fnt, fill=color)
y += size
y = tmp
# FIXME: NO barcode
#short_full_name = "%s: %s" % (participant.id, participant.short_full_name())
#barcode = get_barcode(short_full_name, badge_size_x-4)
#barcode_image = renderPM.drawToPIL(barcode)
#img.paste(barcode_image, (0+2, badge_size_y-70))
img.save(where, "PNG")
|
javierwilson/forocacao
|
forocacao/app/png.py
|
Python
|
bsd-3-clause
| 4,177
|
# -*- coding:utf-8 -*-
from unittest import skipUnless
from django.template import Context, Template
from django.test import TransactionTestCase
from django_mysql.models import ApproximateInt, SmartIterator
from django_mysql.utils import have_program
from django_mysql_tests.models import Author, NameAuthor, VanillaAuthor
from django_mysql_tests.utils import captured_stdout
class ApproximateCountTests(TransactionTestCase):
def setUp(self):
super(ApproximateCountTests, self).setUp()
Author.objects.bulk_create([Author() for i in range(10)])
def test_approx_count(self):
# Theoretically this varies 30-50% of the table size
# For a fresh table with 10 items we seem to always get back the actual
# count, but to be sure we'll just assert it's within 55%
approx_count = Author.objects.approx_count(min_size=1)
self.assertGreaterEqual(approx_count, 4)
self.assertLessEqual(approx_count, 16)
def test_activation_deactivation(self):
qs = Author.objects.all()
self.assertFalse(qs._count_tries_approx)
qs2 = qs.count_tries_approx(min_size=2)
self.assertNotEqual(qs, qs2)
self.assertTrue(qs2._count_tries_approx)
count = qs2.count()
self.assertTrue(isinstance(count, ApproximateInt))
qs3 = qs2.count_tries_approx(False)
self.assertNotEqual(qs2, qs3)
self.assertFalse(qs3._count_tries_approx)
def test_output_in_templates(self):
approx_count = Author.objects.approx_count(min_size=1)
text = Template('{{ var }}').render(Context({'var': approx_count}))
self.assertTrue(text.startswith('Approximately '))
approx_count2 = Author.objects.approx_count(
min_size=1,
return_approx_int=False
)
text = Template('{{ var }}').render(Context({'var': approx_count2}))
self.assertFalse(text.startswith('Approximately '))
def test_fallback_with_filters(self):
filtered = Author.objects.filter(name='')
self.assertEqual(filtered.approx_count(fall_back=True), 10)
with self.assertRaises(ValueError):
filtered.approx_count(fall_back=False)
def test_fallback_with_slice(self):
self.assertEqual(Author.objects.all()[:100].approx_count(), 10)
with self.assertRaises(ValueError):
Author.objects.all()[:100].approx_count(fall_back=False)
def test_fallback_with_distinct(self):
self.assertEqual(Author.objects.distinct().approx_count(), 10)
with self.assertRaises(ValueError):
Author.objects.distinct().approx_count(fall_back=False)
class SmartIteratorTests(TransactionTestCase):
def setUp(self):
super(SmartIteratorTests, self).setUp()
Author.objects.bulk_create([Author() for i in range(10)])
def test_bad_querysets(self):
with self.assertRaises(ValueError) as cm:
Author.objects.all().order_by('name').iter_smart_chunks()
self.assertIn("ordering", str(cm.exception))
with self.assertRaises(ValueError) as cm:
Author.objects.all()[:5].iter_smart_chunks()
self.assertIn("sliced QuerySet", str(cm.exception))
with self.assertRaises(ValueError) as cm:
NameAuthor.objects.all().iter_smart_chunks()
self.assertIn("non-integer primary key", str(cm.exception))
def test_chunks(self):
seen = []
for authors in Author.objects.iter_smart_chunks():
seen.extend(author.id for author in authors)
all_ids = list(Author.objects.order_by('id')
.values_list('id', flat=True))
self.assertEqual(seen, all_ids)
def test_objects(self):
seen = [author.id for author in Author.objects.iter_smart()]
all_ids = list(Author.objects.order_by('id')
.values_list('id', flat=True))
self.assertEqual(seen, all_ids)
def test_objects_non_atomic(self):
seen = [author.id for author in
Author.objects.iter_smart(atomically=False)]
all_ids = list(Author.objects.order_by('id')
.values_list('id', flat=True))
self.assertEqual(seen, all_ids)
def test_objects_max_size(self):
seen = [author.id for author in
Author.objects.iter_smart(chunk_max=1)]
all_ids = list(Author.objects.order_by('id')
.values_list('id', flat=True))
self.assertEqual(seen, all_ids)
def test_no_matching_objects(self):
seen = [author.id for author in
Author.objects.filter(name="Waaa").iter_smart()]
self.assertEqual(seen, [])
def test_no_objects(self):
Author.objects.all().delete()
seen = [author.id for author in Author.objects.iter_smart()]
self.assertEqual(seen, [])
def test_reporting(self):
with captured_stdout() as output:
qs = Author.objects.all()
for authors in qs.iter_smart_chunks(report_progress=True):
list(authors) # fetch them
lines = output.getvalue().split('\n')
reports = lines[0].split('\r')
for report in reports:
self.assertRegexpMatches(
report,
r"AuthorSmartChunkedIterator processed \d+/10 objects "
r"\(\d+\.\d+%\) in \d+ chunks(; highest pk so far \d+)?"
)
self.assertEqual(lines[1], 'Finished!')
def test_reporting_on_uncounted_qs(self):
Author.objects.create(name="pants")
with captured_stdout() as output:
qs = Author.objects.filter(name="pants")
for authors in qs.iter_smart_chunks(report_progress=True):
authors.delete()
lines = output.getvalue().split('\n')
reports = lines[0].split('\r')
for report in reports:
self.assertRegexpMatches(
report,
# We should have ??? since the deletion means the objects
# aren't fetched into python
r"AuthorSmartChunkedIterator processed (0|\?\?\?)/1 objects "
r"\(\d+\.\d+%\) in \d+ chunks(; highest pk so far \d+)?"
)
self.assertEqual(lines[1], 'Finished!')
def test_running_on_non_mysql_model(self):
VanillaAuthor.objects.create(name="Alpha")
VanillaAuthor.objects.create(name="pants")
VanillaAuthor.objects.create(name="Beta")
VanillaAuthor.objects.create(name="pants")
bad_authors = VanillaAuthor.objects.filter(name="pants")
self.assertEqual(bad_authors.count(), 2)
with captured_stdout():
for author in SmartIterator(bad_authors, report_progress=True):
author.delete()
self.assertEqual(bad_authors.count(), 0)
@skipUnless(have_program('pt-visual-explain'),
"pt-visual-explain must be installed")
class VisualExplainTests(TransactionTestCase):
def test_basic(self):
with captured_stdout() as capture:
Author.objects.all().pt_visual_explain()
output = capture.getvalue()
self.assertGreater(output, "")
# Can't be too strict about the output since different database and pt-
# visual-explain versions give different output
self.assertIn("django_mysql_tests_author", output)
self.assertIn("rows", output)
self.assertIn("Table", output)
def test_basic_no_display(self):
output = Author.objects.all().pt_visual_explain(display=False)
self.assertGreater(output, "")
self.assertIn("django_mysql_tests_author", output)
self.assertIn("rows", output)
self.assertIn("Table", output)
def test_subquery(self):
subq = Author.objects.all().values_list('id', flat=True)
output = Author.objects.filter(id__in=subq) \
.pt_visual_explain(display=False)
self.assertGreater(output, "")
self.assertIn("possible_keys", output)
self.assertIn("django_mysql_tests_author", output)
self.assertIn("rows", output)
self.assertIn("Table", output)
|
graingert/django-mysql
|
tests/django_mysql_tests/test_models.py
|
Python
|
bsd-3-clause
| 8,247
|
'''
@author: jnaous
'''
from django.conf.urls.defaults import patterns, url
urlpatterns = patterns('expedient.clearinghouse.project.views',
url(r'^list/$', 'list', name='project_list'),
url(r'^create/$', 'create', name="project_create"),
url(r'^update/(?P<proj_id>\d+)/$', 'update', name="project_update"),
url(r'^detail/(?P<proj_id>\d+)/$', 'detail', name='project_detail'),
url(r'^delete/(?P<proj_id>\d+)/$', 'delete', name="project_delete"),
url(r'^aggregates/add/(?P<proj_id>\d+)/$', 'add_aggregate', name="project_add_agg"),
url(r'^aggregates/update/(?P<proj_id>\d+)/(?P<agg_id>\d+)/$', 'update_aggregate', name="project_update_agg"),
url(r'^aggregates/remove/(?P<proj_id>\d+)/(?P<agg_id>\d+)/$', 'remove_aggregate', name="project_remove_agg"),
url(r'^members/add/(?P<proj_id>\d+)/$', 'add_member', name="project_member_add"),
url(r'^members/update/(?P<proj_id>\d+)/(?P<user_id>\d+)/$', 'update_member', name="project_member_update"),
url(r'^members/remove/(?P<proj_id>\d+)/(?P<user_id>\d+)/$', 'remove_member', name="project_member_remove"),
)
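# Hedged illustration, not part of the original file: these named patterns
# resolve with Django's reverse(), e.g.
#
#   from django.core.urlresolvers import reverse
#   reverse('project_detail', kwargs={'proj_id': 42})  # -> '.../detail/42/'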
|
avlach/univbris-ocf
|
expedient/src/python/expedient/clearinghouse/project/urls.py
|
Python
|
bsd-3-clause
| 1,095
|
from django.forms.forms import BoundField
from django.template.loader import render_to_string
from django.utils.safestring import mark_safe
class FormLayout(object):
def __unicode__(self):
return self.render_layout()
class Fieldset(FormLayout):
''' Fieldset container. '''
def __init__(self, legend, fields):
self.legend = legend
self.fields = fields
def render_layout(self, form):
fields = form.render_fields(self.fields)
if not fields:
return ''
form._rendered_fieldsets = getattr(form, '_rendered_fieldsets', 0) + 1
return render_to_string('forms/fieldset.html', {
'first': form._rendered_fieldsets == 1,
'legend': self.legend,
'fields': fields,
})
class MultiField(FormLayout):
def __init__(self, label, *fields):
self.label = label
        self.fields = fields
        # default; render_label() overwrites this when the labeled field exists
        self.required = False
def render_field(self, form, name):
if isinstance(name, FormLayout):
return name
form.rendered_fields.append(name)
try:
field = form.fields[name]
except KeyError:
return ''
bf = BoundField(form, field, name)
if bf.is_hidden:
if bf.errors:
form.top_errors.extend(bf.errors)
form.hidden_fields.append(bf)
return ''
return bf
def render_label(self, form):
if isinstance(self.label, tuple):
contents, name = self.label
else:
contents, name = None, self.label
try:
field = form.fields[name]
except KeyError:
return ''
bf = BoundField(form, field, name)
self.required = bf.field.required
return bf.label_tag(contents)
def render_layout(self, form):
fields = [mark_safe(unicode(self.render_field(form, field))) for field in self.fields]
return render_to_string('forms/multifield.html', {
'label': self.render_label(form),
'fields': fields,
'required': self.required,
})
class Span(FormLayout):
def __init__(self, text, classes=[]):
self.text = text
self.classes = classes
def render_layout(self):
return mark_safe('<span class="%s">%s</span>' % (' '.join(self.classes), self.text))
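# Hedged usage sketch, not part of the original module: layouts are built by
# nesting these containers; the form object is assumed to provide the
# render_fields()/rendered_fields/top_errors hooks referenced above.
#
#   layout = [
#       Fieldset('Account', ['username', 'email']),
#       Fieldset('Profile', [
#           MultiField('Name', 'first_name', 'last_name',
#                      Span('as printed on your ID', classes=['hint'])),
#       ]),
#   ]
#   html = u''.join(unicode(item.render_layout(form)) for item in layout)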
|
hollow/nepal
|
nepal/contrib/forms/layout.py
|
Python
|
bsd-3-clause
| 2,353
|
DATABASES = {
'default': {
'NAME': ':memory:',
'ENGINE': 'django.db.backends.sqlite3',
}
}
SECRET_KEY = 'fake-key'
INSTALLED_APPS = (
'linaro_django_pagination',
)
|
Polyconseil/django-pagination
|
linaro_django_pagination/tests/settings.py
|
Python
|
bsd-3-clause
| 194
|
# Copyright (c) 2015, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
# external
from mixbox.cache import Cached
# internal
import stix
from stix.common import InformationSource
# bindings
import stix.bindings.data_marking as stix_data_marking_binding
class Marking(stix.Entity):
_binding = stix_data_marking_binding
_binding_class = stix_data_marking_binding.MarkingType
_namespace = 'http://data-marking.mitre.org/Marking-1'
def __init__(self, markings=None):
self.markings = _MarkingSpecifications(markings)
@property
def markings(self):
return self._markings
@markings.setter
def markings(self, value):
self._markings = _MarkingSpecifications(value)
def add_marking(self, value):
self._markings.append(value)
def to_obj(self, return_obj=None, ns_info=None):
super(Marking, self).to_obj(
return_obj=return_obj,
ns_info=ns_info
)
obj = self._binding_class()
if self.markings:
obj.Marking = self.markings.to_obj(ns_info=ns_info)
return obj
def to_list(self):
return self.markings.to_list() if self.markings else []
@classmethod
def from_obj(cls, obj, return_obj=None):
if not obj:
return None
if not return_obj:
return_obj = cls()
return_obj.markings = _MarkingSpecifications.from_obj(obj.Marking)
return return_obj
@classmethod
def from_list(cls, markings_list, return_obj=None):
if not markings_list:
return None
if not return_obj:
return_obj = cls()
mlist = _MarkingSpecifications.from_list(markings_list)
return_obj.markings = mlist
return return_obj
to_dict = to_list
from_dict = from_list
class MarkingSpecification(Cached, stix.Entity):
_binding = stix_data_marking_binding
_binding_class = stix_data_marking_binding.MarkingSpecificationType
_namespace = 'http://data-marking.mitre.org/Marking-1'
def __init__(self, controlled_structure=None, marking_structures=None):
super(MarkingSpecification, self).__init__()
self.id_ = None
self.idref = None
self.version = None
self.controlled_structure = controlled_structure
self.marking_structures = _MarkingStructures(marking_structures)
self.information_source = None
@property
def information_source(self):
return self._information_source
@information_source.setter
def information_source(self, value):
self._set_var(InformationSource, try_cast=False, information_source=value)
@property
def marking_structures(self):
return self._marking_structures
@marking_structures.setter
def marking_structures(self, value):
self._marking_structures = _MarkingStructures(value)
def to_obj(self, return_obj=None, ns_info=None):
super(MarkingSpecification, self).to_obj(
return_obj=return_obj,
ns_info=ns_info
)
obj = self._binding_class()
obj.id = self.id_
obj.idref = self.idref
obj.version = self.version
obj.Controlled_Structure = self.controlled_structure
obj.Marking_Structure = self.marking_structures.to_obj(ns_info=ns_info)
if self.information_source:
obj.Information_Source = self.information_source.to_obj(ns_info=ns_info)
return obj
def to_dict(self):
return super(MarkingSpecification, self).to_dict()
@classmethod
def from_obj(cls, obj, return_obj=None):
if not obj:
return None
if not return_obj:
return_obj = cls()
return_obj.id_ = obj.id
return_obj.idref = obj.idref
return_obj.version = obj.version
return_obj.controlled_structure = obj.Controlled_Structure
return_obj.marking_structures = _MarkingStructures.from_obj(obj.Marking_Structure)
return_obj.information_source = InformationSource.from_obj(obj.Information_Source)
return return_obj
@classmethod
def from_dict(cls, d, return_obj=None):
if not d:
return None
if not return_obj:
return_obj = cls()
get = d.get # PEP8 line length fix
return_obj.id_ = get('id')
return_obj.idref = get('idref')
return_obj.version = get('version')
return_obj.controlled_structure = get('controlled_structure')
return_obj.marking_structures = _MarkingStructures.from_dict(
get('marking_structures')
)
return_obj.information_source = InformationSource.from_dict(
get('information_source')
)
return return_obj
class MarkingStructure(Cached, stix.Entity):
_binding = stix_data_marking_binding
_binding_class = stix_data_marking_binding.MarkingStructureType
_namespace = 'http://data-marking.mitre.org/Marking-1'
_XSI_TYPE = None # overridden by subclasses
def __init__(self):
self.id_ = None
self.idref = None
self.marking_model_name = None
self.marking_model_ref = None
def to_obj(self, return_obj=None, ns_info=None):
super(MarkingStructure, self).to_obj(
return_obj=return_obj,
ns_info=ns_info
)
if not return_obj:
return_obj = self._binding_class()
return_obj.id = self.id_
return_obj.idref = self.idref
return_obj.marking_model_name = self.marking_model_name
return_obj.marking_model_ref = self.marking_model_ref
return return_obj
def to_dict(self):
d = {}
if self._XSI_TYPE:
d['xsi:type'] = self._XSI_TYPE
if self.id_:
d['id'] = self.id_
if self.idref:
d['idref'] = self.idref
if self.marking_model_name:
d['marking_model_name'] = self.marking_model_name
if self.marking_model_ref:
d['marking_model_ref'] = self.marking_model_ref
return d
@staticmethod
def lookup_class(xsi_type):
if not xsi_type:
return MarkingStructure
return stix.lookup_extension(xsi_type)
@classmethod
def from_obj(cls, obj, return_obj=None):
import stix.extensions.marking.tlp # noqa
import stix.extensions.marking.simple_marking # noqa
import stix.extensions.marking.terms_of_use_marking # noqa
if not obj:
return None
if return_obj:
m = return_obj
m.id_ = obj.id
m.idref = obj.idref
m.marking_model_name = obj.marking_model_name
m.marking_model_ref = obj.marking_model_ref
else:
klass = stix.lookup_extension(obj, default=cls)
m = klass.from_obj(obj, return_obj=klass())
return m
@classmethod
def from_dict(cls, d, return_obj=None):
import stix.extensions.marking.tlp # noqa
import stix.extensions.marking.simple_marking # noqa
import stix.extensions.marking.terms_of_use_marking # noqa
if not d:
return None
get = d.get
if return_obj is not None:
m = return_obj
m.id_ = get('id')
m.idref = get('idref')
m.marking_model_name = get('marking_model_name')
m.marking_model_ref = get('marking_model_ref')
else:
klass = stix.lookup_extension(get('xsi:type'), default=cls)
m = klass.from_dict(d, return_obj=klass())
return m
# Not Actual STIX Types!
class _MarkingSpecifications(stix.TypedList):
_contained_type = MarkingSpecification
class _MarkingStructures(stix.TypedList):
_contained_type = MarkingStructure
# Backwards compatibility
add_extension = stix.add_extension
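# Hedged usage sketch, not part of the original module: building a Marking
# with one specification and round-tripping it through the dict API defined
# above (to_dict aliases to_list).
#
#   spec = MarkingSpecification(controlled_structure="//node() | //@*")
#   marking = Marking()
#   marking.add_marking(spec)
#   data = marking.to_dict()
#   restored = Marking.from_dict(data)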
|
chriskiehl/python-stix
|
stix/data_marking.py
|
Python
|
bsd-3-clause
| 7,945
|
import os
ADMINS = (
('John Smith', 'john@example.net'),
)
PROJECT_ROOT = os.path.realpath(os.path.dirname(__file__))
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = ':memory:'
TEST_DATABASE_NAME = ':memory:'
INSTALLED_APPS = [
'django.contrib.auth',
'django.contrib.contenttypes',
'mailserver', 'mailserver.testapp']
ROOT_URLCONF = 'mailserver.urls'
ROOT_MAILCONF = 'mailserver.testapp.mailbox'
TEMPLATE_DIRS = (
os.path.join(PROJECT_ROOT, 'templates'),
)
|
telenieko/django-mailserver
|
src/mailserver/testapp/settings.py
|
Python
|
bsd-3-clause
| 482
|
#!/usr/bin/env python
#encoding: utf8
import sys, rospy
from pimouse_ros.msg import LightSensorValues
def get_freq():
f = rospy.get_param('lightsensors_freq', 10)
try:
if f <= 0.0:
raise Exception()
except:
rospy.logerr("value error: lightsensors_freq")
sys.exit(1)
return f
if __name__ == '__main__':
devfile = "/dev/rtlightsensor0"
rospy.init_node("lightsensors")
pub = rospy.Publisher('lightsensors', LightSensorValues, queue_size=1)
freq = get_freq()
rate = rospy.Rate(freq)
while not rospy.is_shutdown():
try:
with open(devfile,'r') as f:
data = f.readline().split()
data = [ int(e) for e in data ]
d = LightSensorValues()
d.right_forward = data[0]
d.right_side = data[1]
d.left_side = data[2]
d.left_forward = data[3]
d.sum_all = sum(data)
d.sum_forward = data[0] + data[3]
pub.publish(d)
except IOError:
            rospy.logerr("cannot read from " + devfile)
f = get_freq()
if f != freq:
freq = f
rate = rospy.Rate(freq)
rate.sleep()
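# Hedged usage note, not part of the original script: because get_freq() is
# re-read on every loop iteration, the publish rate can be changed while the
# node is running, e.g.
#
#   $ rosparam set lightsensors_freq 20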
|
DaisukeUra/pimouse_ros
|
scripts/lightsensor2.py
|
Python
|
bsd-3-clause
| 1,022
|
#!/usr/bin/python
import sys
from ldif import LDIFParser, LDIFWriter, LDIFRecordList
class LDIFVerifier():
"""
Check that two LDIF files contain equivalent LDAP information. If they
don't, emit a summary of the differences
"""
def __init__( self, file1, file2):
"""
Parameters:
file1
filename of first file to read
file2
filename of second file to read
"""
self.src1 = LDIFRecordList(open(file1))
self.src2 = LDIFRecordList(open(file2))
def emitDifferingValues( self, attributes1, attributes2):
"""
Emit a description of the differences between two dictionaries of attribute values
"""
for attributeName, attributeValues1 in attributes1.iteritems():
if attributeName in attributes2:
attributeValues2 = attributes2[attributeName]
if attributeValues1 != attributeValues2:
print " " + attributeName + ": " + str(attributeValues1) + " != " + str(attributeValues2)
else:
print " " + attributeName + ": missing in second file"
def emitDifferences( self, summary1, summary2):
"""
Emit all differences between the two LDAP objects. The
supplied parameters are dictionaries between the object DN and
a list of attributes
"""
count = 0
for dnLower,wrappedObject1 in summary1.iteritems():
(dn,attributes1) = wrappedObject1
if dnLower in summary2:
wrappedObject2 = summary2 [dnLower]
(dn2,attributes2) = wrappedObject2
if( attributes1 != attributes2):
count += 1
print "\n dn: " + dn
print " [difference in attribute values]\n"
self.emitDifferingValues( attributes1, attributes2)
else:
count += 1
print "\n dn: " + dn
print " [object missing in second file]\n"
self.printSummary( dn, attributes1)
for dnLower,wrappedObject2 in summary2.iteritems():
(dn,attributes2) = wrappedObject2
if not dnLower in summary1:
count += 1
print "\n dn: " + dn
print " [object missing in first file]\n"
self.printSummary( dn, attributes2)
return count
def printSummary( self, dn, attributes):
"""
Print a complete LDAP object
"""
for attributeName, attributeValues in attributes.iteritems():
for attributeValue in attributeValues:
print " " + attributeName + ": " + attributeValue
def buildSummary( self, records):
"""
Build
"""
summary = {}
for record in records:
dn,attributes = record
summary [dn.lower()] = (dn,attributes)
return summary
def compare( self):
"""
Check whether the two named files are equal.
"""
self.src1.parse()
summary1 = self.buildSummary( self.src1.all_records)
self.src2.parse()
summary2 = self.buildSummary( self.src2.all_records)
count = self.emitDifferences( summary1, summary2)
if( count > 0):
exit(1)
if( len(sys.argv) != 3):
sys.exit("Need two arguments")
verifier = LDIFVerifier( sys.argv[1], sys.argv[2])
verifier.compare()
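# Hedged usage example, not part of the original script:
#
#   $ python compare.py expected.ldif actual.ldif
#
# prints nothing and exits 0 when both exports contain equivalent LDAP
# objects; otherwise it lists the differing DNs and exits with status 1.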
|
paulmillar/Xylophone
|
tests/util/compare.py
|
Python
|
bsd-3-clause
| 3,552
|
import warnings
from contextlib import contextmanager
from copy import copy
from django.utils.deprecation import RemovedInDjango20Warning
# Hard-coded processor for easier use of CSRF protection.
_builtin_context_processors = ('django.template.context_processors.csrf',)
_current_app_undefined = object()
class ContextPopException(Exception):
"pop() has been called more times than push()"
pass
class ContextDict(dict):
def __init__(self, context, *args, **kwargs):
super(ContextDict, self).__init__(*args, **kwargs)
context.dicts.append(self)
self.context = context
def __enter__(self):
return self
def __exit__(self, *args, **kwargs):
self.context.pop()
class BaseContext(object):
def __init__(self, dict_=None):
self._reset_dicts(dict_)
def _reset_dicts(self, value=None):
builtins = {'True': True, 'False': False, 'None': None}
self.dicts = [builtins]
if value is not None:
self.dicts.append(value)
def __copy__(self):
duplicate = copy(super(BaseContext, self))
duplicate.dicts = self.dicts[:]
return duplicate
def __repr__(self):
return repr(self.dicts)
def __iter__(self):
for d in reversed(self.dicts):
yield d
def push(self, *args, **kwargs):
return ContextDict(self, *args, **kwargs)
def pop(self):
if len(self.dicts) == 1:
raise ContextPopException
return self.dicts.pop()
def __setitem__(self, key, value):
"Set a variable in the current context"
self.dicts[-1][key] = value
def __getitem__(self, key):
"Get a variable's value, starting at the current context and going upward"
for d in reversed(self.dicts):
if key in d:
return d[key]
raise KeyError(key)
def __delitem__(self, key):
"Delete a variable from the current context"
del self.dicts[-1][key]
def has_key(self, key):
for d in self.dicts:
if key in d:
return True
return False
def __contains__(self, key):
return self.has_key(key)
def get(self, key, otherwise=None):
for d in reversed(self.dicts):
if key in d:
return d[key]
return otherwise
def new(self, values=None):
"""
Returns a new context with the same properties, but with only the
values given in 'values' stored.
"""
new_context = copy(self)
new_context._reset_dicts(values)
return new_context
def flatten(self):
"""
Returns self.dicts as one dictionary
"""
flat = {}
for d in self.dicts:
flat.update(d)
return flat
def __eq__(self, other):
"""
Compares two contexts by comparing theirs 'dicts' attributes.
"""
if isinstance(other, BaseContext):
# because dictionaries can be put in different order
# we have to flatten them like in templates
return self.flatten() == other.flatten()
# if it's not comparable return false
return False
class Context(BaseContext):
"A stack container for variable context"
def __init__(self, dict_=None, autoescape=True,
current_app=_current_app_undefined,
use_l10n=None, use_tz=None):
if current_app is not _current_app_undefined:
warnings.warn(
"The current_app argument of Context is deprecated. Use "
"RequestContext and set the current_app attribute of its "
"request instead.", RemovedInDjango20Warning, stacklevel=2)
self.autoescape = autoescape
self._current_app = current_app
self.use_l10n = use_l10n
self.use_tz = use_tz
self.render_context = RenderContext()
# Set to the original template -- as opposed to extended or included
# templates -- during rendering, see bind_template.
self.template = None
super(Context, self).__init__(dict_)
@property
def current_app(self):
return None if self._current_app is _current_app_undefined else self._current_app
@contextmanager
def bind_template(self, template):
if self.template is not None:
raise RuntimeError("Context is already bound to a template")
self.template = template
try:
yield
finally:
self.template = None
def __copy__(self):
duplicate = super(Context, self).__copy__()
duplicate.render_context = copy(self.render_context)
return duplicate
def update(self, other_dict):
"Pushes other_dict to the stack of dictionaries in the Context"
if not hasattr(other_dict, '__getitem__'):
raise TypeError('other_dict must be a mapping (dictionary-like) object.')
self.dicts.append(other_dict)
return other_dict
class RenderContext(BaseContext):
"""
A stack container for storing Template state.
RenderContext simplifies the implementation of template Nodes by providing a
safe place to store state between invocations of a node's `render` method.
The RenderContext also provides scoping rules that are more sensible for
'template local' variables. The render context stack is pushed before each
template is rendered, creating a fresh scope with nothing in it. Name
    resolution fails if a variable is not found at the top of the RenderContext
stack. Thus, variables are local to a specific template and don't affect the
rendering of other templates as they would if they were stored in the normal
template context.
"""
def __iter__(self):
for d in self.dicts[-1]:
yield d
def has_key(self, key):
return key in self.dicts[-1]
def get(self, key, otherwise=None):
return self.dicts[-1].get(key, otherwise)
def __getitem__(self, key):
return self.dicts[-1][key]
class RequestContext(Context):
"""
This subclass of template.Context automatically populates itself using
the processors defined in the engine's configuration.
Additional processors can be specified as a list of callables
using the "processors" keyword argument.
"""
def __init__(self, request, dict_=None, processors=None,
current_app=_current_app_undefined,
use_l10n=None, use_tz=None):
# current_app isn't passed here to avoid triggering the deprecation
# warning in Context.__init__.
super(RequestContext, self).__init__(
dict_, use_l10n=use_l10n, use_tz=use_tz)
if current_app is not _current_app_undefined:
warnings.warn(
"The current_app argument of RequestContext is deprecated. "
"Set the current_app attribute of its request instead.",
RemovedInDjango20Warning, stacklevel=2)
self._current_app = current_app
self.request = request
self._processors = () if processors is None else tuple(processors)
self._processors_index = len(self.dicts)
# placeholder for context processors output
self.update({})
# empty dict for any new modifications
# (so that context processors don't overwrite them)
self.update({})
@contextmanager
def bind_template(self, template):
if self.template is not None:
raise RuntimeError("Context is already bound to a template")
self.template = template
# Set context processors according to the template engine's settings.
processors = (template.engine.template_context_processors +
self._processors)
updates = {}
for processor in processors:
updates.update(processor(self.request))
self.dicts[self._processors_index] = updates
try:
yield
finally:
self.template = None
# Unset context processors.
self.dicts[self._processors_index] = {}
def new(self, values=None):
new_context = super(RequestContext, self).new(values)
# This is for backwards-compatibility: RequestContexts created via
# Context.new don't include values from context processors.
if hasattr(new_context, '_processors_index'):
del new_context._processors_index
return new_context
def make_context(context, request=None):
"""
Create a suitable Context from a plain dict and optionally an HttpRequest.
"""
if request is None:
context = Context(context)
else:
# The following pattern is required to ensure values from
# context override those from template context processors.
original_context = context
context = RequestContext(request)
if original_context:
context.push(original_context)
return context
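# Hedged usage sketch, not part of Django itself: lookups walk the dict
# stack from the most recent push() downward, and push() doubles as a
# context manager (see ContextDict above).
#
#   c = Context({'a': 1})
#   with c.push(a=2):
#       assert c['a'] == 2   # innermost dict shadows outer ones
#   assert c['a'] == 1       # automatically popped on exit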
|
pquentin/django
|
django/template/context.py
|
Python
|
bsd-3-clause
| 9,045
|
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Deleting field 'SliderPlugin.show_ribbon'
db.delete_column('cmsplugin_sliderplugin', 'show_ribbon')
# Renaming field 'SliderPlugin.image_height'
db.rename_column('cmsplugin_sliderplugin', 'image_height', 'height')
# Renaming field 'SliderPlugin.image_width'
db.rename_column('cmsplugin_sliderplugin', 'image_width', 'width')
def backwards(self, orm):
# Adding field 'SliderPlugin.show_ribbon'
db.add_column('cmsplugin_sliderplugin', 'show_ribbon',
self.gf('django.db.models.fields.BooleanField')(default=True),
keep_default=False)
# Renaming field 'SliderPlugin.image_height'
db.rename_column('cmsplugin_sliderplugin', 'height', 'image_height')
# Renaming field 'SliderPlugin.image_width'
db.rename_column('cmsplugin_sliderplugin', 'width', 'image_width')
models = {
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cmsplugin_nivoslider.slideralbum': {
'Meta': {'object_name': 'SliderAlbum'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'images': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cmsplugin_nivoslider.SliderImage']", 'symmetrical': 'False', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150'})
},
'cmsplugin_nivoslider.sliderimage': {
'Meta': {'ordering': "('order', 'name')", 'object_name': 'SliderImage'},
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '150', 'blank': 'True'}),
'order': ('django.db.models.fields.PositiveIntegerField', [], {'default': '100'})
},
'cmsplugin_nivoslider.sliderplugin': {
'Meta': {'object_name': 'SliderPlugin', 'db_table': "'cmsplugin_sliderplugin'", '_ormbases': ['cms.CMSPlugin']},
'album': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cmsplugin_nivoslider.SliderAlbum']"}),
'anim_speed': ('django.db.models.fields.PositiveIntegerField', [], {'default': '500'}),
'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'effect': ('django.db.models.fields.CharField', [], {'default': "'random'", 'max_length': '50'}),
'height': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'}),
'pause_time': ('django.db.models.fields.PositiveIntegerField', [], {'default': '3000'}),
'theme': ('django.db.models.fields.CharField', [], {'default': "'default'", 'max_length': '50'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'width': ('django.db.models.fields.PositiveIntegerField', [], {'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['cmsplugin_nivoslider']
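# --- Editor's sketch: how a South migration like this is typically applied
# and reverted from the command line (app label taken from complete_apps;
# the surrounding project setup is assumed, not shown in this file):
#
#   python manage.py migrate cmsplugin_nivoslider 0006   # runs forwards()
#   python manage.py migrate cmsplugin_nivoslider 0005   # runs backwards()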
| samirasnoun/django_cms_gallery_image | cmsplugin_nivoslider/migrations/0006_rename_fields_image_height_width__del_field_show_ribbon.py | Python | bsd-3-clause | 5,126 |
#!/usr/bin/env python3
# Read point sets dumped to /tmp/pfile and plot each one as a closed
# polygon. Relevant lines carry a Python literal list of (x, y) tuples,
# prefixed with 'pdata=' (polygon) or 'edata=' (ear).
import matplotlib.pyplot as plt


def plot(xs, ys):
    """Scatter the points and connect them in order on fixed +/-50 axes."""
    fig, ax = plt.subplots(figsize=(5, 5))
    ax.set_xlim([-50, 50])
    ax.set_ylim([-50, 50])
    ax.scatter(xs, ys)
    ax.plot(xs, ys)
    plt.show()


lines = open('/tmp/pfile').readlines()
trip = False
for l in lines:
    if l.startswith('pdata='):
        # eval() is kept from the original script; it trusts /tmp/pfile.
        xs, ys = [], []
        for x, y in eval(l[len('pdata='):]):
            xs += [x]
            ys += [y]
        print(len(xs), 'points read')
        # Repeat the first point so the plotted outline closes.
        xs += [xs[0]]
        ys += [ys[0]]
        if len(xs) < 25:
            plot(xs, ys)
            trip = True
    if l.startswith('edata='):
        xs, ys = [], []
        for x, y in eval(l[len('edata='):]):
            xs += [x]
            ys += [y]
        for i in range(len(xs)):
            print(xs[i], ys[i], ' ', end=' ')
        print(len(xs), ' <ear points read')
        xs += [xs[0]]
        ys += [ys[0]]
        if trip:
            plot(xs, ys)
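# --- Editor's sketch: the expected shape of /tmp/pfile, inferred from the
# parser above. Each 'pdata='/'edata=' line carries a Python literal list
# of (x, y) tuples that eval() can read; the values here are made up:
#
#   pdata=[(0, 0), (10, 0), (10, 10), (0, 10)]
#   edata=[(-5, -5), (5, -5), (0, 5)]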
| donbright/piliko | experiment/x.py | Python | bsd-3-clause | 827 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Generated from FHIR 4.0.0-a53ec6ee1b on 2019-05-07.
# 2019, SMART Health IT.
import os
import io
import unittest
import json
from . import documentreference
from .fhirdate import FHIRDate


class DocumentReferenceTests(unittest.TestCase):
    def instantiate_from(self, filename):
datadir = os.environ.get('FHIR_UNITTEST_DATADIR') or ''
with io.open(os.path.join(datadir, filename), 'r', encoding='utf-8') as handle:
js = json.load(handle)
self.assertEqual("DocumentReference", js["resourceType"])
        return documentreference.DocumentReference(js)

    def testDocumentReference1(self):
inst = self.instantiate_from("documentreference-example.json")
self.assertIsNotNone(inst, "Must have instantiated a DocumentReference instance")
self.implDocumentReference1(inst)
js = inst.as_json()
self.assertEqual("DocumentReference", js["resourceType"])
inst2 = documentreference.DocumentReference(js)
        self.implDocumentReference1(inst2)

    def implDocumentReference1(self, inst):
self.assertEqual(inst.category[0].coding[0].code, "History and Physical")
self.assertEqual(inst.category[0].coding[0].display, "History and Physical")
self.assertEqual(inst.category[0].coding[0].system, "http://ihe.net/xds/connectathon/classCodes")
self.assertEqual(inst.contained[0].id, "a2")
self.assertEqual(inst.content[0].attachment.contentType, "application/hl7-v3+xml")
self.assertEqual(inst.content[0].attachment.creation.date, FHIRDate("2005-12-24T09:35:00+11:00").date)
self.assertEqual(inst.content[0].attachment.creation.as_json(), "2005-12-24T09:35:00+11:00")
self.assertEqual(inst.content[0].attachment.hash, "2jmj7l5rSw0yVb/vlWAYkK/YBwk=")
self.assertEqual(inst.content[0].attachment.language, "en-US")
self.assertEqual(inst.content[0].attachment.size, 3654)
self.assertEqual(inst.content[0].attachment.title, "Physical")
self.assertEqual(inst.content[0].attachment.url, "http://example.org/xds/mhd/Binary/07a6483f-732b-461e-86b6-edb665c45510")
self.assertEqual(inst.content[0].format.code, "urn:ihe:pcc:handp:2008")
self.assertEqual(inst.content[0].format.display, "History and Physical Specification")
self.assertEqual(inst.content[0].format.system, "urn:oid:1.3.6.1.4.1.19376.1.2.3")
self.assertEqual(inst.context.event[0].coding[0].code, "T-D8200")
self.assertEqual(inst.context.event[0].coding[0].display, "Arm")
self.assertEqual(inst.context.event[0].coding[0].system, "http://ihe.net/xds/connectathon/eventCodes")
self.assertEqual(inst.context.facilityType.coding[0].code, "Outpatient")
self.assertEqual(inst.context.facilityType.coding[0].display, "Outpatient")
self.assertEqual(inst.context.facilityType.coding[0].system, "http://www.ihe.net/xds/connectathon/healthcareFacilityTypeCodes")
self.assertEqual(inst.context.period.end.date, FHIRDate("2004-12-23T08:01:00+11:00").date)
self.assertEqual(inst.context.period.end.as_json(), "2004-12-23T08:01:00+11:00")
self.assertEqual(inst.context.period.start.date, FHIRDate("2004-12-23T08:00:00+11:00").date)
self.assertEqual(inst.context.period.start.as_json(), "2004-12-23T08:00:00+11:00")
self.assertEqual(inst.context.practiceSetting.coding[0].code, "General Medicine")
self.assertEqual(inst.context.practiceSetting.coding[0].display, "General Medicine")
self.assertEqual(inst.context.practiceSetting.coding[0].system, "http://www.ihe.net/xds/connectathon/practiceSettingCodes")
self.assertEqual(inst.date.date, FHIRDate("2005-12-24T09:43:41+11:00").date)
self.assertEqual(inst.date.as_json(), "2005-12-24T09:43:41+11:00")
self.assertEqual(inst.description, "Physical")
self.assertEqual(inst.docStatus, "preliminary")
self.assertEqual(inst.id, "example")
self.assertEqual(inst.identifier[0].system, "urn:ietf:rfc:3986")
self.assertEqual(inst.identifier[0].value, "urn:oid:1.3.6.1.4.1.21367.2005.3.7.1234")
self.assertEqual(inst.masterIdentifier.system, "urn:ietf:rfc:3986")
self.assertEqual(inst.masterIdentifier.value, "urn:oid:1.3.6.1.4.1.21367.2005.3.7")
self.assertEqual(inst.meta.tag[0].code, "HTEST")
self.assertEqual(inst.meta.tag[0].display, "test health data")
self.assertEqual(inst.meta.tag[0].system, "http://terminology.hl7.org/CodeSystem/v3-ActReason")
self.assertEqual(inst.relatesTo[0].code, "appends")
self.assertEqual(inst.securityLabel[0].coding[0].code, "V")
self.assertEqual(inst.securityLabel[0].coding[0].display, "very restricted")
self.assertEqual(inst.securityLabel[0].coding[0].system, "http://terminology.hl7.org/CodeSystem/v3-Confidentiality")
self.assertEqual(inst.status, "current")
self.assertEqual(inst.text.status, "generated")
self.assertEqual(inst.type.coding[0].code, "34108-1")
self.assertEqual(inst.type.coding[0].display, "Outpatient Note")
self.assertEqual(inst.type.coding[0].system, "http://loinc.org")
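# --- Editor's sketch: instantiate_from() resolves example JSON relative to
# FHIR_UNITTEST_DATADIR, so point that variable at a directory containing
# documentreference-example.json before running the tests (the module path
# below is hypothetical and depends on how the package is installed):
#
#   FHIR_UNITTEST_DATADIR=/path/to/fhir-examples \
#       python -m unittest fhirclient_4_0_0.models.documentreference_tests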
| all-of-us/raw-data-repository | rdr_service/lib_fhir/fhirclient_4_0_0/models/documentreference_tests.py | Python | bsd-3-clause | 5,255 |
# IMPORT ORDER MATTERS!

# inherit from BaseConfig
from cumulusci.core.keychain.BaseProjectKeychain import BaseProjectKeychain

# inherit from BaseProjectKeychain
from cumulusci.core.keychain.BaseEncryptedProjectKeychain import BaseEncryptedProjectKeychain
from cumulusci.core.keychain.EnvironmentProjectKeychain import EnvironmentProjectKeychain

# inherit from BaseEncryptedProjectKeychain
from cumulusci.core.keychain.EncryptedFileProjectKeychain import EncryptedFileProjectKeychain
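# --- Editor's sketch: the ordered imports above re-export every keychain
# class from one flat namespace, so callers never import the concrete
# modules directly (the constructor arguments here are hypothetical):
#
#   from cumulusci.core.keychain import EncryptedFileProjectKeychain
#   keychain = EncryptedFileProjectKeychain(project_config, key)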
| e02d96ec16/CumulusCI | cumulusci/core/keychain/__init__.py | Python | bsd-3-clause | 486 |