| code (string, length 2–1.05M) | repo_name (string, length 5–104) | path (string, length 4–251) | language (string, 1 class) | license (string, 15 classes) | size (int32, 2–1.05M) |
|---|---|---|---|---|---|
import sys
import os
import pysos
import signal
# these two variables should be changed depending on the test driver's PID
# and the type of message it will be sending. If you are using the generic_test.c
# driver, then it is likely these two values can stay the same
TEST_MODULE = 0x81
MSG_TEST_DATA = 33
ALARM_LEN = 60
START_DATA = 100
FINAL_DATA = 200
TEST_FAIL = 155
TEST_PASS = 255
# variables holding new and old sensor values
# these can be replaced with whatever you want since they are specific to
# what the test driver expects for data
oldstate = {}
state = {}
# a signal handler that will go off for an alarm
# it is highly suggested that you use this since it is the easiest way to test if your
# node has entered panic mode via the script
def panic_handler(signum, frame):
print "it is highly likely that your node has entered panic mode"
print "please reset the node"
sys.exit(1)
# message handler for messages of type MSG_TEST_DATA
def generic_test(msg):
""" Small example of test driver usage. It simulates a virtual
dice and shows which side of the dice is up.
"""
global oldstate
global state
print "message recieved"
signal.alarm(ALARM_LEN)
    # unpack the values we are expecting, in this case a node id, the accelerometer id,
    # and a value from the sensor
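    # "<BBB" is a struct-style format string (little-endian, three unsigned
    # bytes); this assumes pysos.unpack follows the struct module conventions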
(node_id, node_state, data) = pysos.unpack("<BBB", msg['data'])
    if node_id not in state:
state[node_id] = 0
oldstate[node_id] = 0
    # these are some simple calculations to test the sensor value we received
    # this is the part you need to fill in to verify that the function is working
if (node_state == START_DATA):
print "initialization began correctly"
if (node_state == 0):
state[node_id] = data
    if (node_state == TEST_FAIL):
        print >> sys.stderr, "the test for item %d has failed" % data
    if (node_state == TEST_PASS):
        print "the test for item %d has passed" % data
    if (node_state == 1 and state[node_id] != data):
        print >> sys.stderr, "a message was lost somewhere on node %d before count %d" % (node_id, data)
if (node_state == FINAL_DATA):
print "finalization worked correctly"
if __name__ == "__main__":
# here we set up a connection to sossrv using the pysos module
# and begin listening for messages
# we also register our function above with the server so that it is called
# when the appropriate message type is received
srv = pysos.sossrv()
srv.register_trigger(generic_test, sid=TEST_MODULE, type=MSG_TEST_DATA)
# register the signal handler and begin an alarm that will wait for 60 seconds before going off
# other times for the alarm might be good, use your own judgement based on your test
signal.signal(signal.SIGALRM, panic_handler)
signal.alarm(ALARM_LEN)
# we loop here since the test_suite application has information regarding the amount of time
# each test should be run. after the amount of time specified in test.lst, test_suite will
# end this script and move on to another test
while True:
    continue
| nesl/sos-2x | modules/unit_test/modules/kernel/post_raw/source_trick/reciever/source_trick_reciever.py | Python | bsd-3-clause | 3,085 |
from functools import reduce
from glob import glob
import os
import os.path as op
from shutil import copyfile, copytree
import pytest
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_allclose,
assert_array_equal)
import mne
from mne.datasets import testing
from mne.transforms import (Transform, apply_trans, rotation, translation,
scaling)
from mne.coreg import (fit_matched_points, create_default_subject, scale_mri,
_is_mri_subject, scale_labels, scale_source_space,
coregister_fiducials, get_mni_fiducials)
from mne.io import read_fiducials
from mne.io.constants import FIFF
from mne.utils import requires_nibabel, modified_env, check_version
from mne.source_space import write_source_spaces
data_path = testing.data_path(download=False)
@pytest.fixture
def few_surfaces():
"""Set the _MNE_FEW_SURFACES env var."""
with modified_env(_MNE_FEW_SURFACES='true'):
yield
def test_coregister_fiducials():
"""Test coreg.coregister_fiducials()."""
# prepare head and MRI fiducials
trans = Transform('head', 'mri',
rotation(.4, .1, 0).dot(translation(.1, -.1, .1)))
coords_orig = np.array([[-0.08061612, -0.02908875, -0.04131077],
[0.00146763, 0.08506715, -0.03483611],
[0.08436285, -0.02850276, -0.04127743]])
coords_trans = apply_trans(trans, coords_orig)
def make_dig(coords, cf):
return ({'coord_frame': cf, 'ident': 1, 'kind': 1, 'r': coords[0]},
{'coord_frame': cf, 'ident': 2, 'kind': 1, 'r': coords[1]},
{'coord_frame': cf, 'ident': 3, 'kind': 1, 'r': coords[2]})
mri_fiducials = make_dig(coords_trans, FIFF.FIFFV_COORD_MRI)
info = {'dig': make_dig(coords_orig, FIFF.FIFFV_COORD_HEAD)}
# test coregister_fiducials()
trans_est = coregister_fiducials(info, mri_fiducials)
assert trans_est.from_str == trans.from_str
assert trans_est.to_str == trans.to_str
assert_array_almost_equal(trans_est['trans'], trans['trans'])
@requires_nibabel()
@pytest.mark.slowtest # can take forever on OSX Travis
@testing.requires_testing_data
@pytest.mark.parametrize('scale', (.9, [1, .2, .8]))
def test_scale_mri(tmpdir, few_surfaces, scale):
"""Test creating fsaverage and scaling it."""
# create fsaverage using the testing "fsaverage" instead of the FreeSurfer
# one
tempdir = str(tmpdir)
fake_home = testing.data_path()
create_default_subject(subjects_dir=tempdir, fs_home=fake_home,
verbose=True)
assert _is_mri_subject('fsaverage', tempdir), "Creating fsaverage failed"
fid_path = op.join(tempdir, 'fsaverage', 'bem', 'fsaverage-fiducials.fif')
os.remove(fid_path)
create_default_subject(update=True, subjects_dir=tempdir,
fs_home=fake_home)
assert op.exists(fid_path), "Updating fsaverage"
# copy MRI file from sample data (shouldn't matter that it's incorrect,
# so here choose a small one)
path_from = op.join(testing.data_path(), 'subjects', 'sample', 'mri',
'T1.mgz')
path_to = op.join(tempdir, 'fsaverage', 'mri', 'orig.mgz')
copyfile(path_from, path_to)
# remove redundant label files
label_temp = op.join(tempdir, 'fsaverage', 'label', '*.label')
label_paths = glob(label_temp)
for label_path in label_paths[1:]:
os.remove(label_path)
# create source space
print('Creating surface source space')
path = op.join(tempdir, 'fsaverage', 'bem', 'fsaverage-%s-src.fif')
src = mne.setup_source_space('fsaverage', 'ico0', subjects_dir=tempdir,
add_dist=False)
mri = op.join(tempdir, 'fsaverage', 'mri', 'orig.mgz')
print('Creating volume source space')
vsrc = mne.setup_volume_source_space(
'fsaverage', pos=50, mri=mri, subjects_dir=tempdir,
add_interpolator=False)
write_source_spaces(path % 'vol-50', vsrc)
# scale fsaverage
write_source_spaces(path % 'ico-0', src, overwrite=True)
with pytest.warns(None): # sometimes missing nibabel
scale_mri('fsaverage', 'flachkopf', scale, True,
subjects_dir=tempdir, verbose='debug')
assert _is_mri_subject('flachkopf', tempdir), "Scaling failed"
spath = op.join(tempdir, 'flachkopf', 'bem', 'flachkopf-%s-src.fif')
assert op.exists(spath % 'ico-0'), "Source space ico-0 was not scaled"
assert os.path.isfile(os.path.join(tempdir, 'flachkopf', 'surf',
'lh.sphere.reg'))
vsrc_s = mne.read_source_spaces(spath % 'vol-50')
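    # the src_mri_t affine should map voxel indices onto the stored source
    # positions, both for the original and the scaled volume source space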
for vox in ([0, 0, 0], [1, 0, 0], [0, 1, 0], [0, 0, 1], [1, 2, 3]):
idx = np.ravel_multi_index(vox, vsrc[0]['shape'], order='F')
err_msg = f'idx={idx} @ {vox}, scale={scale}'
assert_allclose(apply_trans(vsrc[0]['src_mri_t'], vox),
vsrc[0]['rr'][idx], err_msg=err_msg)
assert_allclose(apply_trans(vsrc_s[0]['src_mri_t'], vox),
vsrc_s[0]['rr'][idx], err_msg=err_msg)
scale_labels('flachkopf', subjects_dir=tempdir)
# add distances to source space after hacking the properties to make
# it run *much* faster
src_dist = src.copy()
for s in src_dist:
s.update(rr=s['rr'][s['vertno']], nn=s['nn'][s['vertno']],
tris=s['use_tris'])
s.update(np=len(s['rr']), ntri=len(s['tris']),
vertno=np.arange(len(s['rr'])),
inuse=np.ones(len(s['rr']), int))
mne.add_source_space_distances(src_dist)
write_source_spaces(path % 'ico-0', src_dist, overwrite=True)
# scale with distances
os.remove(spath % 'ico-0')
scale_source_space('flachkopf', 'ico-0', subjects_dir=tempdir)
ssrc = mne.read_source_spaces(spath % 'ico-0')
assert ssrc[0]['dist'] is not None
assert ssrc[0]['nearest'] is not None
# check patch info computation (only if SciPy is new enough to be fast)
if check_version('scipy', '1.3'):
for s in src_dist:
for key in ('dist', 'dist_limit'):
s[key] = None
write_source_spaces(path % 'ico-0', src_dist, overwrite=True)
# scale with distances
os.remove(spath % 'ico-0')
scale_source_space('flachkopf', 'ico-0', subjects_dir=tempdir)
ssrc = mne.read_source_spaces(spath % 'ico-0')
assert ssrc[0]['dist'] is None
assert ssrc[0]['nearest'] is not None
@pytest.mark.slowtest # can take forever on OSX Travis
@testing.requires_testing_data
@requires_nibabel()
def test_scale_mri_xfm(tmpdir, few_surfaces):
"""Test scale_mri transforms and MRI scaling."""
# scale fsaverage
tempdir = str(tmpdir)
fake_home = testing.data_path()
# add fsaverage
create_default_subject(subjects_dir=tempdir, fs_home=fake_home,
verbose=True)
# add sample (with few files)
sample_dir = op.join(tempdir, 'sample')
os.mkdir(sample_dir)
os.mkdir(op.join(sample_dir, 'bem'))
for dirname in ('mri', 'surf'):
copytree(op.join(fake_home, 'subjects', 'sample', dirname),
op.join(sample_dir, dirname))
subject_to = 'flachkopf'
spacing = 'oct2'
for subject_from in ('fsaverage', 'sample'):
if subject_from == 'fsaverage':
scale = 1. # single dim
else:
scale = [0.9, 2, .8] # separate
src_from_fname = op.join(tempdir, subject_from, 'bem',
'%s-%s-src.fif' % (subject_from, spacing))
src_from = mne.setup_source_space(
subject_from, spacing, subjects_dir=tempdir, add_dist=False)
write_source_spaces(src_from_fname, src_from)
vertices_from = np.concatenate([s['vertno'] for s in src_from])
assert len(vertices_from) == 36
        hemis = ([0] * len(src_from[0]['vertno']) +
                 [1] * len(src_from[1]['vertno']))
mni_from = mne.vertex_to_mni(vertices_from, hemis, subject_from,
subjects_dir=tempdir)
if subject_from == 'fsaverage': # identity transform
source_rr = np.concatenate([s['rr'][s['vertno']]
for s in src_from]) * 1e3
assert_allclose(mni_from, source_rr)
if subject_from == 'fsaverage':
overwrite = skip_fiducials = False
else:
with pytest.raises(IOError, match='No fiducials file'):
scale_mri(subject_from, subject_to, scale,
subjects_dir=tempdir)
skip_fiducials = True
with pytest.raises(IOError, match='already exists'):
scale_mri(subject_from, subject_to, scale,
subjects_dir=tempdir, skip_fiducials=skip_fiducials)
overwrite = True
if subject_from == 'sample': # support for not needing all surf files
os.remove(op.join(sample_dir, 'surf', 'lh.curv'))
scale_mri(subject_from, subject_to, scale, subjects_dir=tempdir,
verbose='debug', overwrite=overwrite,
skip_fiducials=skip_fiducials)
if subject_from == 'fsaverage':
assert _is_mri_subject(subject_to, tempdir), "Scaling failed"
src_to_fname = op.join(tempdir, subject_to, 'bem',
'%s-%s-src.fif' % (subject_to, spacing))
assert op.exists(src_to_fname), "Source space was not scaled"
# Check MRI scaling
fname_mri = op.join(tempdir, subject_to, 'mri', 'T1.mgz')
assert op.exists(fname_mri), "MRI was not scaled"
# Check MNI transform
src = mne.read_source_spaces(src_to_fname)
vertices = np.concatenate([s['vertno'] for s in src])
assert_array_equal(vertices, vertices_from)
mni = mne.vertex_to_mni(vertices, hemis, subject_to,
subjects_dir=tempdir)
assert_allclose(mni, mni_from, atol=1e-3) # 0.001 mm
def test_fit_matched_points():
"""Test fit_matched_points: fitting two matching sets of points."""
tgt_pts = np.random.RandomState(42).uniform(size=(6, 3))
# rotation only
trans = rotation(2, 6, 3)
src_pts = apply_trans(trans, tgt_pts)
trans_est = fit_matched_points(src_pts, tgt_pts, translate=False,
out='trans')
est_pts = apply_trans(trans_est, src_pts)
assert_array_almost_equal(tgt_pts, est_pts, 2, "fit_matched_points with "
"rotation")
# rotation & translation
trans = np.dot(translation(2, -6, 3), rotation(2, 6, 3))
src_pts = apply_trans(trans, tgt_pts)
trans_est = fit_matched_points(src_pts, tgt_pts, out='trans')
est_pts = apply_trans(trans_est, src_pts)
assert_array_almost_equal(tgt_pts, est_pts, 2, "fit_matched_points with "
"rotation and translation.")
# rotation & translation & scaling
trans = reduce(np.dot, (translation(2, -6, 3), rotation(1.5, .3, 1.4),
scaling(.5, .5, .5)))
src_pts = apply_trans(trans, tgt_pts)
trans_est = fit_matched_points(src_pts, tgt_pts, scale=1, out='trans')
est_pts = apply_trans(trans_est, src_pts)
assert_array_almost_equal(tgt_pts, est_pts, 2, "fit_matched_points with "
"rotation, translation and scaling.")
# test exceeding tolerance
tgt_pts[0, :] += 20
pytest.raises(RuntimeError, fit_matched_points, tgt_pts, src_pts, tol=10)
@testing.requires_testing_data
@requires_nibabel()
def test_get_mni_fiducials():
"""Test get_mni_fiducials."""
subjects_dir = op.join(data_path, 'subjects')
fid_fname = op.join(subjects_dir, 'sample', 'bem',
'sample-fiducials.fif')
fids, coord_frame = read_fiducials(fid_fname)
assert coord_frame == FIFF.FIFFV_COORD_MRI
assert [f['ident'] for f in fids] == list(range(1, 4))
fids = np.array([f['r'] for f in fids])
fids_est = get_mni_fiducials('sample', subjects_dir)
fids_est = np.array([f['r'] for f in fids_est])
dists = np.linalg.norm(fids - fids_est, axis=-1) * 1000. # -> mm
assert (dists < 8).all(), dists
| rkmaddox/mne-python | mne/tests/test_coreg.py | Python | bsd-3-clause | 12,343 |
#! /usr/bin/env python
'''
Recursively looks for EPD.
Writes the found library to STDOUT and the found include directory to STDERR. In this way,
this script can be executed within CMake and the Python library and include directory can be
read from the STDOUT and STDERR streams.
Checks for a minimum version of Python, 2.7 by default, which can be specified as a
command-line argument.
The root paths it starts looking for EPD in are contained in the 'check_dirs' global variable.
'''
import os, sys, getpass
check_dirs = ['/opt/local/lib', '/home/%s' % getpass.getuser(), '/usr/share/', '/home/ecuzzill', '/opt/local', '/opt']
def main():
if len(sys.argv) == 2:
min_py_version = sys.argv[1]
#make sure it's a version by casting to float
try:
min_py_version = 'libpython' + str(float(min_py_version))
except ValueError:
min_py_version = 'libpython2.7'
else:
min_py_version = 'libpython2.7'
found = False
for d in check_dirs:
for (dname, dnames, fnames) in os.walk(d):
for r in dnames:
#found an 'epd'-ish directory
if r.find('epd') >= 0:
full_dir = '%s/%s' % (dname, r)
lib_exists = False
#find the library
                    #find the library; note that version strings are compared
                    #lexicographically, which is good enough for 2.x versions
                    for lib in [x for x in os.listdir(full_dir + '/lib/') if x.find('libpython') >= 0]:
                        lib_version = '.'.join(lib.split('.')[:2])
                        if lib_version >= min_py_version:
                            lib_exists = True
                            break
                    if not lib_exists:
                        continue
lib = '%s/lib/%s.so' % (full_dir, lib_version)
include_dir = '%s/include/%s' % (full_dir, min_py_version[3:])
#success if this passes
if os.path.isfile(lib) and os.path.isdir(include_dir):
bin_path = '%s/bin/python' % (full_dir)
unicode_support = os.system("%s -c 'import sys; sys.exit(sys.maxunicode > 65535)'" % (bin_path))
if unicode_support == 0:
sys.stdout.write(lib)
sys.stderr.write(include_dir)
found = True
break
if found:
break
if __name__ == '__main__':
main()
| LAIRLAB/libpyarr | find_epd.py | Python | bsd-3-clause | 2,436 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.7 on 2016-06-15 20:21
from __future__ import unicode_literals
from django.db import migrations, models
import posts.models
class Migration(migrations.Migration):
dependencies = [
('posts', '0005_auto_20160615_1249'),
]
operations = [
migrations.AlterField(
model_name='post',
name='image',
field=models.ImageField(blank=True, height_field='height_field', null=True, upload_to=posts.models.upload_location, width_field='width_field'),
),
]
| DJMedhaug/BizSprint | posts/migrations/0006_auto_20160615_1321.py | Python | bsd-3-clause | 570 |
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-03-14 22:11
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('olympic', '0006_allow_seeding_and_results'),
]
operations = [
migrations.AddField(
model_name='category',
name='novice',
field=models.CharField(blank=True, choices=[('N', 'Novice'), ('E', 'Experienced')], max_length=1, null=True),
),
]
| mjtamlyn/archery-scoring | olympic/migrations/0007_add_novice_h2h.py | Python | bsd-3-clause | 528 |
from spectrum import CORRELOGRAMPSD, CORRELATION, pcorrelogram, marple_data
from spectrum.tools import cshift  # used by create_figure below
from spectrum import data_two_freqs
from pylab import log10, plot, savefig, linspace
from numpy.testing import assert_array_almost_equal, assert_almost_equal
def test_correlog():
psd = CORRELOGRAMPSD(marple_data, marple_data, lag=15)
assert_almost_equal(psd[0], 0.138216970)
assert_almost_equal(psd[1000-1], 7.900110787)
assert_almost_equal(psd[2000-1], 0.110103858)
assert_almost_equal(psd[3000-1], 0.222184134)
assert_almost_equal(psd[4000-1], -0.036255277)
assert_almost_equal(psd[4096-1], 0.1391839711)
return psd
def test_correlog_auto_cross():
"""Same as test_correlog but x and y provided"""
psd1 = CORRELOGRAMPSD(marple_data, lag=16)
psd2 = CORRELOGRAMPSD(marple_data, marple_data, lag=16)
assert_array_almost_equal(psd1, psd2)
psd1 = CORRELOGRAMPSD(marple_data, lag=16, correlation_method='CORRELATION')
psd2 = CORRELOGRAMPSD(marple_data, marple_data, lag=16, correlation_method='CORRELATION')
assert_array_almost_equal(psd1, psd2)
def test_correlog_correlation_method():
"""test correlogramPSD playing with method argument"""
psd1 = CORRELOGRAMPSD(marple_data, lag=16, correlation_method='CORRELATION')
psd2 = CORRELOGRAMPSD(marple_data, marple_data, lag=16, correlation_method='xcorr')
assert_array_almost_equal(psd1, psd2)
def test_pcorrelogram_class():
p = pcorrelogram(marple_data, lag=16)
p()
print(p)
p = pcorrelogram(data_two_freqs(), lag=16)
p.plot()
print(p)
def test_CORRELOGRAMPSD_others():
p = CORRELOGRAMPSD(marple_data, marple_data, lag=16, NFFT=None)
def create_figure():
psd = test_correlog()
f = linspace(-0.5, 0.5, len(psd))
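    # move the zero-frequency bin to the center so the PSD lines up with the
    # symmetric [-0.5, 0.5] frequency axis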
    psd = cshift(psd, len(psd)//2)
plot(f, 10*log10(psd/max(psd)))
savefig('psd_corr.png')
if __name__ == "__main__":
create_figure()
| cokelaer/spectrum | test/test_correlog.py | Python | bsd-3-clause | 1,904 |
# proxy module
from __future__ import absolute_import
from envisage.plugins.remote_editor.i_remote_shell import *
| enthought/etsproxy | enthought/plugins/remote_editor/i_remote_shell.py | Python | bsd-3-clause | 114 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('donation', '0002_auto_20190416_2318'),
]
operations = [
migrations.AlterField(
model_name='razorpayplans',
name='frequency',
field=models.CharField(max_length=16, choices=[('Monthly', 'Monthly'), ('Yearly', 'Yearly')]),
),
]
| PARINetwork/pari | donation/migrations/0003_auto_20201113_1437.py | Python | bsd-3-clause | 469 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_django-nupages
-------------------
Tests for `django-nupages` models module.
"""
import os
import shutil
import unittest
from django.utils import timezone
from django.core.urlresolvers import reverse
from django.contrib.sites.models import Site
from nupages import models
from nupages import views
class TestNupages(unittest.TestCase):
    def create_page(
            self,
            title="Test Page",
            description="yes, this is only a test",
            content="yes, this is only a test",
            custom_template="",
            site=None):
        if site is None:
            # created lazily: a Site default argument would be evaluated (and
            # hit the database) once at import time
            site = Site.objects.create(domain="127.0.0.1:8000",
                                       name="127.0.0.1:8000")
        return models.Page.objects.create(
            title=title,
            description=description,
            content=content,
            custom_template=custom_template,
            created=timezone.now(),
            site=site)
def test_page_creation(self):
p = self.create_page()
self.assertTrue(isinstance(p, models.Page))
self.assertEqual(p.__unicode__(), p.title)
        self.assertEqual(p.get_absolute_url(), reverse("nupages:detail", kwargs={'slug': p.slug}))
| goldhand/django-nupages | tests/test_models.py | Python | bsd-3-clause | 1,041 |
# ------------------------------------------------------------------------
# coding=utf-8
# ------------------------------------------------------------------------
from __future__ import absolute_import
import os
from django import forms
from django.utils.translation import ugettext_lazy as _
from feincms import settings
from . import logger
from .models import Category, MediaFile
from .fields import AdminFileWithPreviewWidget
# ------------------------------------------------------------------------
class MediaCategoryAdminForm(forms.ModelForm):
class Meta:
model = Category
def clean_parent(self):
data = self.cleaned_data['parent']
if data is not None and self.instance in data.path_list():
raise forms.ValidationError(_("This would create a loop in the hierarchy"))
return data
    def __init__(self, *args, **kwargs):
super(MediaCategoryAdminForm, self).__init__(*args, **kwargs)
self.fields['parent'].queryset = self.fields['parent'].queryset.exclude(pk=self.instance.pk)
# ------------------------------------------------------------------------
class MediaFileAdminForm(forms.ModelForm):
class Meta:
model = MediaFile
widgets = { 'file': AdminFileWithPreviewWidget }
def __init__(self, *args, **kwargs):
super(MediaFileAdminForm, self).__init__(*args, **kwargs)
if settings.FEINCMS_MEDIAFILE_OVERWRITE and self.instance.id:
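            # patch the field's filename generation once: when a file is
            # replaced, the old file is deleted and its name reused so that
            # existing links to the media file keep working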
if not hasattr(self.instance.file.field, '_feincms_generate_filename_patched'):
orig_generate_filename = self.instance.file.field.generate_filename
def _gen_fname(instance, filename):
if instance.id and hasattr(instance, 'original_name'):
logger.info("Overwriting file %s with new data" % instance.original_name)
instance.file.storage.delete(instance.original_name)
return instance.original_name
return orig_generate_filename(instance, filename)
self.instance.file.field.generate_filename = _gen_fname
self.instance.file.field._feincms_generate_filename_patched = True
def clean_file(self):
if settings.FEINCMS_MEDIAFILE_OVERWRITE and self.instance.id:
new_base, new_ext = os.path.splitext(self.cleaned_data['file'].name)
old_base, old_ext = os.path.splitext(self.instance.file.name)
if new_ext.lower() != old_ext.lower():
raise forms.ValidationError(_("Cannot overwrite with different file type (attempt to overwrite a %(old_ext)s with a %(new_ext)s)") % { 'old_ext': old_ext, 'new_ext': new_ext })
self.instance.original_name = self.instance.file.name
return self.cleaned_data['file']
# ------------------------------------------------------------------------
| pjdelport/feincms | feincms/module/medialibrary/forms.py | Python | bsd-3-clause | 2,901 |
import os
import sys
from copy import copy
from collections import OrderedDict
import traceback
import atexit
import neo
from data_provider import DataProvider
from .. import conversions as convert
class NeoDataProvider(DataProvider):
""" Base class for data providers using NEO"""
# Dictionary of block lists, indexed by (filename, block index) tuples
loaded_blocks = {}
# Dictionary of index in file, indexed by block object
block_indices = {}
# Dictionary of io, indexed by block object
block_ios = {}
    # Dictionary of (IO class name, read parameters) tuples for loaded blocks
block_read_params = {}
# Mode for data lazy loading:
# 0 - Full load
# 1 - Lazy load
# 2 - Caching lazy load
data_lazy_mode = 0
# Mode for lazy cascade
cascade_lazy = False
# Forced IO class for all files. If None, determine by file extension.
forced_io = None
# Active IO read parameters (dictionary indexed by IO class)
io_params = {}
def __init__(self, name, progress):
super(NeoDataProvider, self).__init__(name, progress)
@classmethod
def clear(cls):
""" Clears cached blocks
"""
cls.loaded_blocks.clear()
cls.block_indices.clear()
cls.block_read_params.clear()
ios = set()
for io in cls.block_ios.itervalues():
if io in ios:
continue
if hasattr(io, 'close'):
io.close()
ios.add(io)
cls.block_ios.clear()
@classmethod
def get_block(cls, filename, index, lazy=None, force_io=None,
read_params=None):
""" Return the block at the given index in the specified file.
:param str filename: Path to the file from which to load the block.
:param int index: The index of the block in the file.
:param int lazy: Override global lazy setting if not ``None``:
0 regular load, 1 lazy load, 2 caching lazy load.
:param force_io: Override global forced_io for the Neo IO class
to use when loading the file. If ``None``, the global
forced_io is used.
:param dict read_params: Override read parameters for the IO that
will load the block. If ``None``, the global io_params are
used.
"""
if lazy is None:
lazy = cls.data_lazy_mode > 0
else:
lazy = lazy > 0
if force_io is None:
force_io = cls.forced_io
if filename in cls.loaded_blocks:
return cls.loaded_blocks[filename][index]
io, blocks = cls._load_neo_file(filename, lazy, force_io, read_params)
if io and not lazy and not cls.cascade_lazy and hasattr(io, 'close'):
io.close()
if blocks is None:
return None
return blocks[index]
@classmethod
def get_blocks(cls, filename, lazy=None, force_io=None,
read_params=None):
""" Return a list of blocks loaded from the specified file
:param str filename: Path to the file from which to load the blocks.
:param int lazy: Override global lazy setting if not ``None``:
0 regular load, 1 lazy load, 2 caching lazy load.
:param force_io: Override global forced_io for the Neo IO class
to use when loading the file. If ``None``, the global
forced_io is used.
:param dict read_params: Override read parameters for the IO that
will load the block. If ``None``, the global io_params are
used.
"""
if lazy is None:
lazy = cls.data_lazy_mode > 0
else:
lazy = lazy > 0
if force_io is None:
force_io = cls.forced_io
if filename in cls.loaded_blocks:
return cls.loaded_blocks[filename]
io, blocks = cls._load_neo_file(filename, lazy, force_io, read_params)
if io and not lazy and not cls.cascade_lazy and hasattr(io, 'close'):
io.close()
return blocks
@classmethod
def _load_neo_file(cls, filename, lazy, force_io, read_params):
""" Returns a NEO io object and a list of contained blocks for a
file name. This function also caches all loaded blocks
:param str filename: The full path of the file (relative or absolute).
:param bool lazy: Determines if lazy mode is used for Neo io.
:param force_io: IO class to use for loading. If None, determined
by file extension or through trial and error for directories.
:param dict read_params: Override read parameters for the IO that
will load the block. If ``None``, the global io_params are
used.
"""
cascade = 'lazy' if cls.cascade_lazy else True
if os.path.isdir(filename):
if force_io:
try:
n_io = force_io(filename)
if read_params is None:
rp = cls.io_params.get(force_io, {})
else:
rp = read_params
content = n_io.read(lazy=lazy, cascade=cascade, **rp)
if force_io == neo.TdtIO and \
isinstance(content, neo.Block) and \
not content.segments:
# TdtIO can produce empty blocks for invalid dirs
sys.stderr.write(
'Could not load any blocks from "%s"' % filename)
return None, None
return cls._content_loaded(
content, filename, lazy, n_io, rp)
except Exception, e:
sys.stderr.write(
'Load error for directory "%s":\n' % filename)
tb = sys.exc_info()[2]
while not ('self' in tb.tb_frame.f_locals and
tb.tb_frame.f_locals['self'] == n_io):
if tb.tb_next is not None:
tb = tb.tb_next
else:
break
traceback.print_exception(type(e), e, tb)
else:
for io in neo.io.iolist:
if io.mode == 'dir':
try:
n_io = io(filename)
if read_params is None:
                            rp = cls.io_params.get(io, {})
else:
rp = read_params
content = n_io.read(lazy=lazy, cascade=cascade, **rp)
if io == neo.TdtIO and \
isinstance(content, neo.Block) and \
not content.segments:
# TdtIO can produce empty blocks for invalid dirs
continue
return cls._content_loaded(
content, filename, lazy, n_io, rp)
except Exception, e:
sys.stderr.write(
'Load error for directory "%s":\n' % filename)
tb = sys.exc_info()[2]
while not ('self' in tb.tb_frame.f_locals and
tb.tb_frame.f_locals['self'] == n_io):
if tb.tb_next is not None:
tb = tb.tb_next
else:
break
traceback.print_exception(type(e), e, tb)
else:
if force_io:
if read_params is None:
rp = cls.io_params.get(force_io, {})
else:
rp = read_params
return cls._load_file_with_io(filename, force_io, lazy, rp)
extension = filename.split('.')[-1]
for io in neo.io.iolist:
if extension in io.extensions:
if read_params is None:
rp = cls.io_params.get(io, {})
else:
rp = read_params
return cls._load_file_with_io(filename, io, lazy, rp)
return None, None
@classmethod
def _content_loaded(cls, content, filename, lazy, n_io, read_params):
if isinstance(content, neo.Block): # Neo 0.2.1
cls.block_indices[content] = 0
cls.loaded_blocks[filename] = [content]
cls.block_read_params[content] = (type(n_io).__name__, read_params)
if lazy or cls.cascade_lazy:
cls.block_ios[content] = n_io
return n_io, [content]
# Neo >= 0.3.0, read() returns a list of blocks
blocks = content
for i, b in enumerate(blocks):
cls.block_indices[b] = i
cls.block_read_params[b] = (type(n_io).__name__, read_params)
if lazy or cls.cascade_lazy:
cls.block_ios[b] = n_io
cls.loaded_blocks[filename] = blocks
return n_io, blocks
@classmethod
def _load_file_with_io(cls, filename, io, lazy, read_params):
if io == neo.NeoHdf5IO:
# Fix unicode problem with pyinstaller
if hasattr(sys, 'frozen'):
filename = filename.encode('UTF-8')
n_io = io(filename=filename)
if read_params is None:
rp = cls.io_params.get(io, {})
else:
rp = read_params
try:
cascade = 'lazy' if cls.cascade_lazy else True
if hasattr(io, 'read_all_blocks'): # Neo 0.2.1
content = n_io.read_all_blocks(lazy=lazy, cascade=cascade, **rp)
else:
content = n_io.read(lazy=lazy, cascade=cascade, **rp)
return cls._content_loaded(content, filename, lazy, n_io, rp)
except Exception, e:
sys.stderr.write(
'Load error for file "%s":\n' % filename)
tb = sys.exc_info()[2]
while not ('self' in tb.tb_frame.f_locals and
tb.tb_frame.f_locals['self'] == n_io):
if tb.tb_next is not None:
tb = tb.tb_next
else:
break
traceback.print_exception(type(e), e, tb)
return None, None
@classmethod
def _get_data_from_viewer(cls, viewer):
""" Return a dictionary with selection information from viewer
"""
        # The links in this data format are based on list indices
data = {}
data['type'] = 'Neo'
# Block entry: (Index of block in file, file location of block,
# block IO class name, block IO read parameters)
block_list = []
block_indices = {}
selected_blocks = viewer.neo_blocks()
block_files = viewer.neo_block_file_names()
for b in selected_blocks:
block_indices[b] = len(block_list)
block_list.append([NeoDataProvider.block_indices[b],
block_files[b],
cls.block_read_params[b][0],
cls.block_read_params[b][1]])
data['blocks'] = block_list
# Recording channel group entry:
# (Index of rcg in block, index of block)
rcg_list = []
rcg_indices = {}
selected_rcg = viewer.neo_channel_groups()
for rcg in selected_rcg:
rcg_indices[rcg] = len(rcg_list)
idx = rcg.block.recordingchannelgroups.index(rcg)
rcg_list.append([idx, block_indices[rcg.block]])
data['channel_groups'] = rcg_list
# Recording channel entry: (Index of channel in rcg, index of rcg)
# There can be multiple channel entries for one channel object, if
# it is part of multiple channel groups
channel_list = []
selected_channels = viewer.neo_channels()
for c in selected_channels:
for rcg in c.recordingchannelgroups:
if rcg in rcg_indices:
idx = rcg.recordingchannels.index(c)
channel_list.append([idx, rcg_indices[rcg]])
data['channels'] = channel_list
# Segment entry: (Index of segment in block, index of block)
segment_list = []
segment_indices = {}
selected_segments = viewer.neo_segments()
for s in selected_segments:
segment_indices[s] = len(segment_list)
idx = s.block.segments.index(s)
segment_list.append([idx, block_indices[s.block]])
data['segments'] = segment_list
# Unit entry: (Index of unit in rcg, index of rcg)
unit_list = []
selected_units = viewer.neo_units()
for u in selected_units:
segment_indices[u] = len(segment_list)
rcg_id = None if u.recordingchannelgroup is None \
else u.recordingchannelgroup.units.index(u)
rcg = rcg_indices[u.recordingchannelgroup] \
if u.recordingchannelgroup else None
unit_list.append([rcg_id, rcg])
data['units'] = unit_list
return data
@staticmethod
def find_io_class(name):
""" Return the Neo IO class with a given name.
:param str name: Class name of the desired IO class.
"""
for io in neo.io.iolist:
if io.__name__ == name:
return io
return None
def _active_block(self, old):
""" Return a copy of all selected elements in the given block.
Only container objects are copied, data objects are linked.
Needs to load all lazily loaded objects and will cache them
        regardless of the current lazy_mode.
"""
block = copy(old)
block.segments = []
selected_segments = set(self.segments() + [None])
selected_rcgs = set(self.recording_channel_groups() + [None])
selected_channels = set(self.recording_channels() + [None])
selected_units = set(self.units() + [None])
for s in old.segments:
if s in selected_segments:
segment = copy(s)
segment.analogsignals = [self._load_lazy_object(sig, True)
for sig in s.analogsignals
if sig.recordingchannel
in selected_channels]
segment.analogsignalarrays = [
self._load_lazy_object(asa, True)
for asa in s.analogsignalarrays
if asa.recordingchannelgroup in selected_rcgs]
segment.irregularlysampledsignals = [
self._load_lazy_object(iss, True)
for iss in s.irregularlysampledsignals
if iss.recordingchannel in selected_channels]
segment.spikes = [self._load_lazy_object(sp, True)
for sp in s.spikes
if sp.unit in selected_units]
segment.spiketrains = [self._load_lazy_object(st, True)
for st in s.spiketrains
if st.unit in selected_units]
segment.block = block
block.segments.append(segment)
block.recordingchannelgroups = []
for old_rcg in old.recordingchannelgroups:
if old_rcg in selected_rcgs:
rcg = copy(old_rcg)
rcg.analogsignalarrays = [
self._load_lazy_object(asa, True)
for asa in old_rcg.analogsignalarrays
if asa.segment in selected_segments]
rcg.recordingchannels = []
for c in old_rcg.recordingchannels:
                    if c not in selected_channels:
continue
channel = copy(c)
channel.analogsignals = [
self._load_lazy_object(sig, True)
for sig in c.analogsignals
if sig.segment in selected_segments]
channel.irregularlysampledsignals = [
self._load_lazy_object(iss, True)
for iss in c.irregularlysampledsignals
if iss.segment in selected_segments]
channel.recordingchannelgroups = copy(
c.recordingchannelgroups)
channel.recordingchannelgroups.insert(
channel.recordingchannelgroups.index(old_rcg), rcg)
channel.recordingchannelgroups.remove(old_rcg)
rcg.recordingchannels.append(channel)
rcg.units = []
for u in old_rcg.units:
                    if u not in selected_units:
continue
unit = copy(u)
unit.spikes = [self._load_lazy_object(sp, True)
for sp in u.spikes
if sp.segment in selected_segments]
unit.spiketrains = [self._load_lazy_object(st, True)
for st in u.spiketrains
if st.segment in selected_segments]
unit.recordingchannelgroup = rcg
rcg.units.append(unit)
rcg.block = block
block.recordingchannelgroups.append(rcg)
return block
def _get_object_io(self, o):
""" Find the IO for an object. Return ``None`` if no IO exists.
"""
if o.segment:
return self.block_ios.get(o.segment.block, None)
        if hasattr(o, 'recordingchannelgroups'):
if o.recordingchannelgroups:
return self.block_ios.get(
o.recordingchannelgroups[0].block, None)
        if hasattr(o, 'recordingchannel'):
c = o.recordingchannel
if c.recordingchannelgroups:
return self.block_ios.get(
c.recordingchannelgroups[0].block, None)
return None
def _load_lazy_object(self, o, change_links=False):
""" Return a loaded version of a lazily loaded object. The IO
needs a ``read_lazy_object`` that takes a lazily loaded data object
as parameter method for this to work.
:param o: The object to load.
:param bool change_links: If ``True``, replace the old object
in the hierarchy.
"""
if not hasattr(o, 'lazy_shape'):
return o
io = self._get_object_io(o)
if io:
if hasattr(io, 'load_lazy_object'):
ret = io.load_lazy_object(o)
elif isinstance(io, neo.io.NeoHdf5IO):
ret = io.get(o.hdf5_path, cascade=False, lazy=False)
else:
return o
ret.segment = o.segment
if hasattr(o, 'recordingchannelgroup'):
ret.recordingchannelgroup = o.recordingchannelgroup
elif hasattr(o, 'recordingchannel'):
ret.recordingchannel = o.recordingchannel
elif hasattr(o, 'unit'):
ret.unit = o.unit
if change_links:
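                # swap the loaded object in for the lazy one wherever the
                # parents reference it: the segment's list and, if present,
                # the channel/channelgroup/unit list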
name = type(o).__name__.lower() + 's'
l = getattr(o.segment, name)
try:
l[l.index(o)] = ret
except ValueError:
l.append(ret)
l = None
if hasattr(o, 'recordingchannelgroup'):
l = getattr(o.recordingchannelgroup, name)
elif hasattr(o, 'recordingchannel'):
l = getattr(o.recordingchannel, name)
elif hasattr(o, 'unit'):
l = getattr(o.unit, name)
if l is not None:
try:
l[l.index(o)] = ret
except ValueError:
l.append(ret)
return ret
return o
def _load_object_list(self, objects):
""" Return a list of loaded objects for a list of (potentially)
lazily loaded objects.
"""
ret = []
for o in objects:
ret.append(self._load_lazy_object(o, self.data_lazy_mode > 1))
return ret
def _load_object_dict(self, objects):
""" Return a dictionary (without changing indices) of loaded
objects for a dictionary of (potentially) lazily loaded objects.
"""
for k, v in objects.items():
if isinstance(v, list):
objects[k] = self._load_object_list(v)
elif isinstance(v, dict):
for ik, iv in v.items():
v[ik] = self._load_lazy_object(iv, self.data_lazy_mode > 1)
else:
raise ValueError(
'Only dicts or lists are supported as dictionary values!')
return objects
def selection_blocks(self):
""" Return a list of selected blocks.
"""
return [self._active_block(b) for b in self.blocks()]
def spike_trains(self):
""" Return a list of :class:`neo.core.SpikeTrain` objects.
"""
trains = []
units = set(self.units())
for s in self.segments():
trains.extend([t for t in s.spiketrains if t.unit in units or
t.unit is None])
for u in self.units():
trains.extend([t for t in u.spiketrains if t.segment is None])
return self._load_object_list(trains)
def spike_trains_by_unit(self):
""" Return a dictionary (indexed by Unit) of lists of
:class:`neo.core.SpikeTrain` objects.
"""
trains = OrderedDict()
segments = set(self.segments())
for u in self.units():
st = [t for t in u.spiketrains if t.segment in segments or
t.segment is None]
if st:
trains[u] = st
nonetrains = []
for s in self.segments():
nonetrains.extend([t for t in s.spiketrains if t.unit is None])
if nonetrains:
trains[self.no_unit] = nonetrains
return self._load_object_dict(trains)
def spike_trains_by_segment(self):
""" Return a dictionary (indexed by Segment) of lists of
:class:`neo.core.SpikeTrain` objects.
"""
trains = OrderedDict()
units = self.units()
for s in self.segments():
st = [t for t in s.spiketrains if t.unit in units or
t.unit is None]
if st:
trains[s] = st
nonetrains = []
for u in self.units():
nonetrains.extend([t for t in u.spiketrains if t.segment is None])
if nonetrains:
trains[self.no_segment] = nonetrains
return self._load_object_dict(trains)
def spike_trains_by_unit_and_segment(self):
""" Return a dictionary (indexed by Unit) of dictionaries
(indexed by Segment) of :class:`neo.core.SpikeTrain` objects.
"""
trains = OrderedDict()
segments = self.segments()
for u in self.units():
for s in segments:
segtrains = [t for t in u.spiketrains if t.segment == s]
if segtrains:
if u not in trains:
trains[u] = OrderedDict()
trains[u][s] = segtrains[0]
nonetrains = [t for t in u.spiketrains if t.segment is None]
if nonetrains:
if u not in trains:
trains[u] = OrderedDict()
trains[u][self.no_segment] = nonetrains[0]
nonetrains = OrderedDict()
for s in self.segments():
segtrains = [t for t in s.spiketrains if t.unit is None]
if segtrains:
nonetrains[s] = segtrains[0]
if nonetrains:
trains[self.no_unit] = nonetrains
return self._load_object_dict(trains)
def spikes(self):
""" Return a list of :class:`neo.core.Spike` objects.
"""
spikes = []
units = self.units()
for s in self.segments():
spikes.extend([t for t in s.spikes if t.unit in units or
t.unit is None])
for u in self.units():
spikes.extend([t for t in u.spikes if t.segment is None])
return self._load_object_list(spikes)
def spikes_by_unit(self):
""" Return a dictionary (indexed by Unit) of lists of
:class:`neo.core.Spike` objects.
"""
spikes = OrderedDict()
segments = self.segments()
for u in self.units():
sp = [t for t in u.spikes if t.segment in segments or
t.segment is None]
if sp:
spikes[u] = sp
nonespikes = []
for s in self.segments():
nonespikes.extend([t for t in s.spikes if t.unit is None])
if nonespikes:
spikes[self.no_unit] = nonespikes
return self._load_object_dict(spikes)
def spikes_by_segment(self):
""" Return a dictionary (indexed by Segment) of lists of
:class:`neo.core.Spike` objects.
"""
spikes = OrderedDict()
units = self.units()
for s in self.segments():
sp = [t for t in s.spikes if t.unit in units or
t.unit is None]
if sp:
spikes[s] = sp
nonespikes = []
for u in self.units():
nonespikes.extend([t for t in u.spikes if t.segment is None])
if nonespikes:
spikes[self.no_segment] = nonespikes
return self._load_object_dict(spikes)
def spikes_by_unit_and_segment(self):
""" Return a dictionary (indexed by Unit) of dictionaries
(indexed by Segment) of :class:`neo.core.Spike` lists.
"""
spikes = OrderedDict()
segments = self.segments()
for u in self.units():
for s in segments:
segtrains = [t for t in u.spikes if t.segment == s]
if segtrains:
if u not in spikes:
spikes[u] = OrderedDict()
spikes[u][s] = segtrains
nonespikes = [t for t in u.spikes if t.segment is None]
if nonespikes:
if u not in spikes:
spikes[u] = OrderedDict()
spikes[u][self.no_segment] = nonespikes
nonespikes = OrderedDict()
for s in self.segments():
segspikes = [t for t in s.spikes if t.unit is None]
if segspikes:
nonespikes[s] = segspikes
if nonespikes:
spikes[self.no_unit] = nonespikes
return self._load_object_dict(spikes)
def events(self, include_array_events=True):
""" Return a dictionary (indexed by Segment) of lists of
Event objects.
"""
ret = OrderedDict()
for s in self.segments():
if s.events:
ret[s] = s.events
if include_array_events:
for a in s.eventarrays:
if s not in ret:
ret[s] = []
ret[s].extend(convert.event_array_to_events(a))
return ret
def labeled_events(self, label, include_array_events=True):
""" Return a dictionary (indexed by Segment) of lists of Event
objects with the given label.
"""
ret = OrderedDict()
for s in self.segments():
events = [e for e in s.events if e.label == label]
if events:
ret[s] = events
if include_array_events:
for a in s.eventarrays:
if s not in ret:
ret[s] = []
events = convert.event_array_to_events(a)
ret[s].extend((e for e in events if e.label == label))
return ret
def event_arrays(self):
""" Return a dictionary (indexed by Segment) of lists of
EventArray objects.
"""
ret = OrderedDict()
for s in self.segments():
if s.eventarrays:
ret[s] = s.eventarrays
return self._load_object_dict(ret)
def epochs(self, include_array_epochs=True):
""" Return a dictionary (indexed by Segment) of lists of
Epoch objects.
"""
ret = OrderedDict()
for s in self.segments():
if s.epochs:
ret[s] = s.epochs
if include_array_epochs:
for a in s.epocharrays:
if s not in ret:
ret[s] = []
ret[s].extend(convert.epoch_array_to_epochs(a))
return ret
def labeled_epochs(self, label, include_array_epochs=True):
""" Return a dictionary (indexed by Segment) of lists of Epoch
objects with the given label.
"""
ret = OrderedDict()
for s in self.segments():
epochs = [e for e in s.epochs if e.label == label]
if epochs:
ret[s] = epochs
if include_array_epochs:
for a in s.epocharrays:
if s not in ret:
ret[s] = []
epochs = convert.epoch_array_to_epochs(a)
ret[s].extend((e for e in epochs if e.label == label))
return ret
def epoch_arrays(self):
""" Return a dictionary (indexed by Segment) of lists of
EpochArray objects.
"""
ret = OrderedDict()
for s in self.segments():
if s.epocharrays:
ret[s] = s.epocharrays
return self._load_object_dict(ret)
def analog_signals(self, conversion_mode=1):
""" Return a list of :class:`neo.core.AnalogSignal` objects.
"""
signals = []
channels = self.recording_channels()
if conversion_mode == 1 or conversion_mode == 3:
for s in self.segments():
signals.extend([t for t in s.analogsignals
if t.recordingchannel in channels or
t.recordingchannel is None])
for u in self.recording_channels():
signals.extend([t for t in u.analogsignals
if t.segment is None])
if conversion_mode > 1:
for sa in self.analog_signal_arrays():
for sig in convert.analog_signal_array_to_analog_signals(sa):
if (sig.recordingchannel is None or
sig.recordingchannel in channels):
signals.append(sig)
return self._load_object_list(signals)
def analog_signals_by_segment(self, conversion_mode=1):
""" Return a dictionary (indexed by Segment) of lists of
:class:`neo.core.AnalogSignal` objects.
"""
signals = OrderedDict()
channels = self.recording_channels()
if conversion_mode == 1 or conversion_mode == 3:
for s in self.segments():
sig = []
for c in channels:
sig.extend([t for t in c.analogsignals
if t.segment == s])
sig.extend([t for t in s.analogsignals
if t.recordingchannel is None])
if sig:
signals[s] = sig
nonesignals = []
for c in channels:
nonesignals.extend([t for t in c.analogsignals
if t.segment is None])
if nonesignals:
signals[self.no_segment] = nonesignals
if conversion_mode > 1:
for o, sa_list in \
self.analog_signal_arrays_by_segment().iteritems():
for sa in sa_list:
for sig in \
convert.analog_signal_array_to_analog_signals(sa):
if sig.recordingchannel is None or \
sig.recordingchannel in channels:
if o not in signals:
signals[o] = []
signals[o].append(sig)
return self._load_object_dict(signals)
def analog_signals_by_channel(self, conversion_mode=1):
""" Return a dictionary (indexed by RecordingChannel) of lists
of :class:`neo.core.AnalogSignal` objects.
"""
signals = OrderedDict()
channels = self.recording_channels()
if conversion_mode == 1 or conversion_mode == 3:
segments = self.segments()
for c in channels:
sig = [t for t in c.analogsignals
if t.segment in segments or
t.segment is None]
if sig:
signals[c] = sig
nonesignals = []
for s in segments:
nonesignals.extend([t for t in s.analogsignals
if t.recordingchannel is None])
if nonesignals:
signals[self.no_channel] = nonesignals
if conversion_mode > 1:
for o, sa_list in \
self.analog_signal_arrays_by_channelgroup().iteritems():
for sa in sa_list:
for sig in \
convert.analog_signal_array_to_analog_signals(sa):
if sig.recordingchannel is None:
if self.no_channel not in signals:
signals[self.no_channel] = [sig]
else:
signals[self.no_channel].append(sig)
elif sig.recordingchannel in channels:
if sig.recordingchannel not in signals:
signals[sig.recordingchannel] = [sig]
else:
signals[sig.recordingchannel].append(sig)
return self._load_object_dict(signals)
def analog_signals_by_channel_and_segment(self, conversion_mode=1):
""" Return a dictionary (indexed by RecordingChannel) of
dictionaries (indexed by Segment) of :class:`neo.core.AnalogSignal`
lists.
"""
signals = OrderedDict()
channels = self.recording_channels()
if conversion_mode == 1 or conversion_mode == 3:
segments = self.segments()
for c in channels:
for s in segments:
segsignals = [t for t in c.analogsignals if t.segment == s]
if segsignals:
if c not in signals:
signals[c] = OrderedDict()
signals[c][s] = segsignals
nonesignals = [t for t in c.analogsignals if t.segment is None]
if nonesignals:
if c not in signals:
signals[c] = OrderedDict()
signals[c][self.no_segment] = nonesignals
nonesignals = OrderedDict()
for s in self.segments():
segsignals = [t for t in s.analogsignals
if t.recordingchannel is None]
if segsignals:
nonesignals[s] = segsignals
if nonesignals:
signals[self.no_channel] = nonesignals
if conversion_mode > 1:
sigs = self.analog_signal_arrays_by_channelgroup_and_segment()
for cg, inner in sigs.iteritems():
for seg, sa_list in inner.iteritems():
for sa in sa_list:
for sig in convert.analog_signal_array_to_analog_signals(sa):
chan = sig.recordingchannel
if chan not in channels:
continue
if chan not in signals:
signals[chan] = OrderedDict()
if seg not in signals[chan]:
signals[chan][seg] = []
signals[chan][seg].append(sig)
return self._load_object_dict(signals)
def analog_signal_arrays(self):
""" Return a list of :class:`neo.core.AnalogSignalArray` objects.
"""
signals = []
channelgroups = self.recording_channel_groups()
for s in self.segments():
signals.extend([t for t in s.analogsignalarrays
if t.recordingchannelgroup in channelgroups or
t.recordingchannelgroup is None])
for u in channelgroups:
signals.extend([t for t in u.analogsignalarrays
if t.segment is None])
return self._load_object_list(signals)
def analog_signal_arrays_by_segment(self):
""" Return a dictionary (indexed by Segment) of lists of
:class:`neo.core.AnalogSignalArray` objects.
"""
signals = OrderedDict()
channelgroups = self.recording_channel_groups()
for s in self.segments():
sa = []
for c in channelgroups:
sa.extend([t for t in c.analogsignalarrays
if t.segment == s])
sa.extend([t for t in s.analogsignalarrays
if t.recordingchannelgroup is None])
if sa:
signals[s] = sa
nonesignals = []
for c in channelgroups:
nonesignals.extend([t for t in c.analogsignalarrays
if t.segment is None])
if nonesignals:
signals[self.no_segment] = nonesignals
return self._load_object_dict(signals)
def analog_signal_arrays_by_channelgroup(self):
""" Return a dictionary (indexed by RecordingChannelGroup) of
lists of :class:`neo.core.AnalogSignalArray` objects.
"""
signals = OrderedDict()
segments = self.segments()
for c in self.recording_channel_groups():
sa = [t for t in c.analogsignalarrays
if t.segment in segments]
if sa:
signals[c] = sa
nonesignals = []
for s in segments:
nonesignals.extend([t for t in s.analogsignalarrays
if t.recordingchannelgroup is None])
if nonesignals:
signals[self.no_channelgroup] = nonesignals
return self._load_object_dict(signals)
def analog_signal_arrays_by_channelgroup_and_segment(self):
""" Return a dictionary (indexed by RecordingChannelGroup) of
dictionaries (indexed by Segment) of
:class:`neo.core.AnalogSignalArray` lists.
"""
signals = OrderedDict()
segments = self.segments()
for c in self.recording_channel_groups():
for s in segments:
segsignals = [t for t in c.analogsignalarrays
if t.segment == s]
if segsignals:
if c not in signals:
signals[c] = OrderedDict()
signals[c][s] = segsignals
nonesignals = [t for t in c.analogsignalarrays
if t.segment is None]
if nonesignals:
if c not in signals:
signals[c] = OrderedDict()
signals[c][self.no_segment] = nonesignals
nonesignals = OrderedDict()
for s in self.segments():
segsignals = [t for t in s.analogsignalarrays
if t.recordingchannelgroup is None]
if segsignals:
nonesignals[s] = segsignals
if nonesignals:
signals[self.no_channelgroup] = nonesignals
return self._load_object_dict(signals)
atexit.register(NeoDataProvider.clear)
| rproepp/spykeutils | spykeutils/plugin/data_provider_neo.py | Python | bsd-3-clause | 40,506 |
from __future__ import absolute_import
import operator
from django.db import models
from django.db.models import Q
from django.utils import timezone
from sentry.db.models import Model, sane_repr
from sentry.db.models.fields import FlexibleForeignKey, JSONField
from sentry.ownership.grammar import load_schema
from functools import reduce
class ProjectOwnership(Model):
__core__ = True
project = FlexibleForeignKey("sentry.Project", unique=True)
raw = models.TextField(null=True)
schema = JSONField(null=True)
fallthrough = models.BooleanField(default=True)
auto_assignment = models.BooleanField(default=False)
date_created = models.DateTimeField(default=timezone.now)
last_updated = models.DateTimeField(default=timezone.now)
is_active = models.BooleanField(default=True)
# An object to indicate ownership is implicitly everyone
Everyone = object()
class Meta:
app_label = "sentry"
db_table = "sentry_projectownership"
__repr__ = sane_repr("project_id", "is_active")
@classmethod
def get_owners(cls, project_id, data):
"""
        For a given project_id and event data blob, return the matching owners.
If Everyone is returned, this means we implicitly are
falling through our rules and everyone is responsible.
If an empty list is returned, this means there are explicitly
no owners.
"""
try:
ownership = cls.objects.get(project_id=project_id)
except cls.DoesNotExist:
ownership = cls(project_id=project_id)
rules = cls._matching_ownership_rules(ownership, project_id, data)
if not rules:
return cls.Everyone if ownership.fallthrough else [], None
owners = {o for rule in rules for o in rule.owners}
return filter(None, resolve_actors(owners, project_id).values()), rules
@classmethod
def get_autoassign_owner(cls, project_id, data):
"""
Get the auto-assign owner for a project if there are any.
Will return None if there are no owners, or a list of owners.
"""
try:
ownership = cls.objects.get(project_id=project_id)
except cls.DoesNotExist:
return None
if not ownership.auto_assignment:
return None
rules = cls._matching_ownership_rules(ownership, project_id, data)
if not rules:
return None
score = 0
owners = None
# Automatic assignment prefers the owner with the longest
# matching pattern as the match is more specific.
for rule in rules:
candidate = len(rule.matcher.pattern)
if candidate > score:
score = candidate
owners = rule.owners
actors = filter(None, resolve_actors(owners, project_id).values())
# Can happen if the ownership rule references a user/team that no longer
# is assigned to the project or has been removed from the org.
if not actors:
return None
return actors[0].resolve()
@classmethod
def _matching_ownership_rules(cls, ownership, project_id, data):
rules = []
if ownership.schema is not None:
for rule in load_schema(ownership.schema):
if rule.test(data):
rules.append(rule)
return rules
def resolve_actors(owners, project_id):
""" Convert a list of Owner objects into a dictionary
of {Owner: Actor} pairs. Actors not identified are returned
as None. """
from sentry.api.fields.actor import Actor
from sentry.models import User, Team
if not owners:
return {}
users, teams = [], []
owners_lookup = {}
for owner in owners:
        # teams aren't technically case insensitive, but teams also
        # aren't allowed to have non-lowercase characters in slugs, so
        # this kinda works itself out correctly since they won't match
owners_lookup[(owner.type, owner.identifier.lower())] = owner
if owner.type == "user":
users.append(owner)
elif owner.type == "team":
teams.append(owner)
actors = {}
if users:
actors.update(
{
("user", email.lower()): Actor(u_id, User)
for u_id, email in User.objects.filter(
reduce(operator.or_, [Q(emails__email__iexact=o.identifier) for o in users]),
# We don't require verified emails
# emails__is_verified=True,
is_active=True,
sentry_orgmember_set__organizationmemberteam__team__projectteam__project_id=project_id,
)
.distinct()
.values_list("id", "emails__email")
}
)
if teams:
actors.update(
{
("team", slug): Actor(t_id, Team)
for t_id, slug in Team.objects.filter(
slug__in=[o.identifier for o in teams], projectteam__project_id=project_id
).values_list("id", "slug")
}
)
return {o: actors.get((o.type, o.identifier.lower())) for o in owners}
| mvaled/sentry | src/sentry/models/projectownership.py | Python | bsd-3-clause | 5,206 |
from rsqueakvm.util.cells import QuasiConstant
from rsqueakvm.plugins.vmdebugging.model import wrap_oplist, wrap_greenkey, wrap_debug_info
from rpython.rlib.jit import JitHookInterface, Counters
jit_iface_recursion = QuasiConstant(False)
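# make_hook splices the source of `func` into a wrapper template: the wrapper
# fires only for the interpreter's jit driver, looks up the image-side hook
# selector and receiver, builds the argument list via `func`, and performs the
# hook. jit_iface_recursion guards against the hook re-triggering itself.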
def make_hook(args, func):
import inspect, re
src = "\n".join([
re.sub("^\\s+", " " * 12, line) for line in inspect.getsource(func).split("\n")[1:]
])
code = [
"def f(%s):" % (args),
" from rsqueakvm import constants",
" from rsqueakvm.interpreter import jit_driver_name",
" from rsqueakvm.model.variable import W_BytesObject",
" if jitdriver.name != jit_driver_name: return",
" space = self.space",
" if jit_iface_recursion.is_set(): return",
" interp = space.interp.get()",
" w_jithook = space.w_jit_hook_selector()",
" w_rcvr = space.w_jit_hook_receiver()",
" if w_jithook and isinstance(w_jithook, W_BytesObject) and w_rcvr:",
" w_method = w_rcvr.class_shadow(space).lookup(w_jithook)",
" if w_method is None: return",
" jit_iface_recursion.activate()",
" try:",
" args_w = func(%s)" % args,
" interp.perform_headless(w_rcvr, w_jithook, [space.wrap_list(args_w)])",
" finally:",
" jit_iface_recursion.deactivate()"
]
d = {
"jit_iface_recursion": jit_iface_recursion,
"func": func,
}
exec compile("\n".join(code), __file__, 'exec') in d
return d["f"]
class JitIface(JitHookInterface):
def prepare_abort(self, reason, jitdriver, greenkey, greenkey_repr, logops, operations):
space = self.space
return [
space.wrap_string('abort'),
wrap_greenkey(space, jitdriver, greenkey, greenkey_repr),
space.wrap_string(Counters.counter_names[reason]),
wrap_oplist(space, logops, operations)]
on_abort = make_hook(
"self, reason, jitdriver, greenkey, greenkey_repr, logops, operations",
prepare_abort
)
def prepare_trace_too_long(self, jitdriver, greenkey, greenkey_repr):
space = self.space
return [
space.wrap_string('trace_too_long'),
wrap_greenkey(space, jitdriver, greenkey, greenkey_repr)]
on_trace_too_long = make_hook(
"self, jitdriver, greenkey, greenkey_repr",
prepare_trace_too_long
)
def prepare_compile_hook(self, jitdriver, debug_info, is_bridge):
space = self.space
return [
space.wrap_string('compile_loop' if not is_bridge else 'compile_bridge'),
wrap_debug_info(space, debug_info, is_bridge=is_bridge)]
wrapped_compiled_hook = make_hook(
"self, jitdriver, debug_info, is_bridge",
prepare_compile_hook
)
def _compile_hook(self, debug_info, is_bridge=False):
jitdriver = debug_info.get_jitdriver()
self.wrapped_compiled_hook(jitdriver, debug_info, is_bridge)
def after_compile(self, debug_info): self._compile_hook(debug_info, is_bridge=False)
def after_compile_bridge(self, debug_info): self._compile_hook(debug_info, is_bridge=True)
def before_compile(self, debug_info): pass
def before_compile_bridge(self, debug_info): pass
jitiface = JitIface()
| HPI-SWA-Lab/RSqueak | rsqueakvm/plugins/vmdebugging/hooks.py | Python | bsd-3-clause | 3,381 |
import email.utils
import time
import cgi
try:
    # the container ABCs live in collections.abc since Python 3.3;
    # plain collections.Iterable was removed in Python 3.10
    from collections.abc import Iterable
except ImportError:  # Python 2
    from collections import Iterable
from .core import py3
if py3:
def parse_return(content):
if isinstance(content, str):
content = content.encode('utf-8', 'xmlcharrefreplace')
if isinstance(content, bytes):
return (content,)
        elif isinstance(content, Iterable):
return (i.encode('utf-8', 'xmlcharrefreplace') for i in content)
else:
return ''
else:
def parse_return(content):
if isinstance(content, unicode):
content = content.encode('utf-8', 'xmlcharrefreplace')
if isinstance(content, str):
return (content,)
        elif isinstance(content, Iterable):
return (i.encode('utf-8', 'xmlcharrefreplace') for i in content)
else:
return ''
def parse_date(ims):
"""Adapted from Bottle"""
try:
ts = email.utils.parsedate_tz(ims)
return time.mktime(ts[:8] + (0,)) - (ts[9] or 0) - time.timezone
except (TypeError, ValueError, IndexError, OverflowError):
return None
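# For illustration: parse_date('Sun, 06 Nov 1994 08:49:37 GMT') returns the
# POSIX timestamp 784111777.0 (on a system without DST quirks), and any
# malformed input returns None.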
def parse_range_header(header, maxlen=0):
"""Adapted from Bottle"""
if not header or header[:6] != 'bytes=': return
ranges = [r.split('-', 1) for r in header[6:].split(',') if '-' in r]
for start, end in ranges:
try:
if not start: # bytes=-100 -> last 100 bytes
start, end = max(0, maxlen-int(end)), maxlen
            elif not end:  # bytes=100- -> all but the first 100 bytes
start, end = int(start), maxlen
else: # bytes=100-200 -> bytes 100-200 (inclusive)
start, end = int(start), min(int(end)+1, maxlen)
if 0 <= start < end <= maxlen:
yield start, end
except ValueError:
pass
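# For illustration (maxlen is the size of the resource in bytes):
#   list(parse_range_header('bytes=0-499,-500', maxlen=1000))
#   -> [(0, 500), (500, 1000)]   # half-open (start, end) byte ranges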
def file_iter_range(fp, offset, bytes_, maxread=1024*1024):
"""Adapted from Bottle"""
fp.seek(offset)
while bytes_ > 0:
part = fp.read(min(bytes_, maxread))
if not part: break
bytes_ -= len(part)
yield part
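# A hypothetical pairing of the two helpers when serving a range request
# (path, header and clen are placeholders for the caller's values):
#   with open(path, 'rb') as fp:
#       for start, end in parse_range_header(header, maxlen=clen):
#           body = b''.join(file_iter_range(fp, start, end - start))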
class FieldStorage(cgi.FieldStorage):
def get(self, key):
try:
return self[key].value
except KeyError:
return None
| fallingduck/snifter | snifter/utils.py | Python | bsd-3-clause | 2,266 |
#
# File that determines what each URL points to. This uses _Python_ regular
# expressions, not Perl's.
#
# See:
# http://diveintopython.org/regular_expressions/street_addresses.html#re.matching.2.3
#
from django.conf import settings
from django.conf.urls.defaults import *
from django.contrib import admin
from django.views.generic import RedirectView
# Wiki imports
from wiki.urls import get_pattern as get_wiki_pattern
from django_notify.urls import get_pattern as get_notify_pattern
from djangobb_forum import settings as forum_settings
admin.autodiscover()
# Setup the root url tree from /
# AJAX stuff.
from dajaxice.core import dajaxice_autodiscover, dajaxice_config
dajaxice_autodiscover()
urlpatterns = patterns('',
# User Authentication
url(r'^login/', 'web.views.login', name="login"),
url(r'^logout/', 'django.contrib.auth.views.logout', name="logout"),
url(r'^accounts/login', 'views.login_gateway'),
# News stuff
#url(r'^news/', include('src.web.news.urls')),
# Page place-holder for things that aren't implemented yet.
url(r'^tbi/', 'game.gamesrc.oasis.web.website.views.to_be_implemented'),
# Admin interface
url(r'^admin/doc/', include('django.contrib.admindocs.urls')),
url(r'^admin/', include(admin.site.urls)),
# favicon
url(r'^favicon\.ico$', RedirectView.as_view(url='/media/images/favicon.ico')),
# ajax stuff
url(r'^webclient/',include('game.gamesrc.oasis.web.webclient.urls', namespace="webclient")),
# Wiki
url(r'^notify/', get_notify_pattern()),
url(r'^wiki/', get_wiki_pattern()),
# Forum
(r'^forum/', include('bb_urls', namespace='djangobb')),
# Registration stuff
url(r'^roster/', include('roster.urls', namespace='roster')),
# Character related stuff.
url(r'^character/', include('character.urls', namespace='character')),
# Mail stuff
url(r'^mail/', include('mail.urls', namespace='mail')),
# Search utilities
url(r'^search/', include('haystack.urls', namespace='search')),
# AJAX stuff
url(dajaxice_config.dajaxice_url, include('dajaxice.urls')),
url(r'^selectable/', include('selectable.urls')),
# Ticket system
url(r'^tickets/', include('helpdesk.urls', namespace='helpdesk')),
url(r'^$', 'views.page_index', name='index'),
)
# 500 Errors:
handler500 = 'web.views.custom_500'
# This sets up the server if the user wants to run the Django
# test server (this should normally not be needed).
if settings.SERVE_MEDIA:
urlpatterns += patterns('',
(r'^media/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
(r'^static/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT}),
(r'^wiki/([^/]+/)*wiki/(?P<path>.*)$', 'django.views.static.serve', {'document_root': settings.STATIC_ROOT + '/wiki/'})
)
# PM Extension
if (forum_settings.PM_SUPPORT):
urlpatterns += patterns('',
(r'^mail/', include('mail_urls')),
)
if (settings.DEBUG):
urlpatterns += patterns('',
(r'^%s(?P<path>.*)$' % settings.MEDIA_URL.lstrip('/'),
'django.views.static.serve', {'document_root': settings.MEDIA_ROOT}),
)
| TaliesinSkye/evennia | wintersoasis-master/web/urls.py | Python | bsd-3-clause | 3,320 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.2 on 2017-01-10 15:56
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [("elections", "0019_auto_20170110_1329")]
operations = [
migrations.AddField(
model_name="electedrole",
name="elected_role_name",
field=models.CharField(blank=True, max_length=255),
),
migrations.AlterField(
model_name="election",
name="group",
field=models.ForeignKey(
null=True,
on_delete=django.db.models.deletion.CASCADE,
related_name="children",
to="elections.Election",
),
),
]
| DemocracyClub/EveryElection | every_election/apps/elections/migrations/0020_auto_20170110_1556.py | Python | bsd-3-clause | 823 |
''' A Taylor series visualization graph. This example demonstrates how
    Bokeh can re-render a chart from an expression typed in by the user.
'''
import numpy as np
import sympy as sy
from bokeh.core.properties import value
from bokeh.io import curdoc
from bokeh.layouts import column
from bokeh.models import (ColumnDataSource, Legend, LegendItem,
PreText, Slider, TextInput)
from bokeh.plotting import figure
xs = sy.Symbol('x')
expr = sy.exp(-xs)*sy.sin(xs)
def taylor(fx, xs, order, x_range=(0, 1), n=200):
x0, x1 = x_range
x = np.linspace(float(x0), float(x1), n)
fy = sy.lambdify(xs, fx, modules=['numpy'])(x)
tx = fx.series(xs, n=order).removeO()
if tx.is_Number:
ty = np.zeros_like(x)
ty.fill(float(tx))
else:
ty = sy.lambdify(xs, tx, modules=['numpy'])(x)
return x, fy, ty
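# For illustration: taylor(sy.sin(xs), xs, order=4, x_range=(-sy.pi, sy.pi))
# returns the sample grid x, the exact values sin(x), and the values of the
# degree-3 polynomial x - x**3/6 (the order-4 series with the O() term
# removed).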
source = ColumnDataSource(data=dict(x=[], fy=[], ty=[]))
p = figure(x_range=(-7,7), y_range=(-100, 200), width=800, height=400)
line_f = p.line(x="x", y="fy", line_color="navy", line_width=2, source=source)
line_t = p.line(x="x", y="ty", line_color="firebrick", line_width=2, source=source)
p.background_fill_color = "lightgrey"
legend = Legend(location="top_right")
legend.items = [
LegendItem(label=value(f"{expr}"), renderers=[line_f]),
LegendItem(label=value(f"taylor({expr})"), renderers=[line_t]),
]
p.add_layout(legend)
def update():
try:
expr = sy.sympify(text.value, dict(x=xs))
except Exception as exception:
errbox.text = str(exception)
else:
errbox.text = ""
x, fy, ty = taylor(expr, xs, slider.value, (-2*sy.pi, 2*sy.pi), 200)
p.title.text = "Taylor (n=%d) expansion comparison for: %s" % (slider.value, expr)
legend.items[0].label = value(f"{expr}")
legend.items[1].label = value(f"taylor({expr})")
source.data = dict(x=x, fy=fy, ty=ty)
slider = Slider(start=1, end=20, value=1, step=1, title="Order")
slider.on_change('value', lambda attr, old, new: update())
text = TextInput(value=str(expr), title="Expression:")
text.on_change('value', lambda attr, old, new: update())
errbox = PreText()
update()
inputs = column(text, slider, errbox, width=400)
curdoc().add_root(column(inputs, p))
| bokeh/bokeh | examples/app/taylor.py | Python | bsd-3-clause | 2,238 |
try:
    # the container ABCs live in collections.abc since Python 3.3
    from collections.abc import Sized
except ImportError:  # Python 2
    from collections import Sized
from collections import OrderedDict
import matplotlib.pyplot as plt
from matplotlib import collections as mc
import numpy as np
import ipywidgets
import IPython.display as ipydisplay
from menpo.image import MaskedImage, Image
from menpo.image.base import _convert_patches_list_to_single_array
from .options import (RendererOptionsWidget, TextPrintWidget,
SaveFigureOptionsWidget, AnimationOptionsWidget,
LandmarkOptionsWidget, ChannelOptionsWidget,
FeatureOptionsWidget, PlotOptionsWidget,
PatchOptionsWidget, LinearModelParametersWidget,
CameraSnapshotWidget)
from .style import format_box, map_styles_to_hex_colours
from .tools import LogoWidget
from .utils import (extract_group_labels_from_landmarks,
extract_groups_labels_from_image, render_image,
render_patches)
from .checks import check_n_parameters
def menpowidgets_src_dir_path():
r"""
The path to the top of the menpowidgets package.
Useful for locating where the logos folder is stored.
Returns
-------
path : ``pathlib.Path``
The full path to the top of the Menpo package
"""
# to avoid cluttering the menpowidgets.base namespace
from pathlib import Path
import os.path
return Path(os.path.abspath(__file__)).parent
def visualize_pointclouds(pointclouds, figure_size=(10, 8), style='coloured',
browser_style='buttons', custom_info_callback=None):
r"""
Widget that allows browsing through a `list` of `menpo.shape.PointCloud`,
`menpo.shape.PointUndirectedGraph`, `menpo.shape.PointDirectedGraph`,
`menpo.shape.PointTree`, `menpo.shape.TriMesh` or any subclass of those.
Any instance of the above can be combined in the `list`.
The widget has options tabs regarding the renderer (lines, markers,
numbering, zoom, axes) and saving the figure to file.
Parameters
----------
pointclouds : `list`
The `list` of objects to be visualized. It can contain a combination of
`menpo.shape.PointCloud`, `menpo.shape.PointUndirectedGraph`,
`menpo.shape.PointDirectedGraph`, `menpo.shape.PointTree`,
`menpo.shape.TriMesh` or subclasses of those.
figure_size : (`int`, `int`), optional
The initial size of the rendered figure.
style : ``{'coloured', 'minimal'}``, optional
If ``'coloured'``, then the style of the widget will be coloured. If
``minimal``, then the style is simple using black and white colours.
browser_style : ``{'buttons', 'slider'}``, optional
It defines whether the selector of the objects will have the form of
plus/minus buttons or a slider.
custom_info_callback: `function` or ``None``, optional
If not None, it should be a function that accepts a pointcloud
and returns a list of custom messages to be printed per
pointcloud. Each custom message will be printed in a separate line.
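    Examples
    --------
    A minimal, hypothetical sketch (the builtin asset and its landmark
    group name 'PTS' are assumptions): ::
        import menpo.io as mio
        from menpowidgets import visualize_pointclouds
        image = mio.import_builtin_asset.takeo_ppm()
        visualize_pointclouds([image.landmarks['PTS'].lms])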
"""
# Ensure that the code is being run inside a Jupyter kernel!
from .utils import verify_ipython_and_kernel
verify_ipython_and_kernel()
print('Initializing...')
# Make sure that pointclouds is a list even with one pointcloud member
if not isinstance(pointclouds, Sized):
pointclouds = [pointclouds]
# Get the number of pointclouds
n_pointclouds = len(pointclouds)
# Define the styling options
if style == 'coloured':
logo_style = 'warning'
widget_box_style = 'warning'
widget_border_radius = 10
widget_border_width = 1
animation_style = 'warning'
info_style = 'info'
renderer_box_style = 'info'
renderer_box_border_colour = map_styles_to_hex_colours('info')
renderer_box_border_radius = 10
renderer_style = 'danger'
renderer_tabs_style = 'danger'
save_figure_style = 'danger'
else:
logo_style = 'minimal'
widget_box_style = ''
widget_border_radius = 0
widget_border_width = 0
animation_style = 'minimal'
info_style = 'minimal'
renderer_box_style = ''
renderer_box_border_colour = 'black'
renderer_box_border_radius = 0
renderer_style = 'minimal'
renderer_tabs_style = 'minimal'
save_figure_style = 'minimal'
# Define render function
def render_function(change):
# Clear current figure, but wait until the generation of the new data
# that will be rendered
ipydisplay.clear_output(wait=True)
# Get selected pointcloud index
im = pointcloud_number_wid.selected_values if n_pointclouds > 1 else 0
# Render pointcloud with selected options
tmp1 = renderer_options_wid.selected_values['lines']
tmp2 = renderer_options_wid.selected_values['markers']
options = renderer_options_wid.selected_values['numbering']
options.update(renderer_options_wid.selected_values['axes'])
new_figure_size = (
renderer_options_wid.selected_values['zoom_one'] * figure_size[0],
renderer_options_wid.selected_values['zoom_one'] * figure_size[1])
renderer = pointclouds[im].view(
figure_id=save_figure_wid.renderer.figure_id, new_figure=False,
image_view=axes_mode_wid.value == 1, label=None,
render_lines=tmp1['render_lines'],
line_colour=tmp1['line_colour'][0],
line_style=tmp1['line_style'], line_width=tmp1['line_width'],
render_markers=tmp2['render_markers'],
marker_style=tmp2['marker_style'],
marker_size=tmp2['marker_size'],
marker_face_colour=tmp2['marker_face_colour'][0],
marker_edge_colour=tmp2['marker_edge_colour'][0],
marker_edge_width=tmp2['marker_edge_width'],
figure_size=new_figure_size, **options)
plt.show()
# Update info text widget
update_info(pointclouds[im], custom_info_callback=custom_info_callback)
# Save the current figure id
save_figure_wid.renderer = renderer
# Define function that updates the info text
def update_info(pointcloud, custom_info_callback=None):
min_b, max_b = pointcloud.bounds()
rang = pointcloud.range()
cm = pointcloud.centre()
text_per_line = [
"> {} points".format(pointcloud.n_points),
"> Bounds: [{0:.1f}-{1:.1f}]W, [{2:.1f}-{3:.1f}]H".format(
min_b[0], max_b[0], min_b[1], max_b[1]),
"> Range: {0:.1f}W, {1:.1f}H".format(rang[0], rang[1]),
"> Centre of mass: ({0:.1f}, {1:.1f})".format(cm[0], cm[1]),
"> Norm: {0:.2f}".format(pointcloud.norm())]
if custom_info_callback is not None:
# iterate over the list of messages returned by the callback
# function and append them in the text_per_line.
for msg in custom_info_callback(pointcloud):
text_per_line.append('> {}'.format(msg))
info_wid.set_widget_state(text_per_line=text_per_line)
# Create widgets
axes_mode_wid = ipywidgets.RadioButtons(
options={'Image': 1, 'Point cloud': 2}, description='Axes mode:',
value=1)
axes_mode_wid.observe(render_function, names='value', type='change')
renderer_options_wid = RendererOptionsWidget(
options_tabs=['markers', 'lines', 'numbering', 'zoom_one', 'axes'],
labels=None, axes_x_limits=0.1, axes_y_limits=0.1,
render_function=render_function, style=renderer_style,
tabs_style=renderer_tabs_style)
renderer_options_box = ipywidgets.VBox(
children=[axes_mode_wid, renderer_options_wid], align='center',
margin='0.1cm')
info_wid = TextPrintWidget(text_per_line=[''] * 5, style=info_style)
save_figure_wid = SaveFigureOptionsWidget(style=save_figure_style)
# Group widgets
if n_pointclouds > 1:
# Pointcloud selection slider
index = {'min': 0, 'max': n_pointclouds-1, 'step': 1, 'index': 0}
pointcloud_number_wid = AnimationOptionsWidget(
index, render_function=render_function, index_style=browser_style,
interval=0.2, description='Pointcloud ', loop_enabled=True,
continuous_update=False, style=animation_style)
# Header widget
header_wid = ipywidgets.HBox(
children=[LogoWidget(style=logo_style), pointcloud_number_wid],
align='start')
else:
# Header widget
header_wid = LogoWidget(style=logo_style)
header_wid.margin = '0.1cm'
options_box = ipywidgets.Tab(children=[info_wid, renderer_options_box,
save_figure_wid], margin='0.1cm')
tab_titles = ['Info', 'Renderer', 'Export']
for (k, tl) in enumerate(tab_titles):
options_box.set_title(k, tl)
if n_pointclouds > 1:
wid = ipywidgets.VBox(children=[header_wid, options_box], align='start')
else:
wid = ipywidgets.HBox(children=[header_wid, options_box], align='start')
# Set widget's style
wid.box_style = widget_box_style
wid.border_radius = widget_border_radius
wid.border_width = widget_border_width
wid.border_color = map_styles_to_hex_colours(widget_box_style)
format_box(renderer_options_box, renderer_box_style, True,
renderer_box_border_colour, 'solid', 1,
renderer_box_border_radius, '0.1cm', '0.2cm')
# Display final widget
ipydisplay.display(wid)
# Trigger initial visualization
render_function({})
def visualize_landmarkgroups(landmarkgroups, figure_size=(10, 8), style='coloured',
browser_style='buttons', custom_info_callback=None):
r"""
Widget that allows browsing through a `list` of
`menpo.landmark.LandmarkGroup` (or subclass) objects.
The landmark groups can have a combination of different attributes, e.g.
different labels, number of points etc. The widget has options tabs
regarding the landmarks, the renderer (lines, markers, numbering, legend,
zoom, axes) and saving the figure to file.
Parameters
----------
landmarkgroups : `list` of `menpo.landmark.LandmarkGroup` or subclass
The `list` of landmark groups to be visualized.
figure_size : (`int`, `int`), optional
The initial size of the rendered figure.
style : ``{'coloured', 'minimal'}``, optional
If ``'coloured'``, then the style of the widget will be coloured. If
``minimal``, then the style is simple using black and white colours.
browser_style : ``{'buttons', 'slider'}``, optional
It defines whether the selector of the objects will have the form of
plus/minus buttons or a slider.
custom_info_callback: `function` or ``None``, optional
If not None, it should be a function that accepts a landmark group
and returns a list of custom messages to be printed per landmark
group. Each custom message will be printed in a separate line.
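    Examples
    --------
    A hypothetical sketch (the builtin asset and its landmark group name
    'PTS' are assumptions): ::
        import menpo.io as mio
        from menpowidgets import visualize_landmarkgroups
        image = mio.import_builtin_asset.takeo_ppm()
        visualize_landmarkgroups([image.landmarks['PTS']])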
"""
# Ensure that the code is being run inside a Jupyter kernel!
from .utils import verify_ipython_and_kernel
verify_ipython_and_kernel()
print('Initializing...')
# Make sure that landmarkgroups is a list even with one landmark group
# member
if not isinstance(landmarkgroups, list):
landmarkgroups = [landmarkgroups]
# Get the number of landmarkgroups
n_landmarkgroups = len(landmarkgroups)
# Define the styling options
if style == 'coloured':
logo_style = 'success'
widget_box_style = 'success'
widget_border_radius = 10
widget_border_width = 1
animation_style = 'success'
landmarks_style = 'info'
info_style = 'info'
renderer_box_style = 'info'
renderer_box_border_colour = map_styles_to_hex_colours('info')
renderer_box_border_radius = 10
renderer_style = 'danger'
renderer_tabs_style = 'danger'
save_figure_style = 'danger'
else:
logo_style = 'minimal'
widget_box_style = ''
widget_border_radius = 0
widget_border_width = 0
landmarks_style = 'minimal'
animation_style = 'minimal'
info_style = 'minimal'
renderer_box_style = ''
renderer_box_border_colour = 'black'
renderer_box_border_radius = 0
renderer_style = 'minimal'
renderer_tabs_style = 'minimal'
save_figure_style = 'minimal'
# Define render function
def render_function(change):
# Clear current figure, but wait until the generation of the new data
# that will be rendered
ipydisplay.clear_output(wait=True)
# get selected index
im = landmark_number_wid.selected_values if n_landmarkgroups > 1 else 0
# show landmarks with selected options
tmp1 = renderer_options_wid.selected_values['lines']
tmp2 = renderer_options_wid.selected_values['markers']
options = renderer_options_wid.selected_values['numbering']
options.update(renderer_options_wid.selected_values['legend'])
options.update(renderer_options_wid.selected_values['axes'])
new_figure_size = (
renderer_options_wid.selected_values['zoom_one'] * figure_size[0],
renderer_options_wid.selected_values['zoom_one'] * figure_size[1])
# get line and marker colours
line_colour = []
marker_face_colour = []
marker_edge_colour = []
for lbl in landmark_options_wid.selected_values['with_labels']:
lbl_idx = landmarkgroups[im].labels.index(lbl)
line_colour.append(tmp1['line_colour'][lbl_idx])
marker_face_colour.append(tmp2['marker_face_colour'][lbl_idx])
marker_edge_colour.append(tmp2['marker_edge_colour'][lbl_idx])
if landmark_options_wid.selected_values['render_landmarks']:
renderer = landmarkgroups[im].view(
with_labels=landmark_options_wid.selected_values['with_labels'],
figure_id=save_figure_wid.renderer.figure_id, new_figure=False,
image_view=axes_mode_wid.value == 1,
render_lines=tmp1['render_lines'], line_colour=line_colour,
line_style=tmp1['line_style'], line_width=tmp1['line_width'],
render_markers=tmp2['render_markers'],
marker_style=tmp2['marker_style'],
marker_size=tmp2['marker_size'],
marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=tmp2['marker_edge_width'],
figure_size=new_figure_size, **options)
plt.show()
# Save the current figure id
save_figure_wid.renderer = renderer
else:
ipydisplay.clear_output()
# update info text widget
update_info(landmarkgroups[im], custom_info_callback=custom_info_callback)
# Define function that updates the info text
def update_info(landmarkgroup, custom_info_callback=None):
min_b, max_b = landmarkgroup.lms.bounds()
rang = landmarkgroup.lms.range()
cm = landmarkgroup.lms.centre()
text_per_line = [
"> {} landmark points".format(landmarkgroup.n_landmarks),
"> Bounds: [{0:.1f}-{1:.1f}]W, [{2:.1f}-{3:.1f}]H".format(
min_b[0], max_b[0], min_b[1], max_b[1]),
"> Range: {0:.1f}W, {1:.1f}H".format(rang[0], rang[1]),
"> Centre of mass: ({0:.1f}, {1:.1f})".format(cm[0], cm[1]),
"> Norm: {0:.2f}".format(landmarkgroup.lms.norm())]
if custom_info_callback is not None:
# iterate over the list of messages returned by the callback
# function and append them in the text_per_line.
for msg in custom_info_callback(landmarkgroup):
text_per_line.append('> {}'.format(msg))
info_wid.set_widget_state(text_per_line=text_per_line)
# Create widgets
landmark_options_wid = LandmarkOptionsWidget(
group_keys=[' '], labels_keys=[landmarkgroups[0].labels],
render_function=render_function, style=landmarks_style)
axes_mode_wid = ipywidgets.RadioButtons(
options={'Image': 1, 'Point cloud': 2}, description='Axes mode:',
value=1)
axes_mode_wid.observe(render_function, names='value', type='change')
renderer_options_wid = RendererOptionsWidget(
options_tabs=['lines', 'markers', 'numbering', 'legend', 'zoom_one',
'axes'], labels=landmarkgroups[0].labels,
axes_x_limits=0.1, axes_y_limits=0.1,
render_function=render_function, style=renderer_style,
tabs_style=renderer_tabs_style)
renderer_options_box = ipywidgets.VBox(
children=[axes_mode_wid, renderer_options_wid], align='center',
margin='0.1cm')
info_wid = TextPrintWidget(text_per_line=[''] * 5, style=info_style)
save_figure_wid = SaveFigureOptionsWidget(renderer=None,
style=save_figure_style)
# Group widgets
if n_landmarkgroups > 1:
# Define function that updates options' widgets state
def update_widgets(change):
# Get new labels
im = landmark_number_wid.selected_values
labels = landmarkgroups[im].labels
# Update renderer options
renderer_options_wid.set_widget_state(labels=labels,
allow_callback=False)
# Update landmarks options
landmark_options_wid.set_widget_state(
group_keys=[' '], labels_keys=[labels], allow_callback=True)
landmark_options_wid.predefined_style(landmarks_style)
# Landmark selection slider
index = {'min': 0, 'max': n_landmarkgroups-1, 'step': 1, 'index': 0}
landmark_number_wid = AnimationOptionsWidget(
index, render_function=update_widgets, index_style=browser_style,
interval=0.2, description='Shape', loop_enabled=True,
continuous_update=False, style=animation_style)
# Header widget
header_wid = ipywidgets.HBox(
children=[LogoWidget(style=logo_style), landmark_number_wid],
align='start')
else:
# Header widget
header_wid = LogoWidget(style=logo_style)
header_wid.margin = '0.2cm'
options_box = ipywidgets.Tab(
children=[info_wid, landmark_options_wid, renderer_options_box,
save_figure_wid], margin='0.2cm')
tab_titles = ['Info', 'Landmarks', 'Renderer', 'Export']
for (k, tl) in enumerate(tab_titles):
options_box.set_title(k, tl)
if n_landmarkgroups > 1:
wid = ipywidgets.VBox(children=[header_wid, options_box], align='start')
else:
wid = ipywidgets.HBox(children=[header_wid, options_box], align='start')
# Set widget's style
wid.box_style = widget_box_style
wid.border_radius = widget_border_radius
wid.border_width = widget_border_width
wid.border_color = map_styles_to_hex_colours(widget_box_style)
format_box(renderer_options_box, renderer_box_style, True,
renderer_box_border_colour, 'solid', 1,
renderer_box_border_radius, '0.1cm', '0.2cm')
# Display final widget
ipydisplay.display(wid)
# Trigger initial visualization
render_function({})
def visualize_landmarks(landmarks, figure_size=(10, 8), style='coloured',
browser_style='buttons', custom_info_callback=None):
r"""
Widget that allows browsing through a `list` of
`menpo.landmark.LandmarkManager` (or subclass) objects.
The landmark managers can have a combination of different attributes, e.g.
landmark groups and labels etc. The widget has options tabs regarding the
landmarks, the renderer (lines, markers, numbering, legend, zoom, axes)
and saving the figure to file.
Parameters
----------
landmarks : `list` of `menpo.landmark.LandmarkManager` or subclass
The `list` of landmark managers to be visualized.
figure_size : (`int`, `int`), optional
The initial size of the rendered figure.
style : ``{'coloured', 'minimal'}``, optional
If ``'coloured'``, then the style of the widget will be coloured. If
``minimal``, then the style is simple using black and white colours.
browser_style : ``{'buttons', 'slider'}``, optional
It defines whether the selector of the objects will have the form of
plus/minus buttons or a slider.
custom_info_callback: `function` or ``None``, optional
If not None, it should be a function that accepts a landmark group and returns
a list of custom messages to be printed per landmark group. Each custom message
will be printed in a separate line.
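    Examples
    --------
    A hypothetical sketch (the builtin asset names are assumptions): ::
        import menpo.io as mio
        from menpowidgets import visualize_landmarks
        images = [mio.import_builtin_asset.takeo_ppm(),
                  mio.import_builtin_asset.lenna_png()]
        visualize_landmarks([im.landmarks for im in images])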
"""
# Ensure that the code is being run inside a Jupyter kernel!
from .utils import verify_ipython_and_kernel
verify_ipython_and_kernel()
print('Initializing...')
# Make sure that landmarks is a list even with one landmark manager member
if not isinstance(landmarks, list):
landmarks = [landmarks]
# Get the number of landmark managers
n_landmarks = len(landmarks)
# Define the styling options
if style == 'coloured':
logo_style = 'info'
widget_box_style = 'info'
widget_border_radius = 10
widget_border_width = 1
animation_style = 'info'
landmarks_style = 'danger'
info_style = 'danger'
renderer_box_style = 'danger'
renderer_box_border_colour = map_styles_to_hex_colours('danger')
renderer_box_border_radius = 10
renderer_style = 'warning'
renderer_tabs_style = 'warning'
save_figure_style = 'danger'
else:
logo_style = 'minimal'
widget_box_style = ''
widget_border_radius = 0
widget_border_width = 0
landmarks_style = 'minimal'
animation_style = 'minimal'
info_style = 'minimal'
renderer_box_style = ''
renderer_box_border_colour = 'black'
renderer_box_border_radius = 0
renderer_style = 'minimal'
renderer_tabs_style = 'minimal'
save_figure_style = 'minimal'
# Define render function
def render_function(change):
# Clear current figure, but wait until the generation of the new data
# that will be rendered
ipydisplay.clear_output(wait=True)
# get selected index
im = landmark_number_wid.selected_values if n_landmarks > 1 else 0
# get selected group
selected_group = landmark_options_wid.selected_values['group']
if landmark_options_wid.selected_values['render_landmarks']:
# show landmarks with selected options
tmp1 = renderer_options_wid.selected_values['lines']
tmp2 = renderer_options_wid.selected_values['markers']
options = renderer_options_wid.selected_values['numbering']
options.update(renderer_options_wid.selected_values['legend'])
options.update(renderer_options_wid.selected_values['axes'])
new_figure_size = (
renderer_options_wid.selected_values['zoom_one'] * figure_size[0],
renderer_options_wid.selected_values['zoom_one'] * figure_size[1])
# get line and marker colours
line_colour = []
marker_face_colour = []
marker_edge_colour = []
for lbl in landmark_options_wid.selected_values['with_labels']:
lbl_idx = landmarks[im][selected_group].labels.index(lbl)
line_colour.append(tmp1['line_colour'][lbl_idx])
marker_face_colour.append(tmp2['marker_face_colour'][lbl_idx])
marker_edge_colour.append(tmp2['marker_edge_colour'][lbl_idx])
# render
renderer = landmarks[im][selected_group].view(
with_labels=landmark_options_wid.selected_values['with_labels'],
figure_id=save_figure_wid.renderer.figure_id, new_figure=False,
image_view=axes_mode_wid.value == 1,
render_lines=tmp1['render_lines'], line_colour=line_colour,
line_style=tmp1['line_style'], line_width=tmp1['line_width'],
render_markers=tmp2['render_markers'],
marker_style=tmp2['marker_style'],
marker_size=tmp2['marker_size'],
marker_face_colour=marker_face_colour,
marker_edge_colour=marker_edge_colour,
marker_edge_width=tmp2['marker_edge_width'],
figure_size=new_figure_size, **options)
plt.show()
# Save the current figure id
save_figure_wid.renderer = renderer
else:
ipydisplay.clear_output()
# update info text widget
update_info(landmarks[im], selected_group,
custom_info_callback=custom_info_callback)
# Define function that updates the info text
def update_info(landmarks, group, custom_info_callback=None):
if group is not None:
min_b, max_b = landmarks[group][None].bounds()
rang = landmarks[group][None].range()
cm = landmarks[group][None].centre()
text_per_line = [
"> {} landmark points".format(landmarks[group][None].n_points),
"> Bounds: [{0:.1f}-{1:.1f}]W, [{2:.1f}-{3:.1f}]H".
format(min_b[0], max_b[0], min_b[1], max_b[1]),
"> Range: {0:.1f}W, {1:.1f}H".format(rang[0], rang[1]),
"> Centre of mass: ({0:.1f}, {1:.1f})".format(cm[0], cm[1]),
"> Norm: {0:.2f}".format(landmarks[group][None].norm())]
if custom_info_callback is not None:
# iterate over the list of messages returned by the callback
# function and append them in the text_per_line.
for msg in custom_info_callback(landmarks[group][None]):
text_per_line.append('> {}'.format(msg))
else:
text_per_line = ["No landmarks available."]
info_wid.set_widget_state(text_per_line=text_per_line)
# Create widgets
groups_keys, labels_keys = extract_group_labels_from_landmarks(landmarks[0])
first_label = labels_keys[0] if labels_keys else None
axes_mode_wid = ipywidgets.RadioButtons(
options={'Image': 1, 'Point cloud': 2}, description='Axes mode:',
value=1)
axes_mode_wid.observe(render_function, names='value', type='change')
renderer_options_wid = RendererOptionsWidget(
options_tabs=['markers', 'lines', 'numbering', 'legend', 'zoom_one',
'axes'], labels=first_label,
axes_x_limits=0.1, axes_y_limits=0.1,
render_function=render_function, style=renderer_style,
tabs_style=renderer_tabs_style)
renderer_options_box = ipywidgets.VBox(
children=[axes_mode_wid, renderer_options_wid], align='center',
margin='0.1cm')
landmark_options_wid = LandmarkOptionsWidget(
group_keys=groups_keys, labels_keys=labels_keys,
render_function=render_function, style=landmarks_style,
renderer_widget=renderer_options_wid)
info_wid = TextPrintWidget(text_per_line=[''] * 5, style=info_style)
save_figure_wid = SaveFigureOptionsWidget(renderer=None,
style=save_figure_style)
# Group widgets
if n_landmarks > 1:
# Define function that updates options' widgets state
def update_widgets(change):
# Get new groups and labels
im = landmark_number_wid.selected_values
g_keys, l_keys = extract_group_labels_from_landmarks(
landmarks[im])
# Update landmarks options
landmark_options_wid.set_widget_state(
group_keys=g_keys, labels_keys=l_keys, allow_callback=True)
landmark_options_wid.predefined_style(landmarks_style)
# Landmark selection slider
index = {'min': 0, 'max': n_landmarks-1, 'step': 1, 'index': 0}
landmark_number_wid = AnimationOptionsWidget(
index, render_function=update_widgets, index_style=browser_style,
interval=0.2, description='Shape', loop_enabled=True,
continuous_update=False, style=animation_style)
# Header widget
header_wid = ipywidgets.HBox(
children=[LogoWidget(style=logo_style), landmark_number_wid],
align='start')
else:
# Header widget
header_wid = LogoWidget(style=logo_style)
header_wid.margin = '0.2cm'
options_box = ipywidgets.Tab(
children=[info_wid, landmark_options_wid, renderer_options_box,
save_figure_wid], margin='0.2cm')
tab_titles = ['Info', 'Landmarks', 'Renderer', 'Export']
for (k, tl) in enumerate(tab_titles):
options_box.set_title(k, tl)
if n_landmarks > 1:
wid = ipywidgets.VBox(children=[header_wid, options_box], align='start')
else:
wid = ipywidgets.HBox(children=[header_wid, options_box], align='start')
# Set widget's style
wid.box_style = widget_box_style
wid.border_radius = widget_border_radius
wid.border_width = widget_border_width
wid.border_color = map_styles_to_hex_colours(widget_box_style)
format_box(renderer_options_box, renderer_box_style, True,
renderer_box_border_colour, 'solid', 1,
renderer_box_border_radius, '0.1cm', '0.2cm')
# Display final widget
ipydisplay.display(wid)
# Trigger initial visualization
render_function({})
def visualize_images(images, figure_size=(10, 8), style='coloured',
browser_style='buttons', custom_info_callback=None):
r"""
Widget that allows browsing through a `list` of `menpo.image.Image` (or
subclass) objects.
    The images can have a combination of different attributes, e.g. masked or
    not, landmarked or not, with multiple landmark groups and labels etc.
The widget has options tabs regarding the visualized channels, the
landmarks, the renderer (lines, markers, numbering, legend, figure, axes)
and saving the figure to file.
Parameters
----------
images : `list` of `menpo.image.Image` or subclass
The `list` of images to be visualized.
figure_size : (`int`, `int`), optional
The initial size of the rendered figure.
style : ``{'coloured', 'minimal'}``, optional
If ``'coloured'``, then the style of the widget will be coloured. If
``minimal``, then the style is simple using black and white colours.
browser_style : ``{'buttons', 'slider'}``, optional
It defines whether the selector of the objects will have the form of
plus/minus buttons or a slider.
custom_info_callback: `function` or ``None``, optional
If not None, it should be a function that accepts an image and returns
a list of custom messages to be printed per image. Each custom message
will be printed in a separate line.
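    Examples
    --------
    A hypothetical sketch (the builtin asset names are assumptions): ::
        import menpo.io as mio
        from menpowidgets import visualize_images
        images = [mio.import_builtin_asset.takeo_ppm(),
                  mio.import_builtin_asset.einstein_jpg()]
        visualize_images(images)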
"""
# Ensure that the code is being run inside a Jupyter kernel!
from .utils import verify_ipython_and_kernel
verify_ipython_and_kernel()
print('Initializing...')
# Make sure that images is a list even with one image member
if not isinstance(images, Sized):
images = [images]
# Get the number of images
n_images = len(images)
# Define the styling options
if style == 'coloured':
logo_style = 'info'
widget_box_style = 'info'
widget_border_radius = 10
widget_border_width = 1
animation_style = 'info'
channels_style = 'danger'
landmarks_style = 'danger'
info_style = 'danger'
renderer_style = 'danger'
renderer_tabs_style = 'danger'
save_figure_style = 'danger'
else:
logo_style = 'minimal'
widget_box_style = ''
widget_border_radius = 0
widget_border_width = 0
channels_style = 'minimal'
landmarks_style = 'minimal'
animation_style = 'minimal'
info_style = 'minimal'
renderer_style = 'minimal'
renderer_tabs_style = 'minimal'
save_figure_style = 'minimal'
# Define render function
def render_function(change):
# Clear current figure, but wait until the generation of the new data
# that will be rendered
ipydisplay.clear_output(wait=True)
# get selected index
im = image_number_wid.selected_values if n_images > 1 else 0
# update info text widget
image_is_masked = isinstance(images[im], MaskedImage)
selected_group = landmark_options_wid.selected_values['group']
# show landmarks with selected options
tmp1 = renderer_options_wid.selected_values['lines']
tmp2 = renderer_options_wid.selected_values['markers']
options = renderer_options_wid.selected_values['numbering']
options.update(renderer_options_wid.selected_values['legend'])
options.update(renderer_options_wid.selected_values['axes'])
options.update(renderer_options_wid.selected_values['image'])
options.update(channel_options_wid.selected_values)
options.update(landmark_options_wid.selected_values)
new_figure_size = (
renderer_options_wid.selected_values['zoom_one'] * figure_size[0],
renderer_options_wid.selected_values['zoom_one'] * figure_size[1])
# get line and marker colours
line_colour = []
marker_face_colour = []
marker_edge_colour = []
if images[im].has_landmarks:
for lbl in landmark_options_wid.selected_values['with_labels']:
lbl_idx = images[im].landmarks[selected_group].labels.index(lbl)
line_colour.append(tmp1['line_colour'][lbl_idx])
marker_face_colour.append(tmp2['marker_face_colour'][lbl_idx])
marker_edge_colour.append(tmp2['marker_edge_colour'][lbl_idx])
# show image with selected options
renderer = render_image(
image=images[im], renderer=save_figure_wid.renderer,
image_is_masked=image_is_masked,
render_lines=tmp1['render_lines'], line_style=tmp1['line_style'],
line_width=tmp1['line_width'], line_colour=line_colour,
render_markers=tmp2['render_markers'],
marker_style=tmp2['marker_style'],
marker_size=tmp2['marker_size'],
marker_edge_width=tmp2['marker_edge_width'],
marker_edge_colour=marker_edge_colour,
marker_face_colour=marker_face_colour,
figure_size=new_figure_size, **options)
# Update info
update_info(images[im], image_is_masked, selected_group,
custom_info_callback=custom_info_callback)
# Save the current figure id
save_figure_wid.renderer = renderer
# Define function that updates the info text
def update_info(img, image_is_masked, group, custom_info_callback=None):
# Prepare masked (or non-masked) string
masked_str = 'Masked Image' if image_is_masked else 'Image'
# Get image path, if available
path_str = img.path if hasattr(img, 'path') else 'No path available'
# Create text lines
text_per_line = [
"> {} of size {} with {} channel{}".format(
masked_str, img._str_shape(), img.n_channels,
's' * (img.n_channels > 1)),
"> Path: '{}'".format(path_str)]
if image_is_masked:
text_per_line.append(
"> {} masked pixels (attached mask {:.1%} true)".format(
img.n_true_pixels(), img.mask.proportion_true()))
text_per_line.append("> min={:.3f}, max={:.3f}".format(
img.pixels.min(), img.pixels.max()))
if img.has_landmarks:
text_per_line.append("> {} landmark points".format(
img.landmarks[group].lms.n_points))
if custom_info_callback is not None:
# iterate over the list of messages returned by the callback
# function and append them in the text_per_line.
for msg in custom_info_callback(img):
text_per_line.append('> {}'.format(msg))
info_wid.set_widget_state(text_per_line=text_per_line)
# Create widgets
groups_keys, labels_keys = extract_groups_labels_from_image(images[0])
first_label = labels_keys[0] if labels_keys else None
channel_options_wid = ChannelOptionsWidget(
n_channels=images[0].n_channels,
image_is_masked=isinstance(images[0], MaskedImage),
render_function=render_function, style=channels_style)
renderer_options_wid = RendererOptionsWidget(
options_tabs=['markers', 'lines', 'numbering', 'legend', 'zoom_one',
'axes', 'image'], labels=first_label,
axes_x_limits=None, axes_y_limits=None,
render_function=render_function, style=renderer_style,
tabs_style=renderer_tabs_style)
landmark_options_wid = LandmarkOptionsWidget(
group_keys=groups_keys, labels_keys=labels_keys,
render_function=render_function, style=landmarks_style,
renderer_widget=renderer_options_wid)
info_wid = TextPrintWidget(text_per_line=[''], style=info_style)
save_figure_wid = SaveFigureOptionsWidget(renderer=None,
style=save_figure_style)
# Group widgets
if n_images > 1:
# Define function that updates options' widgets state
def update_widgets(change):
# Get new groups and labels, then update landmark options
im = image_number_wid.selected_values
g_keys, l_keys = extract_groups_labels_from_image(
images[im])
# Update landmarks options
landmark_options_wid.set_widget_state(
group_keys=g_keys, labels_keys=l_keys, allow_callback=False)
landmark_options_wid.predefined_style(landmarks_style)
# Update channels options
channel_options_wid.set_widget_state(
n_channels=images[im].n_channels,
image_is_masked=isinstance(images[im], MaskedImage),
allow_callback=True)
# Image selection slider
index = {'min': 0, 'max': n_images-1, 'step': 1, 'index': 0}
image_number_wid = AnimationOptionsWidget(
index, render_function=update_widgets, index_style=browser_style,
interval=0.2, description='Image', loop_enabled=True,
continuous_update=False, style=animation_style)
# Header widget
header_wid = ipywidgets.HBox(
children=[LogoWidget(style=logo_style), image_number_wid],
align='start')
else:
# Header widget
header_wid = LogoWidget(style=logo_style)
header_wid.margin = '0.2cm'
options_box = ipywidgets.Tab(
children=[info_wid, channel_options_wid, landmark_options_wid,
renderer_options_wid, save_figure_wid], margin='0.2cm')
tab_titles = ['Info', 'Channels', 'Landmarks', 'Renderer', 'Export']
for (k, tl) in enumerate(tab_titles):
options_box.set_title(k, tl)
if n_images > 1:
wid = ipywidgets.VBox(children=[header_wid, options_box], align='start')
else:
wid = ipywidgets.HBox(children=[header_wid, options_box], align='start')
# Set widget's style
wid.box_style = widget_box_style
wid.border_radius = widget_border_radius
wid.border_width = widget_border_width
wid.border_color = map_styles_to_hex_colours(widget_box_style)
# Display final widget
ipydisplay.display(wid)
# Trigger initial visualization
render_function({})
def visualize_patches(patches, patch_centers, figure_size=(10, 8), style='coloured',
browser_style='buttons', custom_info_callback=None):
r"""
Widget that allows browsing through a `list` of patch-based images.
    The patches argument can take either of the two formats that are returned
    from the `extract_patches()` and `extract_patches_around_landmarks()`
    methods of `menpo.image.Image`. Specifically it can be:
1. ``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray`
2. `list` of ``n_center * n_offset`` `menpo.image.Image` objects
The patches can have a combination of different attributes, e.g. number of
centers, number of offsets, number of channels etc. The widget has options
tabs regarding the visualized patches, channels, the renderer (lines,
markers, numbering, figure, axes, image) and saving the figure to file.
Parameters
----------
patches : `list`
The `list` of patch-based images to be visualized. It can consist of
objects with any of the two formats that are returned from the
`extract_patches()` and `extract_patches_around_landmarks()` methods.
Specifically, it can either be an
``(n_center, n_offset, self.n_channels, patch_shape)`` `ndarray` or a
`list` of ``n_center * n_offset`` `menpo.image.Image` objects.
patch_centers : `list` of `menpo.shape.PointCloud`
The centers to set the patches around. If the `list` has only one
`menpo.shape.PointCloud` then this will be used for all patches members.
Otherwise, it needs to have the same length as patches.
figure_size : (`int`, `int`), optional
The initial size of the rendered figure.
style : ``{'coloured', 'minimal'}``, optional
If ``'coloured'``, then the style of the widget will be coloured. If
``minimal``, then the style is simple using black and white colours.
browser_style : ``{'buttons', 'slider'}``, optional
It defines whether the selector of the objects will have the form of
plus/minus buttons or a slider.
custom_info_callback: `function` or ``None``, optional
If not None, it should be a function that accepts an image and returns
a list of custom messages to be printed per image. Each custom message
will be printed in a separate line.
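    Examples
    --------
    A hypothetical sketch (the asset, the group name 'PTS' and the patch
    shape are assumptions): ::
        import menpo.io as mio
        from menpowidgets import visualize_patches
        image = mio.import_builtin_asset.takeo_ppm()
        patches = image.extract_patches_around_landmarks(
            group='PTS', patch_shape=(24, 24))
        visualize_patches(patches, image.landmarks['PTS'].lms)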
"""
# Ensure that the code is being run inside a Jupyter kernel!
from .utils import verify_ipython_and_kernel
verify_ipython_and_kernel()
print('Initializing...')
# Make sure that patches is a list even with one patches member
if (isinstance(patches, list) and isinstance(patches[0], Image)) or \
not isinstance(patches, list):
patches = [patches]
# Make sure that patch_centers is a list even with one pointcloud
if not isinstance(patch_centers, list):
patch_centers = [patch_centers] * len(patches)
elif isinstance(patch_centers, list) and len(patch_centers) == 1:
patch_centers *= len(patches)
# Make sure all patch-based images are in the single array format
for i in range(len(patches)):
if isinstance(patches[i], list):
patches[i] = _convert_patches_list_to_single_array(
patches[i], patch_centers[i].n_points)
# Get the number of patch_based images
n_patches = len(patches)
# Define the styling options
if style == 'coloured':
logo_style = 'warning'
widget_box_style = 'warning'
widget_border_radius = 10
widget_border_width = 1
animation_style = 'warning'
channels_style = 'info'
patches_style = 'minimal'
patches_subwidgets_style = 'danger'
info_style = 'info'
renderer_style = 'info'
renderer_tabs_style = 'minimal'
save_figure_style = 'danger'
else:
logo_style = 'minimal'
widget_box_style = ''
widget_border_radius = 0
widget_border_width = 0
channels_style = 'minimal'
patches_style = 'minimal'
patches_subwidgets_style = 'minimal'
animation_style = 'minimal'
info_style = 'minimal'
renderer_style = 'minimal'
renderer_tabs_style = 'minimal'
save_figure_style = 'minimal'
# Define render function
def render_function(change):
# Clear current figure, but wait until the generation of the new data
# that will be rendered
ipydisplay.clear_output(wait=True)
# get selected index
im = image_number_wid.selected_values if n_patches > 1 else 0
# show patch-based image with selected options
options = renderer_options_wid.selected_values['lines']
options.update(renderer_options_wid.selected_values['markers'])
options.update(renderer_options_wid.selected_values['numbering'])
options.update(renderer_options_wid.selected_values['axes'])
options.update(renderer_options_wid.selected_values['image'])
options.update(patch_options_wid.selected_values)
new_figure_size = (
renderer_options_wid.selected_values['zoom_one'] * figure_size[0],
renderer_options_wid.selected_values['zoom_one'] * figure_size[1])
# show image with selected options
renderer = render_patches(
patches=patches[im], patch_centers=patch_centers[im],
renderer=save_figure_wid.renderer, figure_size=new_figure_size,
channels=channel_options_wid.selected_values['channels'],
glyph_enabled=channel_options_wid.selected_values['glyph_enabled'],
glyph_block_size=channel_options_wid.selected_values['glyph_block_size'],
glyph_use_negative=channel_options_wid.selected_values['glyph_use_negative'],
sum_enabled=channel_options_wid.selected_values['sum_enabled'],
**options)
# update info text widget
update_info(patches[im], custom_info_callback=custom_info_callback)
# Save the current figure id
save_figure_wid.renderer = renderer
# Define function that updates the info text
def update_info(ptchs, custom_info_callback=None):
text_per_line = [
 Patch-Based Image with {} patche{} and {} offset{}.".format(">
            "> Patch-Based Image with {} patch{} and {} offset{}.".format(
                ptchs.shape[0], 'es' * (ptchs.shape[0] > 1), ptchs.shape[1],
                's' * (ptchs.shape[1] > 1)),
"> Each patch has size {}H x {}W with {} channel{}.".format(
ptchs.shape[3], ptchs.shape[4], ptchs.shape[2],
's' * (ptchs.shape[2] > 1)),
"> min={:.3f}, max={:.3f}".format(ptchs.min(), ptchs.max())]
if custom_info_callback is not None:
# iterate over the list of messages returned by the callback
# function and append them in the text_per_line.
for msg in custom_info_callback(ptchs):
text_per_line.append('> {}'.format(msg))
info_wid.set_widget_state(text_per_line=text_per_line)
# Create widgets
patch_options_wid = PatchOptionsWidget(
n_patches=patches[0].shape[0], n_offsets=patches[0].shape[1],
render_function=render_function, style=patches_style,
subwidgets_style=patches_subwidgets_style)
channel_options_wid = ChannelOptionsWidget(
n_channels=patches[0].shape[2], image_is_masked=False,
render_function=render_function, style=channels_style)
renderer_options_wid = RendererOptionsWidget(
options_tabs=['markers', 'lines', 'numbering', 'zoom_one', 'axes',
'image'], labels=None,
axes_x_limits=None, axes_y_limits=None,
render_function=None, style=renderer_style,
tabs_style=renderer_tabs_style)
renderer_options_wid.options_widgets[5].interpolation_checkbox.value = True
renderer_options_wid.add_render_function(render_function)
info_wid = TextPrintWidget(text_per_line=[''] * 3, style=info_style)
save_figure_wid = SaveFigureOptionsWidget(renderer=None,
style=save_figure_style)
# Group widgets
if n_patches > 1:
# Define function that updates options' widgets state
def update_widgets(change):
# Get new groups and labels, then update landmark options
im = 0
if n_patches > 1:
im = image_number_wid.selected_values
# Update patch options
patch_options_wid.set_widget_state(
n_patches=patches[im].shape[0], n_offsets=patches[im].shape[1],
allow_callback=False)
# Update channels options
channel_options_wid.set_widget_state(
n_channels=patches[im].shape[2], image_is_masked=False,
allow_callback=True)
# Image selection slider
index = {'min': 0, 'max': n_patches-1, 'step': 1, 'index': 0}
image_number_wid = AnimationOptionsWidget(
index, render_function=update_widgets, index_style=browser_style,
interval=0.2, description='Image', loop_enabled=True,
continuous_update=False, style=animation_style)
# Header widget
header_wid = ipywidgets.HBox(
children=[LogoWidget(style=logo_style), image_number_wid],
align='start')
else:
# Header widget
header_wid = LogoWidget(style=logo_style)
header_wid.margin = '0.2cm'
options_box = ipywidgets.Tab(
children=[info_wid, patch_options_wid, channel_options_wid,
renderer_options_wid, save_figure_wid], margin='0.2cm')
tab_titles = ['Info', 'Patches', 'Channels', 'Renderer', 'Export']
for (k, tl) in enumerate(tab_titles):
options_box.set_title(k, tl)
if n_patches > 1:
wid = ipywidgets.VBox(children=[header_wid, options_box], align='start')
else:
wid = ipywidgets.HBox(children=[header_wid, options_box], align='start')
# Set widget's style
wid.box_style = widget_box_style
wid.border_radius = widget_border_radius
wid.border_width = widget_border_width
wid.border_color = map_styles_to_hex_colours(widget_box_style)
# Display final widget
ipydisplay.display(wid)
# Trigger initial visualization
render_function({})
def plot_graph(x_axis, y_axis, legend_entries=None, figure_size=(10, 6),
style='coloured'):
r"""
Widget that allows plotting various curves in a graph.
The widget has options tabs regarding the graph and the renderer (lines,
markers, legend, figure, axes, grid) and saving the figure to file.
Parameters
----------
x_axis : `list` of `float`
The values of the horizontal axis. Note that these values are common for
all the curves.
y_axis : `list` of `lists` of `float`
A `list` that stores a `list` of values to be plotted for each curve.
    legend_entries : `list` of `str` or ``None``, optional
        The `list` of names that will appear on the legend for each curve. If
        ``None``, then the names default to ``'curve {}'.format(i)``.
figure_size : (`int`, `int`), optional
The initial size of the rendered figure.
style : ``{'coloured', 'minimal'}``, optional
If ``'coloured'``, then the style of the widget will be coloured. If
``minimal``, then the style is simple using black and white colours.
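    Examples
    --------
    A minimal, self-contained sketch: ::
        from menpowidgets import plot_graph
        x_axis = [0.0, 0.1, 0.2, 0.3]
        y_axis = [[0.0, 0.3, 0.5, 0.6],
                  [0.1, 0.2, 0.4, 0.8]]
        plot_graph(x_axis, y_axis, legend_entries=['train', 'test'])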
"""
# Ensure that the code is being run inside a Jupyter kernel!
from .utils import verify_ipython_and_kernel
verify_ipython_and_kernel()
from menpo.visualize import plot_curve
print('Initializing...')
# Get number of curves to be plotted
n_curves = len(y_axis)
# Define the styling options
if style == 'coloured':
logo_style = 'danger'
widget_box_style = 'danger'
tabs_style = 'warning'
save_figure_style = 'warning'
else:
logo_style = 'minimal'
widget_box_style = 'minimal'
tabs_style = 'minimal'
save_figure_style = 'minimal'
# Parse options
if legend_entries is None:
legend_entries = ["curve {}".format(i) for i in range(n_curves)]
# Define render function
def render_function(change):
# Clear current figure, but wait until the generation of the new data
# that will be rendered
ipydisplay.clear_output(wait=True)
# plot with selected options
opts = wid.selected_values.copy()
new_figure_size = (
wid.selected_values['zoom'][0] * figure_size[0],
wid.selected_values['zoom'][1] * figure_size[1])
del opts['zoom']
renderer = plot_curve(
x_axis=x_axis, y_axis=y_axis, figure_size=new_figure_size,
figure_id=save_figure_wid.renderer.figure_id, new_figure=False,
**opts)
# show plot
plt.show()
# Save the current figure id
save_figure_wid.renderer = renderer
# Create widgets
wid = PlotOptionsWidget(legend_entries=legend_entries,
render_function=render_function,
style=widget_box_style, tabs_style=tabs_style)
save_figure_wid = SaveFigureOptionsWidget(renderer=None,
style=save_figure_style)
# Group widgets
logo = LogoWidget(style=logo_style)
logo.margin = '0.1cm'
tmp_children = list(wid.options_tab.children)
tmp_children.append(save_figure_wid)
wid.options_tab.children = tmp_children
wid.options_tab.set_title(0, 'Figure')
wid.options_tab.set_title(1, 'Renderer')
wid.options_tab.set_title(2, 'Legend')
wid.options_tab.set_title(3, 'Axes')
wid.options_tab.set_title(4, 'Zoom')
wid.options_tab.set_title(5, 'Grid')
wid.options_tab.set_title(6, 'Export')
wid.children = [logo, wid.options_tab]
wid.align = 'start'
# Display final widget
ipydisplay.display(wid)
# Trigger initial visualization
render_function({})
def save_matplotlib_figure(renderer, style='coloured'):
r"""
Widget that allows to save a figure, which was generated with Matplotlib,
to file.
Parameters
----------
renderer : `menpo.visualize.viewmatplotlib.MatplotlibRenderer`
The Matplotlib renderer object.
style : ``{'coloured', 'minimal'}``, optional
If ``'coloured'``, then the style of the widget will be coloured. If
``minimal``, then the style is simple using black and white colours.
"""
# Ensure that the code is being run inside a Jupyter kernel!
from .utils import verify_ipython_and_kernel
verify_ipython_and_kernel()
# Create sub-widgets
if style == 'coloured':
style = 'warning'
logo_wid = LogoWidget(style='minimal')
save_figure_wid = SaveFigureOptionsWidget(renderer, style=style)
save_figure_wid.margin = '0.1cm'
logo_wid.margin = '0.1cm'
wid = ipywidgets.HBox(children=[logo_wid, save_figure_wid])
# Display widget
ipydisplay.display(wid)
def features_selection(style='coloured'):
r"""
Widget that allows selecting a features function and its options. The
widget supports all features from `menpo.feature` and has a preview tab.
It returns a `list` of length 1 with the selected features function closure.
Parameters
----------
style : ``{'coloured', 'minimal'}``, optional
If ``'coloured'``, then the style of the widget will be coloured. If
``minimal``, then the style is simple using black and white colours.
Returns
-------
features_function : `list` of length ``1``
The function closure of the features function using `functools.partial`.
So the function can be called as: ::
features_image = features_function[0](image)
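    Examples
    --------
    A hypothetical session (``image`` stands for any `menpo.image.Image`): ::
        from menpowidgets import features_selection
        features_function = features_selection()
        # ... choose a feature in the widget and press 'Select' ...
        features_image = features_function[0](image)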
"""
# Ensure that the code is being run inside a Jupyter kernel!
from .utils import verify_ipython_and_kernel
verify_ipython_and_kernel()
# Styling options
if style == 'coloured':
logo_style = 'info'
outer_style = 'info'
inner_style = 'warning'
but_style = 'primary'
rad = 10
elif style == 'minimal':
logo_style = 'minimal'
outer_style = ''
inner_style = 'minimal'
but_style = ''
rad = 0
else:
raise ValueError('style must be either coloured or minimal')
# Create sub-widgets
logo_wid = LogoWidget(style=logo_style)
features_options_wid = FeatureOptionsWidget(style=inner_style)
select_but = ipywidgets.Button(description='Select')
features_wid = ipywidgets.VBox(children=[features_options_wid, select_but],
align='center')
# Create final widget
wid = ipywidgets.HBox(children=[logo_wid, features_wid])
format_box(wid, outer_style, True,
map_styles_to_hex_colours(outer_style), 'solid', 1, rad, 0, 0)
logo_wid.margin = '0.3cm'
features_options_wid.margin = '0.3cm'
select_but.margin = '0.2cm'
select_but.button_style = but_style
# function for select button
def select_function(name):
wid.close()
output.pop(0)
output.append(features_options_wid.function)
select_but.on_click(select_function)
# Display widget
ipydisplay.display(wid)
    # Initialize output with the currently selected feature function. It
    # needs to be a list so that it's mutable and stays in sync with the
    # frontend.
output = [features_options_wid.function]
return output
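# Usage sketch: the returned list is mutated in place once `Select` is
# pressed, so the chosen closure can be applied afterwards (`image` is
# assumed to be a `menpo.image.Image`):
#
#     features = features_selection()
#     # ... press `Select` in the widget ...
#     features_image = features[0](image)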
def visualize_shape_model(shape_model, n_parameters=5, mode='multiple',
parameters_bounds=(-3.0, 3.0), figure_size=(10, 8),
style='coloured'):
r"""
Widget that allows the dynamic visualization of a multi-scale linear
statistical shape model.
Parameters
----------
shape_model : `list` of `menpo.shape.PCAModel` or `subclass`
        The multi-scale shape model to be visualized. Note that each level
        can have a different number of components.
n_parameters : `int` or `list` of `int` or ``None``, optional
The number of principal components to be used for the parameters
sliders. If `int`, then the number of sliders per level is the minimum
between `n_parameters` and the number of active components per level.
If `list` of `int`, then a number of sliders is defined per level.
If ``None``, all the active components per level will have a slider.
mode : ``{'single', 'multiple'}``, optional
If ``'single'``, then only a single slider is constructed along with a
drop down menu. If ``'multiple'``, then a slider is constructed for each
parameter.
parameters_bounds : (`float`, `float`), optional
The minimum and maximum bounds, in std units, for the sliders.
figure_size : (`int`, `int`), optional
The size of the plotted figures.
style : ``{'coloured', 'minimal'}``, optional
If ``'coloured'``, then the style of the widget will be coloured. If
``minimal``, then the style is simple using black and white colours.
"""
# Ensure that the code is being run inside a Jupyter kernel!
from .utils import verify_ipython_and_kernel
verify_ipython_and_kernel()
from menpo.visualize.viewmatplotlib import (_set_axes_options,
_parse_axes_limits)
print('Initializing...')
# Make sure that shape_model is a list even with one member
if not isinstance(shape_model, list):
shape_model = [shape_model]
# Get the number of levels (i.e. number of shape models)
n_levels = len(shape_model)
# Define the styling options
if style == 'coloured':
model_parameters_style = 'info'
logo_style = 'warning'
widget_box_style = 'warning'
widget_border_radius = 10
widget_border_width = 1
info_style = 'info'
renderer_box_style = 'info'
renderer_box_border_colour = map_styles_to_hex_colours('info')
renderer_box_border_radius = 10
renderer_style = 'danger'
renderer_tabs_style = 'danger'
save_figure_style = 'danger'
elif style == 'minimal':
model_parameters_style = 'minimal'
logo_style = 'minimal'
widget_box_style = ''
widget_border_radius = 0
widget_border_width = 0
info_style = 'minimal'
renderer_box_style = ''
renderer_box_border_colour = 'black'
renderer_box_border_radius = 0
renderer_style = 'minimal'
renderer_tabs_style = 'minimal'
save_figure_style = 'minimal'
else:
raise ValueError("style must be either coloured or minimal")
# Get the maximum number of components per level
max_n_params = [sp.n_active_components for sp in shape_model]
# Check the given number of parameters (the returned n_parameters is a list
# of len n_scales)
n_parameters = check_n_parameters(n_parameters, n_levels, max_n_params)
# Define render function
def render_function(change):
# Clear current figure, but wait until the generation of the new data
# that will be rendered
ipydisplay.clear_output(wait=True)
# Get selected level
level = 0
if n_levels > 1:
level = level_wid.value
# Compute weights
parameters = model_parameters_wid.selected_values
weights = (parameters *
shape_model[level].eigenvalues[:len(parameters)] ** 0.5)
# Get the mean
mean = shape_model[level].mean()
# Render shape instance with selected options
tmp1 = renderer_options_wid.selected_values['lines']
tmp2 = renderer_options_wid.selected_values['markers']
options = renderer_options_wid.selected_values['numbering']
options.update(renderer_options_wid.selected_values['axes'])
new_figure_size = (
renderer_options_wid.selected_values['zoom_one'] * figure_size[0],
renderer_options_wid.selected_values['zoom_one'] * figure_size[1])
if mode_wid.value == 1:
# Deformation mode
# Compute instance
instance = shape_model[level].instance(weights)
# Render mean shape
if mean_wid.value:
mean.view(figure_id=save_figure_wid.renderer.figure_id,
new_figure=False, image_view=axes_mode_wid.value == 1,
figure_size=None, render_lines=tmp1['render_lines'],
line_colour='yellow', line_style=tmp1['line_style'],
line_width=tmp1['line_width'],
render_markers=tmp2['render_markers'],
marker_style=tmp2['marker_style'],
marker_size=tmp2['marker_size'],
marker_face_colour='yellow',
marker_edge_colour='yellow',
marker_edge_width=tmp2['marker_edge_width'])
# Render instance
renderer = instance.view(
figure_id=save_figure_wid.renderer.figure_id,
new_figure=False, image_view=axes_mode_wid.value == 1,
figure_size=new_figure_size,
render_lines=tmp1['render_lines'],
line_colour=tmp1['line_colour'][0],
line_style=tmp1['line_style'],
line_width=tmp1['line_width'],
render_markers=tmp2['render_markers'],
marker_style=tmp2['marker_style'],
marker_size=tmp2['marker_size'],
marker_face_colour=tmp2['marker_face_colour'][0],
marker_edge_colour=tmp2['marker_edge_colour'][0],
marker_edge_width=tmp2['marker_edge_width'], **options)
# Get instance range
instance_range = instance.range()
else:
# Vectors mode
# Compute instance
instance_lower = shape_model[level].instance([-p for p in weights])
instance_upper = shape_model[level].instance(weights)
# Render mean shape
renderer = mean.view(
figure_id=save_figure_wid.renderer.figure_id,
new_figure=False, image_view=axes_mode_wid.value == 1,
figure_size=new_figure_size,
render_lines=tmp1['render_lines'],
line_colour=tmp1['line_colour'][0],
line_style=tmp1['line_style'], line_width=tmp1['line_width'],
render_markers=tmp2['render_markers'],
marker_style=tmp2['marker_style'],
marker_size=tmp2['marker_size'],
marker_face_colour=tmp2['marker_face_colour'][0],
marker_edge_colour=tmp2['marker_edge_colour'][0],
marker_edge_width=tmp2['marker_edge_width'])
# Render vectors
ax = plt.gca()
x_min = np.Inf
y_min = np.Inf
x_max = -np.Inf
y_max = -np.Inf
for p in range(mean.n_points):
xm = mean.points[p, 0]
ym = mean.points[p, 1]
xl = instance_lower.points[p, 0]
yl = instance_lower.points[p, 1]
xu = instance_upper.points[p, 0]
yu = instance_upper.points[p, 1]
if axes_mode_wid.value == 1:
# image mode
lines = [[(ym, xm), (yl, xl)], [(ym, xm), (yu, xu)]]
else:
# point cloud mode
lines = [[(xm, ym), (xl, yl)], [(xm, ym), (xu, yu)]]
lc = mc.LineCollection(lines, colors=('g', 'b'),
linestyles='solid', linewidths=2)
# update min, max
y_min = np.min([y_min, xl, xu])
y_max = np.max([y_max, xl, xu])
x_min = np.min([x_min, yl, yu])
x_max = np.max([x_max, yl, yu])
# add collection
ax.add_collection(lc)
tmp = renderer_options_wid.selected_values['axes']
# parse axes limits
axes_x_limits, axes_y_limits = _parse_axes_limits(
x_min, x_max, y_min, y_max, tmp['axes_x_limits'],
tmp['axes_y_limits'])
_set_axes_options(
ax, render_axes=tmp['render_axes'],
inverted_y_axis=axes_mode_wid.value == 1,
axes_font_name=tmp['axes_font_name'],
axes_font_size=tmp['axes_font_size'],
axes_font_style=tmp['axes_font_style'],
axes_font_weight=tmp['axes_font_weight'],
axes_x_limits=axes_x_limits, axes_y_limits=axes_y_limits,
axes_x_ticks=tmp['axes_x_ticks'],
axes_y_ticks=tmp['axes_y_ticks'])
# Get instance range
instance_range = mean.range()
plt.show()
# Save the current figure id
save_figure_wid.renderer = renderer
# Update info
update_info(level, instance_range)
# Define function that updates the info text
def update_info(level, instance_range):
text_per_line = [
"> Level {} out of {}".format(level + 1, n_levels),
"> {} components in total".format(shape_model[level].n_components),
"> {} active components".format(
shape_model[level].n_active_components),
"> {:.1f}% variance kept".format(
shape_model[level].variance_ratio() * 100),
"> Instance range: {:.1f} x {:.1f}".format(instance_range[0],
instance_range[1]),
"> {} landmark points, {} features".format(
shape_model[level].mean().n_points,
shape_model[level].n_features)]
info_wid.set_widget_state(text_per_line=text_per_line)
# Plot variance function
def plot_variance(name):
# Clear current figure, but wait until the generation of the new data
# that will be rendered
ipydisplay.clear_output(wait=True)
# Get selected level
level = level_wid.value if n_levels > 1 else 0
# Render
new_figure_size = (
renderer_options_wid.selected_values['zoom_one'] * 10,
renderer_options_wid.selected_values['zoom_one'] * 3)
plt.subplot(121)
shape_model[level].plot_eigenvalues_ratio(
figure_id=save_figure_wid.renderer.figure_id)
plt.subplot(122)
renderer = shape_model[level].plot_eigenvalues_cumulative_ratio(
figure_id=save_figure_wid.renderer.figure_id,
figure_size=new_figure_size)
plt.show()
# Save the current figure id
save_figure_wid.renderer = renderer
# Create widgets
mode_dict = OrderedDict()
mode_dict['Deformation'] = 1
mode_dict['Vectors'] = 2
mode_wid = ipywidgets.RadioButtons(options=mode_dict,
description='Mode:', value=1)
mode_wid.observe(render_function, names='value', type='change')
mean_wid = ipywidgets.Checkbox(value=False,
description='Render mean shape')
mean_wid.observe(render_function, names='value', type='change')
# Function that controls mean shape checkbox visibility
def mean_visible(change):
if change['new'] == 1:
mean_wid.disabled = False
else:
mean_wid.disabled = True
mean_wid.value = False
mode_wid.observe(mean_visible, names='value', type='change')
model_parameters_wid = LinearModelParametersWidget(
n_parameters[0], render_function, params_str='Parameter ',
mode=mode, params_bounds=parameters_bounds, params_step=0.1,
plot_variance_visible=True, plot_variance_function=plot_variance,
animation_step=0.5, interval=0., loop_enabled=True,
style=model_parameters_style, continuous_update=False)
axes_mode_wid = ipywidgets.RadioButtons(
options={'Image': 1, 'Point cloud': 2}, description='Axes mode:',
value=1)
axes_mode_wid.observe(render_function, names='value', type='change')
renderer_options_wid = RendererOptionsWidget(
options_tabs=['markers', 'lines', 'numbering', 'zoom_one', 'axes'],
labels=None, axes_x_limits=0.1, axes_y_limits=0.1,
render_function=render_function, style=renderer_style,
tabs_style=renderer_tabs_style)
renderer_options_box = ipywidgets.VBox(
children=[axes_mode_wid, renderer_options_wid], align='center',
margin='0.1cm')
info_wid = TextPrintWidget(text_per_line=[''] * 6, style=info_style)
save_figure_wid = SaveFigureOptionsWidget(renderer=None,
style=save_figure_style)
# Define function that updates options' widgets state
def update_widgets(change):
model_parameters_wid.set_widget_state(
n_parameters=n_parameters[change['new']], params_str='Parameter ',
allow_callback=True)
# Group widgets
if n_levels > 1:
radio_str = OrderedDict()
for l in range(n_levels):
if l == 0:
radio_str["Level {} (low)".format(l)] = l
elif l == n_levels - 1:
radio_str["Level {} (high)".format(l)] = l
else:
radio_str["Level {}".format(l)] = l
level_wid = ipywidgets.RadioButtons(
options=radio_str, description='Pyramid:', value=n_levels-1)
level_wid.observe(update_widgets, names='value', type='change')
level_wid.observe(render_function, names='value', type='change')
radio_children = [level_wid, mode_wid, mean_wid]
else:
radio_children = [mode_wid, mean_wid]
radio_wids = ipywidgets.VBox(children=radio_children, margin='0.3cm')
tmp_wid = ipywidgets.HBox(children=[radio_wids, model_parameters_wid])
options_box = ipywidgets.Tab(children=[tmp_wid, renderer_options_box,
info_wid, save_figure_wid])
tab_titles = ['Model', 'Renderer', 'Info', 'Export']
for (k, tl) in enumerate(tab_titles):
options_box.set_title(k, tl)
logo_wid = LogoWidget(style=logo_style)
logo_wid.margin = '0.1cm'
wid = ipywidgets.HBox(children=[logo_wid, options_box], align='start')
# Set widget's style
wid.box_style = widget_box_style
wid.border_radius = widget_border_radius
wid.border_width = widget_border_width
wid.border_color = map_styles_to_hex_colours(widget_box_style)
renderer_options_wid.margin = '0.2cm'
format_box(renderer_options_box, renderer_box_style, True,
renderer_box_border_colour, 'solid', 1,
renderer_box_border_radius, '0.1cm', '0.2cm')
# Display final widget
ipydisplay.display(wid)
# Trigger initial visualization
render_function({})
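# Usage sketch (assumes `shapes` is a list of `menpo.shape.PointCloud`
# training samples; a single-level model may be passed directly):
#
#     from menpo.model import PCAModel
#     shape_model = PCAModel(shapes)
#     visualize_shape_model(shape_model, n_parameters=10)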
def visualize_appearance_model(appearance_model, n_parameters=5,
mode='multiple', parameters_bounds=(-3.0, 3.0),
figure_size=(10, 8), style='coloured'):
r"""
Widget that allows the dynamic visualization of a multi-scale linear
statistical appearance model.
Parameters
----------
appearance_model : `list` of `menpo.model.PCAModel` or subclass
        The multi-scale appearance model to be visualized. Note that each
        level can have a different number of components.
n_parameters : `int` or `list` of `int` or ``None``, optional
The number of principal components to be used for the parameters
sliders. If `int`, then the number of sliders per level is the minimum
between `n_parameters` and the number of active components per level.
If `list` of `int`, then a number of sliders is defined per level.
If ``None``, all the active components per level will have a slider.
mode : ``{'single', 'multiple'}``, optional
If ``'single'``, then only a single slider is constructed along with a
drop down menu. If ``'multiple'``, then a slider is constructed for each
parameter.
parameters_bounds : (`float`, `float`), optional
The minimum and maximum bounds, in std units, for the sliders.
figure_size : (`int`, `int`), optional
The size of the plotted figures.
style : ``{'coloured', 'minimal'}``, optional
If ``'coloured'``, then the style of the widget will be coloured. If
``minimal``, then the style is simple using black and white colours.
"""
# Ensure that the code is being run inside a Jupyter kernel!
from .utils import verify_ipython_and_kernel
verify_ipython_and_kernel()
print('Initializing...')
# Make sure that appearance_model is a list even with one member
if not isinstance(appearance_model, list):
appearance_model = [appearance_model]
# Get the number of levels (i.e. number of appearance models)
n_levels = len(appearance_model)
# Define the styling options
if style == 'coloured':
model_parameters_style = 'info'
channels_style = 'info'
landmarks_style = 'info'
logo_style = 'success'
widget_box_style = 'success'
widget_border_radius = 10
widget_border_width = 1
info_style = 'info'
renderer_style = 'warning'
renderer_tabs_style = 'warning'
save_figure_style = 'danger'
elif style == 'minimal':
model_parameters_style = 'minimal'
channels_style = 'minimal'
landmarks_style = 'minimal'
logo_style = 'minimal'
widget_box_style = ''
widget_border_radius = 0
widget_border_width = 0
info_style = 'minimal'
renderer_style = 'minimal'
renderer_tabs_style = 'minimal'
save_figure_style = 'minimal'
else:
raise ValueError("style must be either coloured or minimal")
# Get the maximum number of components per level
max_n_params = [ap.n_active_components for ap in appearance_model]
# Check the given number of parameters (the returned n_parameters is a list
# of len n_scales)
n_parameters = check_n_parameters(n_parameters, n_levels, max_n_params)
# Define render function
def render_function(change):
# Clear current figure, but wait until the generation of the new data
# that will be rendered
ipydisplay.clear_output(wait=True)
# Get selected level
level = 0
if n_levels > 1:
level = level_wid.value
# Compute weights and instance
parameters = model_parameters_wid.selected_values
weights = (parameters *
appearance_model[level].eigenvalues[:len(parameters)] ** 0.5)
instance = appearance_model[level].instance(weights)
image_is_masked = isinstance(instance, MaskedImage)
selected_group = landmark_options_wid.selected_values['group']
# show landmarks with selected options
tmp1 = renderer_options_wid.selected_values['lines']
tmp2 = renderer_options_wid.selected_values['markers']
options = renderer_options_wid.selected_values['numbering']
options.update(renderer_options_wid.selected_values['axes'])
options.update(renderer_options_wid.selected_values['image'])
options.update(channel_options_wid.selected_values)
options.update(landmark_options_wid.selected_values)
new_figure_size = (
renderer_options_wid.selected_values['zoom_one'] * figure_size[0],
renderer_options_wid.selected_values['zoom_one'] * figure_size[1])
# get line and marker colours
line_colour = []
marker_face_colour = []
marker_edge_colour = []
if instance.has_landmarks:
for lbl in landmark_options_wid.selected_values['with_labels']:
lbl_idx = instance.landmarks[selected_group].labels.index(lbl)
line_colour.append(tmp1['line_colour'][lbl_idx])
marker_face_colour.append(tmp2['marker_face_colour'][lbl_idx])
marker_edge_colour.append(tmp2['marker_edge_colour'][lbl_idx])
# show image with selected options
renderer = render_image(
image=instance, renderer=save_figure_wid.renderer,
image_is_masked=image_is_masked,
render_lines=tmp1['render_lines'], line_style=tmp1['line_style'],
line_width=tmp1['line_width'], line_colour=line_colour,
render_markers=tmp2['render_markers'],
marker_style=tmp2['marker_style'],
marker_size=tmp2['marker_size'],
marker_edge_width=tmp2['marker_edge_width'],
marker_edge_colour=marker_edge_colour,
marker_face_colour=marker_face_colour,
figure_size=new_figure_size, legend_n_columns=None,
legend_border_axes_pad=None, legend_rounded_corners=None,
legend_title=None, legend_horizontal_spacing=None,
legend_shadow=None, legend_location=None, legend_font_name=None,
legend_bbox_to_anchor=None, legend_border=None,
legend_marker_scale=None, legend_vertical_spacing=None,
legend_font_weight=None, legend_font_size=None, render_legend=False,
legend_font_style=None, legend_border_padding=None, **options)
# Update info
update_info(instance, level, selected_group)
# Save the current figure id
save_figure_wid.renderer = renderer
# Define function that updates the info text
def update_info(image, level, group):
lvl_app_mod = appearance_model[level]
lp = 0 if group is None else image.landmarks[group].lms.n_points
text_per_line = [
"> Level: {} out of {}.".format(level + 1, n_levels),
"> {} components in total.".format(lvl_app_mod.n_components),
"> {} active components.".format(lvl_app_mod.n_active_components),
"> {:.1f}% variance kept.".format(
lvl_app_mod.variance_ratio() * 100),
"> Reference shape of size {} with {} channel{}.".format(
image._str_shape(),
image.n_channels, 's' * (image.n_channels > 1)),
"> {} features.".format(lvl_app_mod.n_features),
"> {} landmark points.".format(lp),
"> Instance: min={:.3f}, max={:.3f}".format(image.pixels.min(),
image.pixels.max())]
info_wid.set_widget_state(text_per_line=text_per_line)
# Plot variance function
def plot_variance(name):
# Clear current figure, but wait until the generation of the new data
# that will be rendered
ipydisplay.clear_output(wait=True)
# Get selected level
level = 0
if n_levels > 1:
level = level_wid.value
# Render
new_figure_size = (
renderer_options_wid.selected_values['zoom_one'] * 10,
renderer_options_wid.selected_values['zoom_one'] * 3)
plt.subplot(121)
appearance_model[level].plot_eigenvalues_ratio(
figure_id=save_figure_wid.renderer.figure_id)
plt.subplot(122)
renderer = appearance_model[level].plot_eigenvalues_cumulative_ratio(
figure_id=save_figure_wid.renderer.figure_id,
figure_size=new_figure_size)
plt.show()
# Save the current figure id
save_figure_wid.renderer = renderer
# Create widgets
model_parameters_wid = LinearModelParametersWidget(
n_parameters[0], render_function, params_str='Parameter ',
mode=mode, params_bounds=parameters_bounds, params_step=0.1,
plot_variance_visible=True, plot_variance_function=plot_variance,
animation_step=0.5, interval=0., loop_enabled=True,
style=model_parameters_style, continuous_update=False)
groups_keys, labels_keys = extract_groups_labels_from_image(
appearance_model[0].mean())
first_label = labels_keys[0] if labels_keys else None
channel_options_wid = ChannelOptionsWidget(
n_channels=appearance_model[0].mean().n_channels,
image_is_masked=isinstance(appearance_model[0].mean(), MaskedImage),
render_function=render_function, style=channels_style)
renderer_options_wid = RendererOptionsWidget(
options_tabs=['image', 'markers', 'lines', 'numbering', 'zoom_one',
'axes'], labels=first_label,
axes_x_limits=None, axes_y_limits=None,
render_function=render_function, style=renderer_style,
tabs_style=renderer_tabs_style)
landmark_options_wid = LandmarkOptionsWidget(
group_keys=groups_keys, labels_keys=labels_keys,
render_function=render_function, style=landmarks_style,
renderer_widget=renderer_options_wid)
info_wid = TextPrintWidget(text_per_line=[''] * 8, style=info_style)
save_figure_wid = SaveFigureOptionsWidget(renderer=None,
style=save_figure_style)
# Define function that updates options' widgets state
def update_widgets(change):
value = change['new']
# Update model parameters widget
model_parameters_wid.set_widget_state(
n_parameters[value], params_str='Parameter ', allow_callback=False)
# Update channel options
channel_options_wid.set_widget_state(
n_channels=appearance_model[value].mean().n_channels,
image_is_masked=isinstance(appearance_model[value].mean(),
MaskedImage),
allow_callback=True)
# Group widgets
tmp_children = [model_parameters_wid]
if n_levels > 1:
radio_str = OrderedDict()
for l in range(n_levels):
if l == 0:
radio_str["Level {} (low)".format(l)] = l
elif l == n_levels - 1:
radio_str["Level {} (high)".format(l)] = l
else:
radio_str["Level {}".format(l)] = l
level_wid = ipywidgets.RadioButtons(
options=radio_str, description='Pyramid:', value=n_levels-1,
margin='0.3cm')
level_wid.observe(update_widgets, names='value', type='change')
        level_wid.observe(render_function, names='value', type='change')
tmp_children.insert(0, level_wid)
tmp_wid = ipywidgets.HBox(children=tmp_children)
options_box = ipywidgets.Tab(children=[tmp_wid, channel_options_wid,
landmark_options_wid,
renderer_options_wid,
info_wid, save_figure_wid])
tab_titles = ['Model', 'Channels', 'Landmarks', 'Renderer', 'Info',
'Export']
for (k, tl) in enumerate(tab_titles):
options_box.set_title(k, tl)
logo_wid = LogoWidget(style=logo_style)
logo_wid.margin = '0.1cm'
wid = ipywidgets.HBox(children=[logo_wid, options_box], align='start')
# Set widget's style
wid.box_style = widget_box_style
wid.border_radius = widget_border_radius
wid.border_width = widget_border_width
wid.border_color = map_styles_to_hex_colours(widget_box_style)
renderer_options_wid.margin = '0.2cm'
# Display final widget
ipydisplay.display(wid)
# Trigger initial visualization
render_function({})
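# Usage sketch (assumes `images` is a list of equally-shaped
# `menpo.image.Image` training samples, so they can be vectorised into a
# PCA basis):
#
#     from menpo.model import PCAModel
#     appearance_model = PCAModel(images)
#     visualize_appearance_model(appearance_model, n_parameters=10)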
def visualize_patch_appearance_model(appearance_model, centers,
n_parameters=5, mode='multiple',
parameters_bounds=(-3.0, 3.0),
figure_size=(10, 8), style='coloured'):
r"""
Widget that allows the dynamic visualization of a multi-scale linear
statistical patch-based appearance model.
Parameters
----------
appearance_model : `list` of `menpo.model.PCAModel` or subclass
        The multi-scale patch-based appearance model to be visualized. Note
        that each level can have a different number of components.
centers : `list` of `menpo.shape.PointCloud` or subclass
The centers to set the patches around. If the `list` has only one
`menpo.shape.PointCloud` then this will be used for all appearance model
levels. Otherwise, it needs to have the same length as
`appearance_model`.
n_parameters : `int` or `list` of `int` or ``None``, optional
The number of principal components to be used for the parameters
sliders. If `int`, then the number of sliders per level is the minimum
between `n_parameters` and the number of active components per level.
If `list` of `int`, then a number of sliders is defined per level.
If ``None``, all the active components per level will have a slider.
mode : ``{'single', 'multiple'}``, optional
If ``'single'``, then only a single slider is constructed along with a
drop down menu. If ``'multiple'``, then a slider is constructed for each
parameter.
parameters_bounds : (`float`, `float`), optional
The minimum and maximum bounds, in std units, for the sliders.
figure_size : (`int`, `int`), optional
The size of the plotted figures.
style : ``{'coloured', 'minimal'}``, optional
If ``'coloured'``, then the style of the widget will be coloured. If
``minimal``, then the style is simple using black and white colours.
"""
# Ensure that the code is being run inside a Jupyter kernel!
from .utils import verify_ipython_and_kernel
verify_ipython_and_kernel()
print('Initializing...')
# Make sure that appearance_model is a list even with one member
if not isinstance(appearance_model, list):
appearance_model = [appearance_model]
# Get the number of levels (i.e. number of appearance models)
n_levels = len(appearance_model)
# Make sure that centers is a list even with one pointcloud
if not isinstance(centers, list):
centers = [centers] * n_levels
elif isinstance(centers, list) and len(centers) == 1:
centers *= n_levels
# Define the styling options
if style == 'coloured':
model_parameters_style = 'info'
patches_style = 'minimal'
patches_subwidgets_style = 'info'
channels_style = 'info'
logo_style = 'success'
widget_box_style = 'success'
widget_border_radius = 10
widget_border_width = 1
info_style = 'info'
renderer_style = 'warning'
renderer_tabs_style = 'warning'
save_figure_style = 'danger'
elif style == 'minimal':
model_parameters_style = 'minimal'
patches_style = 'minimal'
patches_subwidgets_style = 'minimal'
channels_style = 'minimal'
logo_style = 'minimal'
widget_box_style = ''
widget_border_radius = 0
widget_border_width = 0
info_style = 'minimal'
renderer_style = 'minimal'
renderer_tabs_style = 'minimal'
save_figure_style = 'minimal'
else:
raise ValueError("style must be either coloured or minimal")
# Get the maximum number of components per level
max_n_params = [ap.n_active_components for ap in appearance_model]
# Check the given number of parameters (the returned n_parameters is a list
# of len n_scales)
n_parameters = check_n_parameters(n_parameters, n_levels, max_n_params)
# Define render function
def render_function(change):
# Clear current figure, but wait until the generation of the new data
# that will be rendered
ipydisplay.clear_output(wait=True)
# Get selected level
level = 0
if n_levels > 1:
level = level_wid.value
# Compute weights and instance
parameters = model_parameters_wid.selected_values
weights = (parameters *
appearance_model[level].eigenvalues[:len(parameters)] ** 0.5)
instance = appearance_model[level].instance(weights)
# Render instance with selected options
options = renderer_options_wid.selected_values['lines']
options.update(renderer_options_wid.selected_values['markers'])
options.update(renderer_options_wid.selected_values['numbering'])
options.update(renderer_options_wid.selected_values['axes'])
options.update(renderer_options_wid.selected_values['image'])
options.update(patch_options_wid.selected_values)
new_figure_size = (
renderer_options_wid.selected_values['zoom_one'] * figure_size[0],
renderer_options_wid.selected_values['zoom_one'] * figure_size[1])
# show image with selected options
renderer = render_patches(
patches=instance.pixels, patch_centers=centers[level],
renderer=save_figure_wid.renderer, figure_size=new_figure_size,
channels=channel_options_wid.selected_values['channels'],
glyph_enabled=channel_options_wid.selected_values['glyph_enabled'],
glyph_block_size=channel_options_wid.selected_values['glyph_block_size'],
glyph_use_negative=channel_options_wid.selected_values['glyph_use_negative'],
sum_enabled=channel_options_wid.selected_values['sum_enabled'],
**options)
# Update info
update_info(instance, level)
# Save the current figure id
save_figure_wid.renderer = renderer
# Define function that updates the info text
def update_info(image, level):
lvl_app_mod = appearance_model[level]
text_per_line = [
"> Level: {} out of {}.".format(level + 1, n_levels),
"> {} components in total.".format(lvl_app_mod.n_components),
"> {} active components.".format(lvl_app_mod.n_active_components),
"> {:.1f}% variance kept.".format(
lvl_app_mod.variance_ratio() * 100),
"> Each patch has size {}H x {}W with {} channel{}.".format(
image.pixels.shape[3], image.pixels.shape[4],
image.pixels.shape[2], 's' * (image.pixels.shape[2] > 1)),
"> {} features.".format(lvl_app_mod.n_features),
"> {} landmark points.".format(image.pixels.shape[0]),
"> Instance: min={:.3f}, max={:.3f}".format(image.pixels.min(),
image.pixels.max())]
info_wid.set_widget_state(text_per_line=text_per_line)
# Plot variance function
def plot_variance(name):
# Clear current figure, but wait until the generation of the new data
# that will be rendered
ipydisplay.clear_output(wait=True)
# Get selected level
level = 0
if n_levels > 1:
level = level_wid.value
# Render
new_figure_size = (
renderer_options_wid.selected_values['zoom_one'] * 10,
renderer_options_wid.selected_values['zoom_one'] * 3)
plt.subplot(121)
appearance_model[level].plot_eigenvalues_ratio(
figure_id=save_figure_wid.renderer.figure_id)
plt.subplot(122)
renderer = appearance_model[level].plot_eigenvalues_cumulative_ratio(
figure_id=save_figure_wid.renderer.figure_id,
figure_size=new_figure_size)
plt.show()
# Save the current figure id
save_figure_wid.renderer = renderer
# Create widgets
model_parameters_wid = LinearModelParametersWidget(
n_parameters[0], render_function, params_str='Parameter ',
mode=mode, params_bounds=parameters_bounds, params_step=0.1,
plot_variance_visible=True, plot_variance_function=plot_variance,
animation_step=0.5, interval=0., loop_enabled=True,
style=model_parameters_style, continuous_update=False)
patch_options_wid = PatchOptionsWidget(
n_patches=appearance_model[0].mean().pixels.shape[0],
n_offsets=appearance_model[0].mean().pixels.shape[1],
render_function=render_function, style=patches_style,
subwidgets_style=patches_subwidgets_style)
channel_options_wid = ChannelOptionsWidget(
n_channels=appearance_model[0].mean().pixels.shape[2],
image_is_masked=False, render_function=render_function,
style=channels_style)
renderer_options_wid = RendererOptionsWidget(
options_tabs=['image', 'markers', 'lines', 'numbering', 'zoom_one',
'axes'], labels=None,
axes_x_limits=None, axes_y_limits=None,
render_function=None, style=renderer_style,
tabs_style=renderer_tabs_style)
renderer_options_wid.options_widgets[0].interpolation_checkbox.value = True
renderer_options_wid.add_render_function(render_function)
info_wid = TextPrintWidget(text_per_line=[''] * 8, style=info_style)
save_figure_wid = SaveFigureOptionsWidget(renderer=None,
style=save_figure_style)
# Define function that updates options' widgets state
def update_widgets(change):
value = change['new']
# Update model parameters widget
model_parameters_wid.set_widget_state(n_parameters[value],
params_str='Parameter ',
allow_callback=False)
# Update patch options
patch_options_wid.set_widget_state(
n_patches=appearance_model[value].mean().pixels.shape[0],
n_offsets=appearance_model[value].mean().pixels.shape[1],
allow_callback=False)
# Update channels options
channel_options_wid.set_widget_state(
n_channels=appearance_model[value].mean().pixels.shape[2],
image_is_masked=False, allow_callback=True)
# Group widgets
tmp_children = [model_parameters_wid]
if n_levels > 1:
radio_str = OrderedDict()
for l in range(n_levels):
if l == 0:
radio_str["Level {} (low)".format(l)] = l
elif l == n_levels - 1:
radio_str["Level {} (high)".format(l)] = l
else:
radio_str["Level {}".format(l)] = l
level_wid = ipywidgets.RadioButtons(
options=radio_str, description='Pyramid:', value=n_levels-1,
margin='0.3cm')
level_wid.observe(update_widgets, names='value', type='change')
level_wid.observe(render_function, names='value', type='change')
tmp_children.insert(0, level_wid)
tmp_wid = ipywidgets.HBox(children=tmp_children)
options_box = ipywidgets.Tab(children=[tmp_wid, patch_options_wid,
channel_options_wid,
renderer_options_wid,
info_wid, save_figure_wid])
tab_titles = ['Model', 'Patches', 'Channels', 'Renderer', 'Info', 'Export']
for (k, tl) in enumerate(tab_titles):
options_box.set_title(k, tl)
logo_wid = LogoWidget(style=logo_style)
logo_wid.margin = '0.1cm'
wid = ipywidgets.HBox(children=[logo_wid, options_box], align='start')
# Set widget's style
wid.box_style = widget_box_style
wid.border_radius = widget_border_radius
wid.border_width = widget_border_width
wid.border_color = map_styles_to_hex_colours(widget_box_style)
renderer_options_wid.margin = '0.2cm'
# Display final widget
ipydisplay.display(wid)
# Trigger initial visualization
render_function({})
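# Usage sketch (hypothetical data: `patch_images` hold pixels shaped
# (n_patches, n_offsets, n_channels, H, W) and `centers` is the matching
# `menpo.shape.PointCloud`):
#
#     from menpo.model import PCAModel
#     patch_model = PCAModel(patch_images)
#     visualize_patch_appearance_model(patch_model, centers)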
def webcam_widget(canvas_width=640, hd=True, n_preview_windows=5,
style='coloured'):
r"""
    Webcam widget for taking snapshots. The snapshots are dynamically
    previewed in a FIFO queue of thumbnails.
Parameters
----------
canvas_width : `int`, optional
The initial width of the rendered canvas. Note that this doesn't actually
change the webcam resolution. It simply rescales the rendered image, as
well as the size of the returned screenshots.
hd : `bool`, optional
If ``True``, then the webcam will be set to high definition (HD), i.e.
720 x 1280. Otherwise the default resolution will be used.
n_preview_windows : `int`, optional
        The number of preview thumbnails that will be used as a FIFO queue
        to show the captured screenshots. It must be at least 4.
style : ``{'coloured', 'minimal'}``, optional
If ``'coloured'``, then the style of the widget will be coloured. If
``minimal``, then the style is simple using black and white colours.
Returns
-------
snapshots : `list` of `menpo.image.Image`
The list of captured images.
"""
# Ensure that the code is being run inside a Jupyter kernel!
from .utils import verify_ipython_and_kernel
verify_ipython_and_kernel()
# Define the styling options
if style == 'coloured':
wid_style = 'danger'
preview_style = 'warning'
else:
wid_style = 'minimal'
preview_style = 'minimal'
# Create widgets
wid = CameraSnapshotWidget(
canvas_width=canvas_width, hd=hd, n_preview_windows=n_preview_windows,
preview_windows_margin=3, style=wid_style, preview_style=preview_style)
# Display widget
ipydisplay.display(wid)
# Return
return wid.selected_values
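# Usage sketch: the returned list fills up with `menpo.image.Image`
# snapshots as the capture button is pressed in the widget:
#
#     snapshots = webcam_widget(canvas_width=640, hd=True)
#     # ... take a few snapshots ...
#     first_capture = snapshots[0]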
| grigorisg9gr/menpowidgets | menpowidgets/base.py | Python | bsd-3-clause | 100,134 |
# Django
from django.core.files.storage import get_storage_class
from django.utils.functional import LazyObject
# Internal
from .settings import (
FILE_STORAGE, FILE_STORAGE_ARGS,
LOGGER_NAME, LOG_FILE, LOG_SIZE, LOGGER_FORMAT, LOG_LEVEL
)
from .utils.loggers import LoggerWithStorage
class DefaultFileStorage(LazyObject):
def _setup(self):
self._wrapped = get_storage_class(FILE_STORAGE)(**FILE_STORAGE_ARGS)
default_file_storage = DefaultFileStorage()
default_logger = LoggerWithStorage(
storage=default_file_storage,
logger_name=LOGGER_NAME,
level=LOG_LEVEL,
log_file=LOG_FILE,
log_size=LOG_SIZE,
logger_format=LOGGER_FORMAT
)
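# Usage sketch (assumes LoggerWithStorage exposes the standard `logging`
# methods; the feed URL is illustrative):
#
#     from feedstorage.log import default_logger
#     default_logger.info('Fetched feed %s', 'http://example.com/rss')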
| YAmikep/django-feedstorage | feedstorage/log.py | Python | bsd-3-clause | 703 |
# -*- coding: utf-8 -*-
import logging
import re
import slumber
from datetime import datetime
from dateutil import parser as dateparser
from queryset_client import client
from urllib import urlencode
from urlparse import urlparse
try:
# try to import django suite
from django.conf import settings
from django.core.cache import cache
from django.db import models
from django.core.exceptions import ObjectDoesNotExist
from django.utils.importlib import import_module
from django.utils.translation import ugettext as _
except Exception, e:
class Cache(object):
def __getattribute__(self, key):
return lambda *args: None
class ObjectDoesNotExist(Exception):
pass
def _(text):
return text
settings = object()
cache = Cache()
models = None
import_module = __import__
from rpc_proxy import exceptions
from rpc_proxy.utils import logf
PK_ID = ('pk', 'id',)
logger = logging.getLogger(__name__)
def extend(instance, new_class, attrs={}):
instance.__class__ = type(new_class.__name__,
(instance.__class__, new_class),
attrs)
instance.__class__.__module__ = new_class.__module__
instance.__module__ = new_class.__module__
return instance
def mixin(cls, mixin):
if mixin not in cls.__bases__:
cls.__bases__ = (mixin,) + cls.__bases__
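# Illustration of the two class-patching helpers above (`Greeter` and
# `SomeClass` are hypothetical):
#
#     class Greeter(object):
#         def greet(self):
#             return 'hi'
#
#     extend(instance, Greeter)   # re-types a single instance
#     mixin(SomeClass, Greeter)   # augments every instance of SomeClass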
def get_setting(name, default=None):
return getattr(settings, 'TASTYPIE_RPC_PROXY', {}).get(name, default)
def get_pk(obj):
"""
    This is a workaround to look up a non-default ``id`` primary key value.
    Since queryset_client expects resources to have ``id`` fields as the
    primary key by design, it is hard to support one-to-one-like
    relationships. This method attempts to resolve such a relation based on
    the NON_DEFAULT_ID_FOREIGNKEYS settings value.
"""
if isinstance(obj, int):
return obj
if isinstance(obj, str):
# assumed to be a resource_uri
return client.parse_id(obj)
for key in get_setting('NON_DEFAULT_ID_FOREIGNKEYS', {}):
if hasattr(obj, key):
try:
return get_pk(getattr(obj, key))
except AttributeError, e:
pass
return obj.id
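# Resolution behaviour of get_pk (values illustrative):
#
#     get_pk(42)                  # -> 42
#     get_pk('/api/v1/item/42/')  # -> the id parsed from the resource_uri
#     get_pk(response)            # -> response.id, unless a key listed in
#                                 #    NON_DEFAULT_ID_FOREIGNKEYS matches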
class QuerySet(client.QuerySet):
def __init__(self, model, responses=None, query=None, **kwargs):
super(QuerySet, self).__init__(model, responses, query, **kwargs)
self._response_class = Response
def __getitem__(self, index):
try:
return super(QuerySet, self).__getitem__(index)
except IndexError, e:
pass
def _filter(self, *args, **kwargs):
for key, value in kwargs.items():
try:
# convert resource_uri to numeric id
id = client.parse_id('%s' % value)
kwargs[key] = id
except Exception, e:
pass
return super(QuerySet, self)._filter(*args, **kwargs)
def _wrap_response(self, dictionary):
return self._response_class(self.model,
dictionary,
_to_many_class=ManyToManyManager)
def create(self, **kwargs):
obj = super(QuerySet, self).create(**kwargs)
return Response(model=self.model, url=obj.resource_uri)
def get_or_create(self, **kwargs):
obj, created = super(QuerySet, self).get_or_create(**kwargs)
if not created:
return obj, created
return self.create(**kwargs), True
class Response(client.Response):
def __init__(self, model, response=None, url=None, **kwargs):
# implement proxy mixin
model_name = model._model_name.lower()
if model_name in ProxyClient._proxies:
proxy = ProxyClient._proxies[model_name].__class__
extend(self, proxy, proxy.__dict__.copy())
self.__init_proxy__()
super(Response, self).__init__(model, response, url, **kwargs)
# the magic
dir(self)
def __repr__(self):
if hasattr(self, 'resource_uri'):
return self.resource_uri
return '<%s: None>' % self.model._model_name.title()
def __getattr__(self, name):
"""
Overrides to support api namespace and to_one class diversity.
"""
try:
if name not in self._response:
raise AttributeError(name)
elif 'related_type' not in self._schema['fields'][name]:
return self.__getitem__(name)
except AttributeError, e:
if name in PK_ID:
return get_pk(self)
return getattr(self.model, name)
# resolves foreign key references in another api namespace
# expects to be called with detail url like /api/v1/<resource>/<id>|schema/
#
# CAVEAT: resource_uri of referred resource has to have the same version
base_client = self.model._base_client
if name in self._schema['fields']:
schema = self._schema['fields'][name]
if ('related_type' in schema and
schema['related_type'] in ('to_one', 'to_many',)):
if schema.get('schema'):
schema_uri = schema.get('schema')
else:
try:
schema_uri = self._response[name]
schema_uri = schema_uri[0] if (
isinstance(schema_uri, list)) else schema_uri
schema_uri = schema_uri['resource_uri'] if (
isinstance(schema_uri, dict)) else schema_uri
logger.debug(logf({
'message': 'Trying to guess schema info from '
'schema_uri.',
'schema_uri': schema_uri,
}))
except Exception, e:
raise exceptions.ProxyException(_('Couldn\'t identify related '
'field schema (%s).') % name)
else:
raise exceptions.ProxyException(_('The field seems not to be defined '
'in the schema (%s).') % name)
api_url = base_client._api_url
version = base_client._version
paths = filter(None, schema_uri.replace(
base_client._api_path, '').split('/'))
# strip <id> or ``schema`` part and extract resource_name
paths.pop()
resource_name = paths.pop()
if version in paths: paths.remove(version)
namespace = '/'.join(paths)
logger.debug(logf({
'message': 'Need namespace schema.',
'attribute': name,
'resource_uri': self._response[name],
'client_key': ProxyClient.build_client_key(base_client._api_url, **{
'version': base_client._version,
'namespace': namespace,
'auth': base_client._auth,
}),
}))
proxy_client = ProxyClient.get(base_client._api_url,
version=base_client._version,
namespace=namespace,
auth=base_client._auth)
proxy_client.schema()
model = proxy_client._model_gen(resource_name)
# set manager alias
        if name != resource_name:
setattr(self.model, resource_name, getattr(self.model, name))
if schema['related_type'] == 'to_many':
            resource_uris = [
                resource_uri['resource_uri']
                if isinstance(resource_uri, dict) else resource_uri
                for resource_uri in self._response[name]]
return ManyToManyManager(
model=model,
instance=self.model,
field_name=name,
query={'id__in': [client.parse_id(resource_uri) for resource_uri in resource_uris]})
elif schema['related_type'] == 'to_one':
return Response(model=model, url=self._response[name])
@property
def _response(self):
if self.__response:
return self.__response
        serializer = slumber.serialize.Serializer(
            default=self.model._main_client._store['format'])
if self._url is not None:
logger.debug(logf({
'message': 'Getting cache...',
'key': self._url,
}))
cached = cache.get(self._url)
if cached:
logger.debug(logf({
'message': 'Found in cache.',
'key': self._url,
'value': cached,
}))
self.refresh(serializer.loads(cached))
return self.__response
response = super(Response, self)._response
if self._url is not None:
if 'model' in response:
del(response['model'])
content = serializer.dumps(response)
logger.debug(logf({
'message': 'Setting cache...',
'key': self._url,
'value': content,
}))
cache.set(self._url, content)
return response
def refresh(self, data):
self.__response = data
try:
self.model = self.model(**self.__response)
except:
self.model = self.model.__class__(**self.__response)
def invalidate(self):
resource = getattr(self.model._main_client, self.model._model_name)
self.refresh(resource(client.parse_id(self.resource_uri)).get())
class Manager(client.Manager):
def __init__(self, model):
self.model = model
def get_query_set(self):
return QuerySet(self.model,
response_class=Response)
class ManyToManyManager(client.ManyToManyManager):
def __init__(self, query=None, instance=None, field_name=None, **kwargs):
self._field_name = field_name
super(ManyToManyManager, self).__init__(query, instance, **kwargs)
# FIXME: work around a bug on handling empty to_many manager
# in tastypie_queryset_client
if 'id__in' in self._query and len(self._query['id__in']) < 1:
self._query.update({'id__in': 0})
def get_query_set(self):
return QuerySet(self.model,
query=self._query,
response_class=Response).filter()
def filter(self, *args, **kwargs):
if 'id__in' in kwargs:
raise exceptions.ProxyException(_('"id__in" is not supported '
'in ManyToManyManager.'))
return QuerySet(self.model,
query=self._query,
response_class=Response).filter(*args, **kwargs)
def clear(self):
# work around a bug in tastypie_queryset_client
self._query.update({"id__in": list(set([]))})
setattr(self._instance, self._field_name, list(set([])))
class ProxyClient(client.Client):
_clients = {}
_proxies = {}
_models = {}
_schemas = {}
def __new__(cls, url, **kwargs):
key = ProxyClient.build_client_key(url, **kwargs)
if key not in cls._clients:
cls._clients[key] = super(ProxyClient,
cls).__new__(cls)
proxy = kwargs.get('proxy')
if proxy:
cls._proxies[proxy.__class__.__name__.replace('Proxy', '').lower()] = proxy
return cls._clients[key]
def __init__(self, base_url, auth=None, strict_field=True, client=None, **kwargs):
self._api_url = base_url
parsed = urlparse(self._api_url)
self._api_path = parsed.path
self._auth = kwargs.get('auth', auth)
self._namespace = kwargs.get('namespace', None)
self._version = kwargs.get('version', None)
super(ProxyClient, self).__init__(ProxyClient.build_base_url(base_url,
**kwargs),
self._auth,
strict_field,
client)
def _model_gen(self, model_name, strict_field=True, base_client=None):
return self.extend_model(super(ProxyClient, self)._model_gen(model_name,
strict_field,
self))
def extend_model(self, model):
# overwrite manager and model members
model.objects = Manager(model)
model._setfield_original = model._setfield
model._getfield_original = model._get_field
model.save_original = model.save
model.delete_original = model.delete
def _setfield(obj, name, value):
try:
obj._setfield_original(name, value)
except client.FieldTypeError, e:
self.to_python(obj, name, value)
super(obj.__class__, obj).__setattr__(name,
obj._fields[name])
def _getfield(obj, name):
try:
return self.to_serializable(obj, name)
except exceptions.ProxyException, e:
return obj._getfield_original(name)
def break_cache(obj):
cache.delete(getattr(obj,
'resource_uri',
'%s%s/' % (obj._base_client._api_path,
obj._client._store['base_url'].replace(
obj._base_client._api_url,
''))))
def save(obj):
break_cache(obj)
model.save_original(obj)
def delete(obj):
break_cache(obj)
try:
model.delete_original(obj)
except KeyError, e:
try:
obj._client(get_pk(obj)).delete()
obj._clear_fields()
except Exception, e:
raise exceptions.ProxyException(_('Failed to delete an object (%s): %s' % (obj, e,)))
model._setfield = _setfield
model._get_field = _getfield
model.save = save
model.delete = delete
return model
def to_python(self, obj, name, value):
field_type = obj._schema_store['fields'][name]['type']
new_value = value
if type(value) in (str,):
if field_type == 'datetime':
new_value = dateparser.parse(value)
elif field_type == 'date':
new_value = dateparser.parse(value).date()
elif field_type in ('list', 'json',):
new_value = value
if value != new_value:
logger.debug(logf({
'message': 'Converting to python...',
'field': name,
'type': field_type,
'from': value.__repr__(),
'to': new_value.__repr__(),
}))
obj._fields[name] = new_value
def to_serializable(self, obj, name):
field_type = obj._schema_store['fields'][name]['type']
value = new_value = obj._fields[name]
if field_type == 'date' and type(value) not in (str,):
new_value = value.isoformat()
if value != new_value:
logger.debug(logf({
'message': 'Serializing from python...',
'field': name,
'type': field_type,
'from': value.__repr__(),
'to': new_value.__repr__()
}))
return new_value
raise exceptions.ProxyException(_('Raise to call super.'))
def schema(self, model_name=None):
path = '.'.join(self._base_url.replace(self._api_url,
'').split('/')[:-1])
if model_name is None:
model_name = path
url = self._base_url
else:
url = self._url_gen('%s/schema/' % model_name)
if model_name not in ProxyClient._schemas:
try:
self._schema_store[model_name] = self.request(url)
ProxyClient._schemas[model_name] = self._schema_store[model_name]
except Exception, e:
logger.debug(logf({
'message': 'Couldn\'t fetch the schema definition for some reason.',
'schema': model_name,
}))
# try to import namespaced proxies
try:
module = '%s.proxies' % self._namespace.replace('/', '.')
import_module(module)
except ImportError, e:
try:
# guess top level module from proxy class
proxy = ProxyClient._proxies[ProxyClient._proxies.keys()[0]]
module = '%s.%s' % (proxy.__class__.__module__.split('.')[0],
module,)
import_module(module)
except ImportError, e:
logger.debug(logf({
'message': 'Proxies module not found, '
'the namespace might not be structured based on '
'actual class path.',
'module': module,
}))
except Exception, e:
pass
return ProxyClient._schemas.get(model_name, {})
def request(self, url, method='GET'):
nocache = False
if method != 'GET':
logger.debug(logf({
'message': 'Deleting cache...',
'key': url,
}))
cache.delete(url)
nocache = True
else:
logger.debug(logf({
'message': 'Getting cache...',
'key': url,
}))
result = cache.get(url)
if result is not None:
logger.debug(logf({
'message': 'Found in cache.',
'key': url,
'value': result,
}))
return result
# override super to handle HTTP response error
client = self._main_client._store
url = self._url_gen(url)
response = client['session'].request(method, url)
if response.status_code >= 300:
raise exceptions.ProxyException('Failed to fetch resource (%s, %s %s)' % (url,
method,
response.status_code,))
serializer = slumber.serialize.Serializer(default=client['format'])
result = serializer.loads(response.content)
if not nocache:
logger.debug(logf({
'message': 'Setting cache...',
'url': url,
'value': result,
}))
cache.set(url, result)
return result
@property
def proxies(self):
if len(self._proxies.keys()) > 0:
return self._proxies
else:
resources = {}
for resource in self._schemas.keys():
try:
resources[resource] = getattr(self, resource)
except AttributeError, e:
# we don't need api endpoints here
pass
return resources
@classmethod
def get(cls, url, **kwargs):
key = cls.build_client_key(url, **kwargs)
return cls._clients.get(key,
ProxyClient(url,
**kwargs))
@classmethod
def get_by_schema(cls, schema):
for client in cls._clients.values():
if schema in client._schema_store:
return client
return None
@classmethod
def build_base_url(cls, url, **kwargs):
version = '%s/' % kwargs.get('version') if kwargs.get('version') else ''
namespace = '%s/' % kwargs.get('namespace') if kwargs.get('namespace') else ''
return '%s%s' % ('%s%s' % (url, '/' if not url.endswith('/') else ''),
re.sub('//+', '/', '%s/%s' % (version, namespace,)),)
@classmethod
def build_client_key(cls, url, **kwargs):
auth = kwargs.get('auth', None)
base_url = cls.build_base_url(url, **kwargs).rpartition('://')
return '%s%s%s%s' % (base_url[0],
base_url[1],
'%s:%s@' % auth if auth else '',
base_url[2])
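# URL-building behaviour of the two classmethods above, worked out from
# the code (values illustrative):
#
#     ProxyClient.build_base_url('http://example.com/api',
#                                version='v1', namespace='shop')
#     # -> 'http://example.com/api/v1/shop/'
#
#     ProxyClient.build_client_key('http://example.com/api',
#                                  version='v1', namespace='shop',
#                                  auth=('user', 'pass'))
#     # -> 'http://user:pass@example.com/api/v1/shop/'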
class ProxyOptions(object):
abstract = False
api_url = get_setting('API_URL', None)
auth = (get_setting('SUPERUSER_USERNAME', None),
get_setting('SUPERUSER_PASSWORD', None))
client = ProxyClient
model = None
namespace = get_setting('API_NAMESPACE', None)
resource_name = None
version = get_setting('API_VERSION', 'v1')
def __new__(cls, meta=None):
overrides = {}
# handle overrides
if meta:
for override_name in dir(meta):
# no internals please
if not override_name.startswith('_'):
overrides[override_name] = getattr(meta, override_name)
return object.__new__(type('ProxyOptions', (cls,), overrides))
class ProxyMeta(type):
def __new__(cls, name, bases, attrs):
declarative = Response not in bases
if declarative and name in ProxyClient._proxies:
# returns existing proxy object
return ProxyClient._proxies[name]
meta = attrs.pop('Meta', attrs.pop('_meta', None))
abstract = getattr(meta, 'abstract', False)
# create new proxy class
proxy = super(ProxyMeta, cls).__new__(cls, name, bases, attrs)
proxy._meta = ProxyOptions(meta)
proxy._meta.abstract = abstract
if abstract:
return proxy
if not proxy._meta.model:
try:
                proxy._meta.model = getattr(
                    import_module('%s.models' %
                                  proxy.__module__.rpartition('.')[0]),
                    name)
except Exception, e:
pass
if proxy._meta.api_url:
# return proxy class or object
return proxy() if declarative else proxy
else:
# return model class which implements proxy interfaces
if name not in ProxyClient._models.keys():
model = proxy._meta.model
if not model:
raise exceptions.ProxyException(_('Module seems not to be imported '
'within django application context '
'("%s" model not found). Specify '
'proper model in Meta class.') % name)
# implement proxy mixin
def __init__(obj, *args, **kwargs):
obj.__init__original(*args, **kwargs)
mixin(obj.__class__, proxy)
obj.__module__ = proxy.__module__
obj.__init_proxy__()
model.__init__original = model.__init__
model.__init__ = __init__
ProxyClient._models[name] = model
return ProxyClient._models[name]
class Proxy(object):
__metaclass__ = ProxyMeta
class Meta:
abstract = True
def __init__(self, *args, **kwargs):
if (self._meta.abstract or
(models and isinstance(self, models.Model))):
super(Proxy, self).__init__(*args, **kwargs)
return
if not self._meta.api_url:
raise exceptions.ProxyException(_('"API_URL" not found in settings or '
'"api_url" not found in kwargs.'))
self._client = self._meta.client.get(self._meta.api_url,
version=self._meta.version,
namespace=self._meta.namespace or '/'.join(self.__module__.split('.')[1:-1]),
auth=self._meta.auth if self._meta.auth[0] is not None else None,
proxy=self)
try:
class_name = self.__class__.__name__
self._resource = getattr(self._client,
self._meta.resource_name or class_name,
getattr(self._client,
self._meta.resource_name or class_name.lower(), None))
except AttributeError, e:
logger.debug(logf({
'message': 'API seems not to have endpoint for the resource.',
'resource': class_name,
}))
def __init_proxy__(self):
pass
def __getattr__(self, name):
if name in PK_ID:
return get_pk(self)
if not models or (models and not isinstance(self, models.Model)):
            if name != '_resource':
return getattr(self._resource, name)
raise AttributeError(_('There is no "%s" attribute on this proxy.' % (name,)))
def invalidate(self):
if models and isinstance(self, models.Model):
pass
else:
super(Proxy, self).invalidate()
@property
def model_name(self):
if models and isinstance(self, models.Model):
return self.__class__.__name__.lower()
else:
return self.model._model_name
@property
def data(self):
if models and isinstance(self, models.Model):
dictionary = self.__dict__
for key, value in dictionary.items():
if key.startswith('_'):
del(dictionary[key])
return dictionary
else:
dictionary = dict()
for field in self.model._fields:
dictionary[field] = getattr(self.model, field)
return dictionary
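# Declarative usage sketch (hypothetical resource; assumes the
# TASTYPIE_RPC_PROXY settings provide API_URL, so the metaclass returns a
# ready-to-use singleton and `objects` resolves through the REST client):
#
#     class ItemProxy(Proxy):
#         class Meta:
#             resource_name = 'item'
#
#     item = ItemProxy.objects.get(id=1)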
| nk113/tastypie-rpc-proxy | rpc_proxy/proxies.py | Python | bsd-3-clause | 26,445 |
# Authors: Robert Luke <mail@robertluke.net>
#
# License: BSD (3-clause)
from configparser import ConfigParser, RawConfigParser
import glob
import re
import os.path as op
import datetime as dt
import json
import numpy as np
from ..base import BaseRaw
from ..utils import _mult_cal_one
from ..constants import FIFF
from ..meas_info import create_info, _format_dig_points
from ...annotations import Annotations
from ...source_space import get_mni_fiducials
from ...transforms import apply_trans, _get_trans
from ...utils import (logger, verbose, fill_doc, warn, _check_fname,
_validate_type, _check_option, _mask_to_onsets_offsets)
@fill_doc
def read_raw_nirx(fname, saturated='annotate', preload=False, verbose=None):
"""Reader for a NIRX fNIRS recording.
Parameters
----------
fname : str
Path to the NIRX data folder or header file.
%(saturated)s
%(preload)s
%(verbose)s
Returns
-------
raw : instance of RawNIRX
A Raw object containing NIRX data.
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
Notes
-----
%(nirx_notes)s
"""
return RawNIRX(fname, saturated, preload, verbose)
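# Usage sketch (hypothetical recording directory):
#
#     raw = read_raw_nirx('/path/to/nirx_recording', preload=True)
#     print(raw.info['sfreq'])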
def _open(fname):
return open(fname, 'r', encoding='latin-1')
@fill_doc
class RawNIRX(BaseRaw):
"""Raw object from a NIRX fNIRS file.
Parameters
----------
fname : str
Path to the NIRX data folder or header file.
%(saturated)s
%(preload)s
%(verbose)s
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
Notes
-----
%(nirx_notes)s
"""
@verbose
def __init__(self, fname, saturated, preload=False, verbose=None):
from ...externals.pymatreader import read_mat
logger.info('Loading %s' % fname)
_validate_type(fname, 'path-like', 'fname')
_validate_type(saturated, str, 'saturated')
_check_option('saturated', saturated, ('annotate', 'nan', 'ignore'))
fname = str(fname)
if fname.endswith('.hdr'):
fname = op.dirname(op.abspath(fname))
fname = _check_fname(fname, 'read', True, 'fname', need_dir=True)
        # Aurora recordings ship a JSON config file; NIRStar ones do not
        json_config = glob.glob('%s/*%s' % (fname, 'config.json'))
        is_aurora = len(json_config) > 0
if is_aurora:
# NIRSport2 devices using Aurora software
keys = ('hdr', 'config.json', 'description.json',
'wl1', 'wl2', 'probeInfo.mat', 'tri')
else:
# NIRScout devices and NIRSport1 devices
keys = ('hdr', 'inf', 'set', 'tpl', 'wl1', 'wl2',
'config.txt', 'probeInfo.mat')
n_dat = len(glob.glob('%s/*%s' % (fname, 'dat')))
if n_dat != 1:
warn("A single dat file was expected in the specified path, "
f"but got {n_dat}. This may indicate that the file "
"structure has been modified since the measurement "
"was saved.")
# Check if required files exist and store names for later use
files = dict()
nan_mask = dict()
for key in keys:
files[key] = glob.glob('%s/*%s' % (fname, key))
fidx = 0
if len(files[key]) != 1:
if key not in ('wl1', 'wl2'):
raise RuntimeError(
f'Need one {key} file, got {len(files[key])}')
noidx = np.where(['nosatflags_' in op.basename(x)
for x in files[key]])[0]
if len(noidx) != 1 or len(files[key]) != 2:
raise RuntimeError(
f'Need one nosatflags and one standard {key} file, '
f'got {len(files[key])}')
# Here two files have been found, one that is called
# no sat flags. The nosatflag file has no NaNs in it.
noidx = noidx[0]
if saturated == 'ignore':
# Ignore NaN and return values
fidx = noidx
elif saturated == 'nan':
# Return NaN
fidx = 0 if noidx == 1 else 1
else:
assert saturated == 'annotate' # guaranteed above
fidx = noidx
nan_mask[key] = files[key][0 if noidx == 1 else 1]
files[key] = files[key][fidx]
# Read number of rows/samples of wavelength data
with _open(files['wl1']) as fid:
last_sample = fid.read().count('\n') - 1
# Read header file
# The header file isn't compliant with the configparser. So all the
# text between comments must be removed before passing to parser
with _open(files['hdr']) as f:
hdr_str_all = f.read()
hdr_str = re.sub('#.*?#', '', hdr_str_all, flags=re.DOTALL)
if is_aurora:
hdr_str = re.sub('(\\[DataStructure].*)', '',
hdr_str, flags=re.DOTALL)
hdr = RawConfigParser()
hdr.read_string(hdr_str)
# Check that the file format version is supported
if is_aurora:
# We may need to ease this requirement back
if hdr['GeneralInfo']['Version'] not in ['2021.4.0-34-ge9fdbbc8']:
warn("MNE has not been tested with Aurora version "
f"{hdr['GeneralInfo']['Version']}")
else:
if hdr['GeneralInfo']['NIRStar'] not in ['"15.0"', '"15.2"',
'"15.3"']:
raise RuntimeError('MNE does not support this NIRStar version'
' (%s)' % (hdr['GeneralInfo']['NIRStar'],))
if "NIRScout" not in hdr['GeneralInfo']['Device'] \
and "NIRSport" not in hdr['GeneralInfo']['Device']:
warn("Only import of data from NIRScout devices have been "
"thoroughly tested. You are using a %s device. " %
hdr['GeneralInfo']['Device'])
# Parse required header fields
# Extract measurement date and time
if is_aurora:
datetime_str = hdr['GeneralInfo']['Date']
else:
datetime_str = hdr['GeneralInfo']['Date'] + \
hdr['GeneralInfo']['Time']
meas_date = None
# Several formats have been observed so we try each in turn
for dt_code in ['"%a, %b %d, %Y""%H:%M:%S.%f"',
'"%a, %d %b %Y""%H:%M:%S.%f"']:
try:
meas_date = dt.datetime.strptime(datetime_str, dt_code)
meas_date = meas_date.replace(tzinfo=dt.timezone.utc)
break
except ValueError:
pass
if meas_date is None:
warn("Extraction of measurement date from NIRX file failed. "
"This can be caused by files saved in certain locales. "
"Please report this as a github issue. "
"The date is being set to January 1st, 2000, "
"instead of {}".format(datetime_str))
meas_date = dt.datetime(2000, 1, 1, 0, 0, 0,
tzinfo=dt.timezone.utc)
# Extract frequencies of light used by machine
if is_aurora:
fnirs_wavelengths = [760, 850]
else:
fnirs_wavelengths = [int(s) for s in
re.findall(r'(\d+)',
hdr['ImagingParameters'][
'Wavelengths'])]
# Extract source-detectors
if is_aurora:
sources = re.findall(r'(\d+)-\d+', hdr_str_all.split("\n")[-2])
detectors = re.findall(r'\d+-(\d+)', hdr_str_all.split("\n")[-2])
sources = [int(s) + 1 for s in sources]
detectors = [int(d) + 1 for d in detectors]
else:
sources = np.asarray([int(s) for s in
re.findall(r'(\d+)-\d+:\d+',
hdr['DataStructure']
['S-D-Key'])], int)
detectors = np.asarray([int(s) for s in
re.findall(r'\d+-(\d+):\d+',
hdr['DataStructure']
['S-D-Key'])], int)
# Extract sampling rate
if is_aurora:
samplingrate = float(hdr['GeneralInfo']['Sampling rate'])
else:
samplingrate = float(hdr['ImagingParameters']['SamplingRate'])
# Read participant information file
if is_aurora:
with open(files['description.json']) as f:
inf = json.load(f)
else:
inf = ConfigParser(allow_no_value=True)
inf.read(files['inf'])
inf = inf._sections['Subject Demographics']
# Store subject information from inf file in mne format
# Note: NIRX also records "Study Type", "Experiment History",
# "Additional Notes", "Contact Information" and this information
# is currently discarded
# NIRStar does not record an id, or handedness by default
subject_info = {}
if is_aurora:
names = inf["subject"].split()
else:
names = inf['name'].split()
if len(names) > 0:
subject_info['first_name'] = \
names[0].replace("\"", "")
if len(names) > 1:
subject_info['last_name'] = \
names[-1].replace("\"", "")
if len(names) > 2:
subject_info['middle_name'] = \
names[-2].replace("\"", "")
subject_info['sex'] = inf['gender'].replace("\"", "")
# Recode values
if subject_info['sex'] in {'M', 'Male', '1'}:
subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_MALE
elif subject_info['sex'] in {'F', 'Female', '2'}:
subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_FEMALE
else:
subject_info['sex'] = FIFF.FIFFV_SUBJ_SEX_UNKNOWN
subject_info['birthday'] = (meas_date.year - int(inf['age']),
meas_date.month,
meas_date.day)
# Read information about probe/montage/optodes
# A word on terminology used here:
# Sources produce light
# Detectors measure light
# Sources and detectors are both called optodes
# Each source - detector pair produces a channel
# Channels are defined as the midpoint between source and detector
mat_data = read_mat(files['probeInfo.mat'], uint16_codec=None)
requested_channels = mat_data['probeInfo']['probes']['index_c']
src_locs = mat_data['probeInfo']['probes']['coords_s3'] / 100.
det_locs = mat_data['probeInfo']['probes']['coords_d3'] / 100.
ch_locs = mat_data['probeInfo']['probes']['coords_c3'] / 100.
# These are all in MNI coordinates, so let's transform them to
# the Neuromag head coordinate frame
src_locs, det_locs, ch_locs, mri_head_t = _convert_fnirs_to_head(
'fsaverage', 'mri', 'head', src_locs, det_locs, ch_locs)
# Set up digitization
dig = get_mni_fiducials('fsaverage', verbose=False)
for fid in dig:
fid['r'] = apply_trans(mri_head_t, fid['r'])
fid['coord_frame'] = FIFF.FIFFV_COORD_HEAD
for ii, ch_loc in enumerate(ch_locs, 1):
dig.append(dict(
kind=FIFF.FIFFV_POINT_EEG, # misnomer but probably okay
r=ch_loc,
ident=ii,
coord_frame=FIFF.FIFFV_COORD_HEAD,
))
dig = _format_dig_points(dig)
del mri_head_t
# Determine requested channel indices
# The wl1 and wl2 files include all possible source - detector pairs.
# But most of these are not relevant. We want to extract only the
# subset requested in the probe file
req_ind = np.array([], int)
for req_idx in range(requested_channels.shape[0]):
sd_idx = np.where((sources == requested_channels[req_idx][0]) &
(detectors == requested_channels[req_idx][1]))
req_ind = np.concatenate((req_ind, sd_idx[0]))
req_ind = req_ind.astype(int)
snames = [f"S{sources[idx]}" for idx in req_ind]
dnames = [f"_D{detectors[idx]}" for idx in req_ind]
        sdnames = [s + d for s, d in zip(snames, dnames)]
sd1 = [s + ' ' + str(fnirs_wavelengths[0]) for s in sdnames]
sd2 = [s + ' ' + str(fnirs_wavelengths[1]) for s in sdnames]
chnames = [val for pair in zip(sd1, sd2) for val in pair]
# Create mne structure
info = create_info(chnames,
samplingrate,
ch_types='fnirs_cw_amplitude')
info.update(subject_info=subject_info, dig=dig)
info['meas_date'] = meas_date
# Store channel, source, and detector locations
# The channel location is stored in the first 3 entries of loc.
# The source location is stored in the second 3 entries of loc.
# The detector location is stored in the third 3 entries of loc.
# NIRx NIRSite uses MNI coordinates.
# Also encode the light frequency in the structure.
for ch_idx2 in range(requested_channels.shape[0]):
# Find source and store location
src = int(requested_channels[ch_idx2, 0]) - 1
# Find detector and store location
det = int(requested_channels[ch_idx2, 1]) - 1
# Store channel location as midpoint between source and detector.
midpoint = (src_locs[src, :] + det_locs[det, :]) / 2
for ii in range(2):
ch_idx3 = ch_idx2 * 2 + ii
info['chs'][ch_idx3]['loc'][3:6] = src_locs[src, :]
info['chs'][ch_idx3]['loc'][6:9] = det_locs[det, :]
info['chs'][ch_idx3]['loc'][:3] = midpoint
info['chs'][ch_idx3]['loc'][9] = fnirs_wavelengths[ii]
info['chs'][ch_idx3]['coord_frame'] = FIFF.FIFFV_COORD_HEAD
# Extract the start/stop numbers for samples in the CSV. In theory the
# sample bounds should just be 10 * the number of channels, but some
# files have mixed \n and \n\r endings (!) so we can't rely on it, and
# instead make a single pass over the entire file at the beginning so
# that we know how to seek and read later.
bounds = dict()
for key in ('wl1', 'wl2'):
offset = 0
bounds[key] = [offset]
with open(files[key], 'rb') as fid:
for line in fid:
offset += len(line)
bounds[key].append(offset)
assert offset == fid.tell()
# Extras required for reading data
raw_extras = {
'sd_index': req_ind,
'files': files,
'bounds': bounds,
'nan_mask': nan_mask,
}
# Get our saturated mask
annot_mask = None
        for key in ('wl1', 'wl2'):
if nan_mask.get(key, None) is None:
continue
mask = np.isnan(_read_csv_rows_cols(
nan_mask[key], 0, last_sample + 1, req_ind, {0: 0, 1: None}).T)
if saturated == 'nan':
nan_mask[key] = mask
else:
assert saturated == 'annotate'
if annot_mask is None:
annot_mask = np.zeros(
(len(info['ch_names']) // 2, last_sample + 1), bool)
annot_mask |= mask
nan_mask[key] = None # shouldn't need again
super(RawNIRX, self).__init__(
info, preload, filenames=[fname], last_samps=[last_sample],
raw_extras=[raw_extras], verbose=verbose)
# make onset/duration/description
onset, duration, description, ch_names = list(), list(), list(), list()
if annot_mask is not None:
for ci, mask in enumerate(annot_mask):
on, dur = _mask_to_onsets_offsets(mask)
on = on / info['sfreq']
dur = dur / info['sfreq']
dur -= on
onset.extend(on)
duration.extend(dur)
description.extend(['BAD_SATURATED'] * len(on))
ch_names.extend([self.ch_names[2 * ci:2 * ci + 2]] * len(on))
# Read triggers from event file
if not is_aurora:
files['tri'] = files['hdr'][:-3] + 'evt'
if op.isfile(files['tri']):
with _open(files['tri']) as fid:
t = [re.findall(r'(\d+)', line) for line in fid]
for t_ in t:
if is_aurora:
trigger_frame = float(t_[7])
desc = float(t_[8])
else:
binary_value = ''.join(t_[1:])[::-1]
desc = float(int(binary_value, 2))
trigger_frame = float(t_[0])
onset.append(trigger_frame / samplingrate)
duration.append(1.) # No duration info stored in files
description.append(desc)
ch_names.append(list())
annot = Annotations(onset, duration, description, ch_names=ch_names)
self.set_annotations(annot)
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
"""Read a segment of data from a file.
The NIRX machine records raw data as two different wavelengths.
The returned data interleaves the wavelengths.
"""
sd_index = self._raw_extras[fi]['sd_index']
wls = list()
for key in ('wl1', 'wl2'):
d = _read_csv_rows_cols(
self._raw_extras[fi]['files'][key],
start, stop, sd_index,
self._raw_extras[fi]['bounds'][key]).T
nan_mask = self._raw_extras[fi]['nan_mask'].get(key, None)
if nan_mask is not None:
d[nan_mask[:, start:stop]] = np.nan
wls.append(d)
# TODO: Make this more efficient by only indexing above what we need.
# For now let's just construct the full data matrix and index.
# Interleave wavelength 1 and 2 to match channel names:
this_data = np.zeros((len(wls[0]) * 2, stop - start))
this_data[0::2, :] = wls[0]
this_data[1::2, :] = wls[1]
_mult_cal_one(data, this_data, idx, cals, mult)
return data
def _read_csv_rows_cols(fname, start, stop, cols, bounds,
sep=' ', replace=None):
with open(fname, 'rb') as fid:
fid.seek(bounds[start])
args = list()
if bounds[1] is not None:
args.append(bounds[stop] - bounds[start])
data = fid.read(*args).decode('latin-1')
if replace is not None:
data = replace(data)
x = np.fromstring(data, float, sep=sep)
x.shape = (stop - start, -1)
x = x[:, cols]
return x
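# A minimal sketch of how the byte-offset ``bounds`` list consumed above is
# built (hypothetical two-row file; this mirrors the single pass done in
# ``RawNIRX.__init__``):
#
#     with open('wl1_file', 'rb') as fid:  # contents b'1 2\n3 4\n'
#         bounds = [0]
#         for line in fid:
#             bounds.append(bounds[-1] + len(line))
#     # bounds == [0, 4, 8]; row i occupies bytes bounds[i]:bounds[i + 1], so
#     # _read_csv_rows_cols can seek straight to any row range.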
def _convert_fnirs_to_head(trans, fro, to, src_locs, det_locs, ch_locs):
mri_head_t, _ = _get_trans(trans, fro, to)
src_locs = apply_trans(mri_head_t, src_locs)
det_locs = apply_trans(mri_head_t, det_locs)
ch_locs = apply_trans(mri_head_t, ch_locs)
return src_locs, det_locs, ch_locs, mri_head_t
| rkmaddox/mne-python | mne/io/nirx/nirx.py | Python | bsd-3-clause | 19,728 |
#!/usr/bin/python
# Copyright (c) 2008,2010,2011,2012,2013 Alexander Belchenko
# All rights reserved.
#
# Redistribution and use in source and binary forms,
# with or without modification, are permitted provided
# that the following conditions are met:
#
# * Redistributions of source code must retain
# the above copyright notice, this list of conditions
# and the following disclaimer.
# * Redistributions in binary form must reproduce
# the above copyright notice, this list of conditions
# and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the author nor the names
# of its contributors may be used to endorse
# or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
# AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,
# OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Show content of hex file as hexdump."""
VERSION = '1.5.1'
USAGE = '''hex2dump: show content of hex file as hexdump.
Usage:
python hex2dump.py [options] HEXFILE
Options:
-h, --help this help message.
-v, --version version info.
-r, --range=START:END specify address range for dumping
(ascii hex value).
Range can be in form 'START:' or ':END'.
Arguments:
HEXFILE name of hex file for processing (use '-' to read
from stdin)
'''
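# For example (hypothetical file name):
#     python hex2dump.py -r 0000:00FF firmware.hex
# restricts the dump to the given address range of firmware.hex.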
import sys
def hex2dump(hexfile, start=None, end=None):
import intelhex
if hexfile == '-':
hexfile = sys.stdin
try:
ih = intelhex.IntelHex(hexfile)
    except (IOError, intelhex.IntelHexError) as e:
sys.stderr.write('Error reading file: %s\n' % e)
return 1
if not (start is None and end is None):
        ih = ih[slice(start, end)]
ih.dump()
return 0
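# A minimal programmatic sketch (assuming the intelhex package is installed;
# 'firmware.hex' is a hypothetical file name):
#
#     import hex2dump
#     hex2dump.hex2dump('firmware.hex')                     # dump everything
#     hex2dump.hex2dump('firmware.hex', start=0, end=0xFF)  # limit the range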
def main(argv=None):
import getopt
if argv is None:
argv = sys.argv[1:]
start = None
end = None
try:
        opts, args = getopt.getopt(argv, "hvr:",
                                   ["help", "version", "range="])
for o, a in opts:
if o in ("-h", "--help"):
print(USAGE)
return 0
elif o in ("-v", "--version"):
print(VERSION)
return 0
elif o in ("-r", "--range"):
                try:
                    parts = a.split(":")
                    if parts[0] != '':
                        start = int(parts[0], 16)
                    if parts[1] != '':
                        end = int(parts[1], 16)
                except (ValueError, IndexError):
                    raise getopt.GetoptError('Bad range value(s)')
if not args:
raise getopt.GetoptError('Hex file is not specified')
if len(args) > 1:
raise getopt.GetoptError('Too many arguments')
    except getopt.GetoptError as msg:
txt = 'ERROR: '+str(msg) # that's required to get not-so-dumb result from 2to3 tool
print(txt)
print(USAGE)
return 2
try:
return hex2dump(args[0], start, end)
    except IOError as e:
import errno
if e.errno not in (0, errno.EPIPE):
raise
if __name__ == '__main__':
    sys.exit(main())
| mhubig/intelhex | scripts/hex2dump.py | Python | bsd-3-clause | 3,960 |
import time
import re
from django.http import HttpResponseRedirect
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.conf import settings
# In-memory caches are used since different processes do not necessarily
# exhibit the same response times, even though they may share a caching backend
# like memcached. We also don't have to be concerned with thread safety so no
# need to use LocMemCache.
_cache = {}
_request_response_times = {}
SERVER_BUSY_URL = reverse(
settings.DJANGO_SCALER.get('server_busy_url_name', 'server-busy')
)
def redirect_n_slowest_dummy():
return 0
def redirect_n_slowest_from_cache():
"""Simple retrieval from whatever cache is in use"""
return cache.get('django_scaler_n_slowest')
def redirect_percentage_slowest_dummy():
return 0
def redirect_percentage_slowest_from_cache():
return cache.get('django_scaler_percentage_slowest')
def redirect_regexes_dummy():
return []
def redirect_regexes_from_cache():
return cache.get('django_scaler_regexes')
class ScalerMiddleware:
"""Add as the first middleware in your settings file"""
def process_request(self, request):
# Ajax requests are not subject to scaling. Busy page is exempt from
# scaling.
if request.is_ajax() or request.META['PATH_INFO'] == SERVER_BUSY_URL:
return
# If a n_slowest or percentage_slowest is provided then forcefully
# redirect the n slowest or percentage_slowest requests. This allows
# external processes to easily instruct us to scale back.
n_slowest = settings.DJANGO_SCALER.get(
'redirect_n_slowest_function', redirect_n_slowest_dummy
)()
percentage_slowest = settings.DJANGO_SCALER.get(
'redirect_percentage_slowest_function',
redirect_percentage_slowest_dummy
)()
regexes = settings.DJANGO_SCALER.get(
'redirect_regexes_function',
redirect_regexes_dummy
)()
        # Ajax requests were already excluded above, so these checks apply
        # to every remaining request.
        if n_slowest or percentage_slowest:
            # Sort paths from slowest to fastest
            paths = sorted(
                _request_response_times,
                key=_request_response_times.__getitem__,
                reverse=True
            )
            if n_slowest:
                li = paths[:n_slowest]
                if request.META['PATH_INFO'] in li:
                    return HttpResponseRedirect(SERVER_BUSY_URL)
            if percentage_slowest:
                n = int(round(percentage_slowest / 100.0 * len(paths)))
                li = paths[:n]
                if request.META['PATH_INFO'] in li:
                    return HttpResponseRedirect(SERVER_BUSY_URL)
        if regexes:
            for regex in regexes:
                m = re.match(r'%s' % regex, request.META['PATH_INFO'])
                if m is not None:
                    return HttpResponseRedirect(SERVER_BUSY_URL)
# On to automatic redirection
now = time.time()
# Marker for process_response
setattr(request, '_django_scaler_stamp', now)
# Cache key uses path info
prefix = request.META['PATH_INFO'] + '-scaler-'
# Fetch values
key_stamp = prefix + 'stamp'
key_hits = prefix + 'hits'
key_trend = prefix + 'trend'
key_redir = prefix + 'redir'
stamp = _cache.get(key_stamp, 0)
hits = _cache.get(key_hits, 0)
trend = _cache.get(key_trend, [])
redir = _cache.get(key_redir, now)
# Nothing to do if not enough hits yet
if hits > settings.DJANGO_SCALER.get('trend_size', 100):
avg = stamp * 1.0 / hits
# Update request response times dictionary
_request_response_times[request.META['PATH_INFO']] = avg
            # If the recent trend is slow_threshold times slower than the
            # running average then redirect, unless enough time has passed
            # to re-attempt processing.
slow_threshold = settings.DJANGO_SCALER.get(
'slow_threshold', 4.0
)
if sum(trend) * 1.0 / len(trend) > avg * slow_threshold:
# Has enough time passed to allow the request?
redirect_for = settings.DJANGO_SCALER.get(
'redirect_for', 60
)
if now - redir > redirect_for:
# Yes, enough time has passed
# Clear time of last redirect
try:
del _cache[key_redir]
except KeyError:
pass
# Clear trend since it currently stores slow response
# times. We want a fresh start.
_cache[key_trend] = []
else:
# No, not enough time has passed. Keep redirecting.
# Remove marker so process_response does not store data
delattr(request, '_django_scaler_stamp')
# Set time of last redirect if it has not been set
_cache.setdefault(key_redir, now)
return HttpResponseRedirect(SERVER_BUSY_URL)
def process_response(self, request, response):
t = getattr(request, '_django_scaler_stamp', None)
# Anything to do?
if t is not None:
# Diff in milliseconds
diff = int((time.time() - t) * 1000)
# Fetch values
prefix = request.META['PATH_INFO'] + '-scaler-'
key_stamp = prefix + 'stamp'
key_hits = prefix + 'hits'
key_trend = prefix + 'trend'
stamp = _cache.get(key_stamp, 0)
hits = _cache.get(key_hits, 0)
trend = _cache.get(key_trend, [])
# Set values
_cache[key_stamp] = stamp + diff
_cache[key_hits] = hits + 1
trend_size = settings.DJANGO_SCALER.get('trend_size', 100)
_cache[key_trend] = (trend + [diff])[-trend_size:]
return response
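# A minimal sketch of the settings this middleware reads; every key is
# optional and falls back to the defaults used above (the function hooks are
# the cache-backed helpers defined at the top of this module):
#
#     DJANGO_SCALER = {
#         'server_busy_url_name': 'server-busy',  # named URL of the busy page
#         'trend_size': 100,      # samples kept in the moving response trend
#         'slow_threshold': 4.0,  # trend must be this many times the average
#         'redirect_for': 60,     # seconds to keep redirecting a slow path
#         'redirect_n_slowest_function': redirect_n_slowest_from_cache,
#         'redirect_percentage_slowest_function':
#             redirect_percentage_slowest_from_cache,
#         'redirect_regexes_function': redirect_regexes_from_cache,
#     }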
| praekelt/django-scaler | scaler/middleware.py | Python | bsd-3-clause | 6,116 |
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.2
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError("Python 2.7 or later required")
# Import the low-level C/C++ module
if __package__ or "." in __name__:
from . import _sparsemat
else:
import _sparsemat
try:
import builtins as __builtin__
except ImportError:
import __builtin__
_swig_new_instance_method = _sparsemat.SWIG_PyInstanceMethod_New
_swig_new_static_method = _sparsemat.SWIG_PyStaticMethod_New
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
def set_instance_attr(self, name, value):
if name == "thisown":
self.this.own(value)
elif name == "this":
set(self, name, value)
elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
set(self, name, value)
else:
raise AttributeError("You cannot add instance attributes to %s" % self)
return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
def set_class_attr(cls, name, value):
if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
set(cls, name, value)
else:
raise AttributeError("You cannot add class attributes to %s" % cls)
return set_class_attr
def _swig_add_metaclass(metaclass):
"""Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
def wrapper(cls):
return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
return wrapper
class _SwigNonDynamicMeta(type):
"""Meta class to enforce nondynamic attributes (no new attributes) for a class"""
__setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
import weakref
import mfem._par.array
import mfem._par.mem_manager
import mfem._par.globals
import mfem._par.vector
import mfem._par.operators
import mfem._par.matrix
import mfem._par.densemat
def RAP_P(A, R, ORAP):
r"""RAP_P(SparseMatrix A, SparseMatrix R, SparseMatrix ORAP) -> SparseMatrix"""
return _sparsemat.RAP_P(A, R, ORAP)
RAP_P = _sparsemat.RAP_P
def RAP_R(Rt, A, P):
r"""RAP_R(SparseMatrix Rt, SparseMatrix A, SparseMatrix P) -> SparseMatrix"""
return _sparsemat.RAP_R(Rt, A, P)
RAP_R = _sparsemat.RAP_R
def OperatorPtr2SparseMatrix(op):
r"""OperatorPtr2SparseMatrix(mfem::OperatorPtr op) -> SparseMatrix"""
return _sparsemat.OperatorPtr2SparseMatrix(op)
OperatorPtr2SparseMatrix = _sparsemat.OperatorPtr2SparseMatrix
def OperatorHandle2SparseMatrix(op):
r"""OperatorHandle2SparseMatrix(mfem::OperatorHandle op) -> SparseMatrix"""
return _sparsemat.OperatorHandle2SparseMatrix(op)
OperatorHandle2SparseMatrix = _sparsemat.OperatorHandle2SparseMatrix
class RowNode(object):
r"""Proxy of C++ mfem::RowNode class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
Value = property(_sparsemat.RowNode_Value_get, _sparsemat.RowNode_Value_set, doc=r"""Value : double""")
Prev = property(_sparsemat.RowNode_Prev_get, _sparsemat.RowNode_Prev_set, doc=r"""Prev : p.mfem::RowNode""")
Column = property(_sparsemat.RowNode_Column_get, _sparsemat.RowNode_Column_set, doc=r"""Column : int""")
def __init__(self):
r"""__init__(RowNode self) -> RowNode"""
_sparsemat.RowNode_swiginit(self, _sparsemat.new_RowNode())
__swig_destroy__ = _sparsemat.delete_RowNode
# Register RowNode in _sparsemat:
_sparsemat.RowNode_swigregister(RowNode)
class SparseMatrix(mfem._par.matrix.AbstractSparseMatrix):
r"""Proxy of C++ mfem::SparseMatrix class."""
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc="The membership flag")
__repr__ = _swig_repr
def __init__(self, *args):
r"""
__init__(SparseMatrix self) -> SparseMatrix
__init__(SparseMatrix self, int nrows, int ncols=-1) -> SparseMatrix
__init__(SparseMatrix self, int * i) -> SparseMatrix
__init__(SparseMatrix self, int * i, bool ownij, bool owna, bool issorted) -> SparseMatrix
__init__(SparseMatrix self, int nrows, int ncols, int rowsize) -> SparseMatrix
__init__(SparseMatrix self, SparseMatrix mat, bool copy_graph=True, mfem::MemoryType mt=PRESERVE) -> SparseMatrix
__init__(SparseMatrix self, Vector v) -> SparseMatrix
"""
import numpy as np
from scipy.sparse import csr_matrix
if len(args) == 1 and isinstance(args[0], csr_matrix):
csr = args[0]
if np.real(csr).dtype != 'float64':
csr = csr.astype('float64')
i = np.ascontiguousarray(csr.indptr)
j = np.ascontiguousarray(csr.indices)
data = np.ascontiguousarray(csr.data)
m, n = csr.shape
this = _sparsemat.new_SparseMatrix([i, j, data, m, n])
try:
self.this.append(this)
except __builtin__.Exception:
self.this = this
_sparsemat.SparseMatrix_SetGraphOwner(self, False)
_sparsemat.SparseMatrix_SetDataOwner(self, False)
self._i_data = i
self._j_data = j
self._d_data = data
return
_sparsemat.SparseMatrix_swiginit(self, _sparsemat.new_SparseMatrix(*args))
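    # A minimal usage sketch of the csr_matrix path above (assuming scipy is
    # installed); the wrapper keeps references to i/j/data so the NumPy
    # buffers outlive the non-owning SparseMatrix:
    #
    #     import numpy as np
    #     from scipy.sparse import csr_matrix
    #     mat = SparseMatrix(csr_matrix(np.eye(3)))
    #     print(mat.NumNonZeroElems())  # expected: 3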
def UseGPUSparse(self, useGPUSparse_=True):
r"""UseGPUSparse(SparseMatrix self, bool useGPUSparse_=True)"""
return _sparsemat.SparseMatrix_UseGPUSparse(self, useGPUSparse_)
UseGPUSparse = _swig_new_instance_method(_sparsemat.SparseMatrix_UseGPUSparse)
def UseCuSparse(self, useCuSparse_=True):
r"""UseCuSparse(SparseMatrix self, bool useCuSparse_=True)"""
return _sparsemat.SparseMatrix_UseCuSparse(self, useCuSparse_)
UseCuSparse = _swig_new_instance_method(_sparsemat.SparseMatrix_UseCuSparse)
def MakeRef(self, master):
r"""MakeRef(SparseMatrix self, SparseMatrix master)"""
return _sparsemat.SparseMatrix_MakeRef(self, master)
MakeRef = _swig_new_instance_method(_sparsemat.SparseMatrix_MakeRef)
def Size(self):
r"""Size(SparseMatrix self) -> int"""
return _sparsemat.SparseMatrix_Size(self)
Size = _swig_new_instance_method(_sparsemat.SparseMatrix_Size)
def Clear(self):
r"""Clear(SparseMatrix self)"""
return _sparsemat.SparseMatrix_Clear(self)
Clear = _swig_new_instance_method(_sparsemat.SparseMatrix_Clear)
def ClearGPUSparse(self):
r"""ClearGPUSparse(SparseMatrix self)"""
return _sparsemat.SparseMatrix_ClearGPUSparse(self)
ClearGPUSparse = _swig_new_instance_method(_sparsemat.SparseMatrix_ClearGPUSparse)
def ClearCuSparse(self):
r"""ClearCuSparse(SparseMatrix self)"""
return _sparsemat.SparseMatrix_ClearCuSparse(self)
ClearCuSparse = _swig_new_instance_method(_sparsemat.SparseMatrix_ClearCuSparse)
def Empty(self):
r"""Empty(SparseMatrix self) -> bool"""
return _sparsemat.SparseMatrix_Empty(self)
Empty = _swig_new_instance_method(_sparsemat.SparseMatrix_Empty)
def GetI(self, *args):
r"""
GetI(SparseMatrix self) -> int
GetI(SparseMatrix self) -> int const *
"""
return _sparsemat.SparseMatrix_GetI(self, *args)
GetI = _swig_new_instance_method(_sparsemat.SparseMatrix_GetI)
def GetJ(self, *args):
r"""
GetJ(SparseMatrix self) -> int
GetJ(SparseMatrix self) -> int const *
"""
return _sparsemat.SparseMatrix_GetJ(self, *args)
GetJ = _swig_new_instance_method(_sparsemat.SparseMatrix_GetJ)
def GetData(self, *args):
r"""
GetData(SparseMatrix self) -> double
GetData(SparseMatrix self) -> double const *
"""
return _sparsemat.SparseMatrix_GetData(self, *args)
GetData = _swig_new_instance_method(_sparsemat.SparseMatrix_GetData)
def GetMemoryI(self, *args):
r"""
GetMemoryI(SparseMatrix self) -> mfem::Memory< int >
GetMemoryI(SparseMatrix self) -> mfem::Memory< int > const &
"""
return _sparsemat.SparseMatrix_GetMemoryI(self, *args)
GetMemoryI = _swig_new_instance_method(_sparsemat.SparseMatrix_GetMemoryI)
def ReadI(self, on_dev=True):
r"""ReadI(SparseMatrix self, bool on_dev=True) -> int const *"""
return _sparsemat.SparseMatrix_ReadI(self, on_dev)
ReadI = _swig_new_instance_method(_sparsemat.SparseMatrix_ReadI)
def WriteI(self, on_dev=True):
r"""WriteI(SparseMatrix self, bool on_dev=True) -> int *"""
return _sparsemat.SparseMatrix_WriteI(self, on_dev)
WriteI = _swig_new_instance_method(_sparsemat.SparseMatrix_WriteI)
def ReadWriteI(self, on_dev=True):
r"""ReadWriteI(SparseMatrix self, bool on_dev=True) -> int *"""
return _sparsemat.SparseMatrix_ReadWriteI(self, on_dev)
ReadWriteI = _swig_new_instance_method(_sparsemat.SparseMatrix_ReadWriteI)
def HostReadI(self):
r"""HostReadI(SparseMatrix self) -> int const *"""
return _sparsemat.SparseMatrix_HostReadI(self)
HostReadI = _swig_new_instance_method(_sparsemat.SparseMatrix_HostReadI)
def HostWriteI(self):
r"""HostWriteI(SparseMatrix self) -> int *"""
return _sparsemat.SparseMatrix_HostWriteI(self)
HostWriteI = _swig_new_instance_method(_sparsemat.SparseMatrix_HostWriteI)
def HostReadWriteI(self):
r"""HostReadWriteI(SparseMatrix self) -> int *"""
return _sparsemat.SparseMatrix_HostReadWriteI(self)
HostReadWriteI = _swig_new_instance_method(_sparsemat.SparseMatrix_HostReadWriteI)
def GetMemoryJ(self, *args):
r"""
GetMemoryJ(SparseMatrix self) -> mfem::Memory< int >
GetMemoryJ(SparseMatrix self) -> mfem::Memory< int > const &
"""
return _sparsemat.SparseMatrix_GetMemoryJ(self, *args)
GetMemoryJ = _swig_new_instance_method(_sparsemat.SparseMatrix_GetMemoryJ)
def ReadJ(self, on_dev=True):
r"""ReadJ(SparseMatrix self, bool on_dev=True) -> int const *"""
return _sparsemat.SparseMatrix_ReadJ(self, on_dev)
ReadJ = _swig_new_instance_method(_sparsemat.SparseMatrix_ReadJ)
def WriteJ(self, on_dev=True):
r"""WriteJ(SparseMatrix self, bool on_dev=True) -> int *"""
return _sparsemat.SparseMatrix_WriteJ(self, on_dev)
WriteJ = _swig_new_instance_method(_sparsemat.SparseMatrix_WriteJ)
def ReadWriteJ(self, on_dev=True):
r"""ReadWriteJ(SparseMatrix self, bool on_dev=True) -> int *"""
return _sparsemat.SparseMatrix_ReadWriteJ(self, on_dev)
ReadWriteJ = _swig_new_instance_method(_sparsemat.SparseMatrix_ReadWriteJ)
def HostReadJ(self):
r"""HostReadJ(SparseMatrix self) -> int const *"""
return _sparsemat.SparseMatrix_HostReadJ(self)
HostReadJ = _swig_new_instance_method(_sparsemat.SparseMatrix_HostReadJ)
def HostWriteJ(self):
r"""HostWriteJ(SparseMatrix self) -> int *"""
return _sparsemat.SparseMatrix_HostWriteJ(self)
HostWriteJ = _swig_new_instance_method(_sparsemat.SparseMatrix_HostWriteJ)
def HostReadWriteJ(self):
r"""HostReadWriteJ(SparseMatrix self) -> int *"""
return _sparsemat.SparseMatrix_HostReadWriteJ(self)
HostReadWriteJ = _swig_new_instance_method(_sparsemat.SparseMatrix_HostReadWriteJ)
def GetMemoryData(self, *args):
r"""
GetMemoryData(SparseMatrix self) -> mfem::Memory< double >
GetMemoryData(SparseMatrix self) -> mfem::Memory< double > const &
"""
return _sparsemat.SparseMatrix_GetMemoryData(self, *args)
GetMemoryData = _swig_new_instance_method(_sparsemat.SparseMatrix_GetMemoryData)
def ReadData(self, on_dev=True):
r"""ReadData(SparseMatrix self, bool on_dev=True) -> double const *"""
return _sparsemat.SparseMatrix_ReadData(self, on_dev)
ReadData = _swig_new_instance_method(_sparsemat.SparseMatrix_ReadData)
def WriteData(self, on_dev=True):
r"""WriteData(SparseMatrix self, bool on_dev=True) -> double *"""
return _sparsemat.SparseMatrix_WriteData(self, on_dev)
WriteData = _swig_new_instance_method(_sparsemat.SparseMatrix_WriteData)
def ReadWriteData(self, on_dev=True):
r"""ReadWriteData(SparseMatrix self, bool on_dev=True) -> double *"""
return _sparsemat.SparseMatrix_ReadWriteData(self, on_dev)
ReadWriteData = _swig_new_instance_method(_sparsemat.SparseMatrix_ReadWriteData)
def HostReadData(self):
r"""HostReadData(SparseMatrix self) -> double const *"""
return _sparsemat.SparseMatrix_HostReadData(self)
HostReadData = _swig_new_instance_method(_sparsemat.SparseMatrix_HostReadData)
def HostWriteData(self):
r"""HostWriteData(SparseMatrix self) -> double *"""
return _sparsemat.SparseMatrix_HostWriteData(self)
HostWriteData = _swig_new_instance_method(_sparsemat.SparseMatrix_HostWriteData)
def HostReadWriteData(self):
r"""HostReadWriteData(SparseMatrix self) -> double *"""
return _sparsemat.SparseMatrix_HostReadWriteData(self)
HostReadWriteData = _swig_new_instance_method(_sparsemat.SparseMatrix_HostReadWriteData)
def RowSize(self, i):
r"""RowSize(SparseMatrix self, int const i) -> int"""
return _sparsemat.SparseMatrix_RowSize(self, i)
RowSize = _swig_new_instance_method(_sparsemat.SparseMatrix_RowSize)
def MaxRowSize(self):
r"""MaxRowSize(SparseMatrix self) -> int"""
return _sparsemat.SparseMatrix_MaxRowSize(self)
MaxRowSize = _swig_new_instance_method(_sparsemat.SparseMatrix_MaxRowSize)
def GetRowColumns(self, *args):
r"""
GetRowColumns(SparseMatrix self, int const row) -> int
GetRowColumns(SparseMatrix self, int const row) -> int const *
"""
return _sparsemat.SparseMatrix_GetRowColumns(self, *args)
GetRowColumns = _swig_new_instance_method(_sparsemat.SparseMatrix_GetRowColumns)
def GetRowEntries(self, *args):
r"""
GetRowEntries(SparseMatrix self, int const row) -> double
GetRowEntries(SparseMatrix self, int const row) -> double const *
"""
return _sparsemat.SparseMatrix_GetRowEntries(self, *args)
GetRowEntries = _swig_new_instance_method(_sparsemat.SparseMatrix_GetRowEntries)
def SetWidth(self, width_=-1):
r"""SetWidth(SparseMatrix self, int width_=-1)"""
return _sparsemat.SparseMatrix_SetWidth(self, width_)
SetWidth = _swig_new_instance_method(_sparsemat.SparseMatrix_SetWidth)
def ActualWidth(self):
r"""ActualWidth(SparseMatrix self) -> int"""
return _sparsemat.SparseMatrix_ActualWidth(self)
ActualWidth = _swig_new_instance_method(_sparsemat.SparseMatrix_ActualWidth)
def SortColumnIndices(self):
r"""SortColumnIndices(SparseMatrix self)"""
return _sparsemat.SparseMatrix_SortColumnIndices(self)
SortColumnIndices = _swig_new_instance_method(_sparsemat.SparseMatrix_SortColumnIndices)
def MoveDiagonalFirst(self):
r"""MoveDiagonalFirst(SparseMatrix self)"""
return _sparsemat.SparseMatrix_MoveDiagonalFirst(self)
MoveDiagonalFirst = _swig_new_instance_method(_sparsemat.SparseMatrix_MoveDiagonalFirst)
def Elem(self, *args):
r"""
Elem(SparseMatrix self, int i, int j) -> double
Elem(SparseMatrix self, int i, int j) -> double const &
"""
return _sparsemat.SparseMatrix_Elem(self, *args)
Elem = _swig_new_instance_method(_sparsemat.SparseMatrix_Elem)
def __call__(self, *args):
r"""
__call__(SparseMatrix self, int i, int j) -> double
__call__(SparseMatrix self, int i, int j) -> double const &
"""
return _sparsemat.SparseMatrix___call__(self, *args)
__call__ = _swig_new_instance_method(_sparsemat.SparseMatrix___call__)
def GetDiag(self, d):
r"""GetDiag(SparseMatrix self, Vector d)"""
return _sparsemat.SparseMatrix_GetDiag(self, d)
GetDiag = _swig_new_instance_method(_sparsemat.SparseMatrix_GetDiag)
def ToDenseMatrix(self, *args):
r"""
ToDenseMatrix(SparseMatrix self) -> DenseMatrix
ToDenseMatrix(SparseMatrix self, DenseMatrix B)
"""
return _sparsemat.SparseMatrix_ToDenseMatrix(self, *args)
ToDenseMatrix = _swig_new_instance_method(_sparsemat.SparseMatrix_ToDenseMatrix)
def GetMemoryClass(self):
r"""GetMemoryClass(SparseMatrix self) -> mfem::MemoryClass"""
return _sparsemat.SparseMatrix_GetMemoryClass(self)
GetMemoryClass = _swig_new_instance_method(_sparsemat.SparseMatrix_GetMemoryClass)
def Mult(self, x, y):
r"""Mult(SparseMatrix self, Vector x, Vector y)"""
return _sparsemat.SparseMatrix_Mult(self, x, y)
Mult = _swig_new_instance_method(_sparsemat.SparseMatrix_Mult)
def AddMult(self, x, y, a=1.0):
r"""AddMult(SparseMatrix self, Vector x, Vector y, double const a=1.0)"""
return _sparsemat.SparseMatrix_AddMult(self, x, y, a)
AddMult = _swig_new_instance_method(_sparsemat.SparseMatrix_AddMult)
def MultTranspose(self, x, y):
r"""MultTranspose(SparseMatrix self, Vector x, Vector y)"""
return _sparsemat.SparseMatrix_MultTranspose(self, x, y)
MultTranspose = _swig_new_instance_method(_sparsemat.SparseMatrix_MultTranspose)
def AddMultTranspose(self, x, y, a=1.0):
r"""AddMultTranspose(SparseMatrix self, Vector x, Vector y, double const a=1.0)"""
return _sparsemat.SparseMatrix_AddMultTranspose(self, x, y, a)
AddMultTranspose = _swig_new_instance_method(_sparsemat.SparseMatrix_AddMultTranspose)
def BuildTranspose(self):
r"""BuildTranspose(SparseMatrix self)"""
return _sparsemat.SparseMatrix_BuildTranspose(self)
BuildTranspose = _swig_new_instance_method(_sparsemat.SparseMatrix_BuildTranspose)
def ResetTranspose(self):
r"""ResetTranspose(SparseMatrix self)"""
return _sparsemat.SparseMatrix_ResetTranspose(self)
ResetTranspose = _swig_new_instance_method(_sparsemat.SparseMatrix_ResetTranspose)
def PartMult(self, rows, x, y):
r"""PartMult(SparseMatrix self, intArray rows, Vector x, Vector y)"""
return _sparsemat.SparseMatrix_PartMult(self, rows, x, y)
PartMult = _swig_new_instance_method(_sparsemat.SparseMatrix_PartMult)
def PartAddMult(self, rows, x, y, a=1.0):
r"""PartAddMult(SparseMatrix self, intArray rows, Vector x, Vector y, double const a=1.0)"""
return _sparsemat.SparseMatrix_PartAddMult(self, rows, x, y, a)
PartAddMult = _swig_new_instance_method(_sparsemat.SparseMatrix_PartAddMult)
def BooleanMult(self, x, y):
r"""BooleanMult(SparseMatrix self, intArray x, intArray y)"""
return _sparsemat.SparseMatrix_BooleanMult(self, x, y)
BooleanMult = _swig_new_instance_method(_sparsemat.SparseMatrix_BooleanMult)
def BooleanMultTranspose(self, x, y):
r"""BooleanMultTranspose(SparseMatrix self, intArray x, intArray y)"""
return _sparsemat.SparseMatrix_BooleanMultTranspose(self, x, y)
BooleanMultTranspose = _swig_new_instance_method(_sparsemat.SparseMatrix_BooleanMultTranspose)
def AbsMult(self, x, y):
r"""AbsMult(SparseMatrix self, Vector x, Vector y)"""
return _sparsemat.SparseMatrix_AbsMult(self, x, y)
AbsMult = _swig_new_instance_method(_sparsemat.SparseMatrix_AbsMult)
def AbsMultTranspose(self, x, y):
r"""AbsMultTranspose(SparseMatrix self, Vector x, Vector y)"""
return _sparsemat.SparseMatrix_AbsMultTranspose(self, x, y)
AbsMultTranspose = _swig_new_instance_method(_sparsemat.SparseMatrix_AbsMultTranspose)
def InnerProduct(self, x, y):
r"""InnerProduct(SparseMatrix self, Vector x, Vector y) -> double"""
return _sparsemat.SparseMatrix_InnerProduct(self, x, y)
InnerProduct = _swig_new_instance_method(_sparsemat.SparseMatrix_InnerProduct)
def GetRowSums(self, x):
r"""GetRowSums(SparseMatrix self, Vector x)"""
return _sparsemat.SparseMatrix_GetRowSums(self, x)
GetRowSums = _swig_new_instance_method(_sparsemat.SparseMatrix_GetRowSums)
def GetRowNorml1(self, irow):
r"""GetRowNorml1(SparseMatrix self, int irow) -> double"""
return _sparsemat.SparseMatrix_GetRowNorml1(self, irow)
GetRowNorml1 = _swig_new_instance_method(_sparsemat.SparseMatrix_GetRowNorml1)
def Inverse(self):
r"""Inverse(SparseMatrix self) -> MatrixInverse"""
return _sparsemat.SparseMatrix_Inverse(self)
Inverse = _swig_new_instance_method(_sparsemat.SparseMatrix_Inverse)
def EliminateRow(self, *args):
r"""
EliminateRow(SparseMatrix self, int row, double const sol, Vector rhs)
EliminateRow(SparseMatrix self, int row, mfem::Operator::DiagonalPolicy dpolicy=DIAG_ZERO)
"""
return _sparsemat.SparseMatrix_EliminateRow(self, *args)
EliminateRow = _swig_new_instance_method(_sparsemat.SparseMatrix_EliminateRow)
def EliminateCol(self, *args, **kwargs):
r"""EliminateCol(SparseMatrix self, int col, mfem::Operator::DiagonalPolicy dpolicy=DIAG_ZERO)"""
return _sparsemat.SparseMatrix_EliminateCol(self, *args, **kwargs)
EliminateCol = _swig_new_instance_method(_sparsemat.SparseMatrix_EliminateCol)
def EliminateCols(self, *args):
r"""
EliminateCols(SparseMatrix self, intArray cols, Vector x=None, Vector b=None)
EliminateCols(SparseMatrix self, intArray col_marker, SparseMatrix Ae)
"""
return _sparsemat.SparseMatrix_EliminateCols(self, *args)
EliminateCols = _swig_new_instance_method(_sparsemat.SparseMatrix_EliminateCols)
def EliminateRowColMultipleRHS(self, *args, **kwargs):
r"""EliminateRowColMultipleRHS(SparseMatrix self, int rc, Vector sol, DenseMatrix rhs, mfem::Operator::DiagonalPolicy dpolicy=DIAG_ONE)"""
return _sparsemat.SparseMatrix_EliminateRowColMultipleRHS(self, *args, **kwargs)
EliminateRowColMultipleRHS = _swig_new_instance_method(_sparsemat.SparseMatrix_EliminateRowColMultipleRHS)
def EliminateRowColDiag(self, rc, value):
r"""EliminateRowColDiag(SparseMatrix self, int rc, double value)"""
return _sparsemat.SparseMatrix_EliminateRowColDiag(self, rc, value)
EliminateRowColDiag = _swig_new_instance_method(_sparsemat.SparseMatrix_EliminateRowColDiag)
def EliminateRowCol(self, *args):
r"""
EliminateRowCol(SparseMatrix self, int rc, double const sol, Vector rhs, mfem::Operator::DiagonalPolicy dpolicy=DIAG_ONE)
EliminateRowCol(SparseMatrix self, int rc, mfem::Operator::DiagonalPolicy dpolicy=DIAG_ONE)
EliminateRowCol(SparseMatrix self, int rc, SparseMatrix Ae, mfem::Operator::DiagonalPolicy dpolicy=DIAG_ONE)
"""
return _sparsemat.SparseMatrix_EliminateRowCol(self, *args)
EliminateRowCol = _swig_new_instance_method(_sparsemat.SparseMatrix_EliminateRowCol)
def SetDiagIdentity(self):
r"""SetDiagIdentity(SparseMatrix self)"""
return _sparsemat.SparseMatrix_SetDiagIdentity(self)
SetDiagIdentity = _swig_new_instance_method(_sparsemat.SparseMatrix_SetDiagIdentity)
def EliminateZeroRows(self, threshold=1e-12):
r"""EliminateZeroRows(SparseMatrix self, double const threshold=1e-12)"""
return _sparsemat.SparseMatrix_EliminateZeroRows(self, threshold)
EliminateZeroRows = _swig_new_instance_method(_sparsemat.SparseMatrix_EliminateZeroRows)
def Gauss_Seidel_forw(self, x, y):
r"""Gauss_Seidel_forw(SparseMatrix self, Vector x, Vector y)"""
return _sparsemat.SparseMatrix_Gauss_Seidel_forw(self, x, y)
Gauss_Seidel_forw = _swig_new_instance_method(_sparsemat.SparseMatrix_Gauss_Seidel_forw)
def Gauss_Seidel_back(self, x, y):
r"""Gauss_Seidel_back(SparseMatrix self, Vector x, Vector y)"""
return _sparsemat.SparseMatrix_Gauss_Seidel_back(self, x, y)
Gauss_Seidel_back = _swig_new_instance_method(_sparsemat.SparseMatrix_Gauss_Seidel_back)
def GetJacobiScaling(self):
r"""GetJacobiScaling(SparseMatrix self) -> double"""
return _sparsemat.SparseMatrix_GetJacobiScaling(self)
GetJacobiScaling = _swig_new_instance_method(_sparsemat.SparseMatrix_GetJacobiScaling)
def Jacobi(self, b, x0, x1, sc, use_abs_diag=False):
r"""Jacobi(SparseMatrix self, Vector b, Vector x0, Vector x1, double sc, bool use_abs_diag=False)"""
return _sparsemat.SparseMatrix_Jacobi(self, b, x0, x1, sc, use_abs_diag)
Jacobi = _swig_new_instance_method(_sparsemat.SparseMatrix_Jacobi)
def DiagScale(self, b, x, sc=1.0, use_abs_diag=False):
r"""DiagScale(SparseMatrix self, Vector b, Vector x, double sc=1.0, bool use_abs_diag=False)"""
return _sparsemat.SparseMatrix_DiagScale(self, b, x, sc, use_abs_diag)
DiagScale = _swig_new_instance_method(_sparsemat.SparseMatrix_DiagScale)
def Jacobi2(self, b, x0, x1, sc=1.0):
r"""Jacobi2(SparseMatrix self, Vector b, Vector x0, Vector x1, double sc=1.0)"""
return _sparsemat.SparseMatrix_Jacobi2(self, b, x0, x1, sc)
Jacobi2 = _swig_new_instance_method(_sparsemat.SparseMatrix_Jacobi2)
def Jacobi3(self, b, x0, x1, sc=1.0):
r"""Jacobi3(SparseMatrix self, Vector b, Vector x0, Vector x1, double sc=1.0)"""
return _sparsemat.SparseMatrix_Jacobi3(self, b, x0, x1, sc)
Jacobi3 = _swig_new_instance_method(_sparsemat.SparseMatrix_Jacobi3)
def Finalize(self, *args):
r"""
Finalize(SparseMatrix self, int skip_zeros=1)
Finalize(SparseMatrix self, int skip_zeros, bool fix_empty_rows)
"""
return _sparsemat.SparseMatrix_Finalize(self, *args)
Finalize = _swig_new_instance_method(_sparsemat.SparseMatrix_Finalize)
def Finalized(self):
r"""Finalized(SparseMatrix self) -> bool"""
return _sparsemat.SparseMatrix_Finalized(self)
Finalized = _swig_new_instance_method(_sparsemat.SparseMatrix_Finalized)
def ColumnsAreSorted(self):
r"""ColumnsAreSorted(SparseMatrix self) -> bool"""
return _sparsemat.SparseMatrix_ColumnsAreSorted(self)
ColumnsAreSorted = _swig_new_instance_method(_sparsemat.SparseMatrix_ColumnsAreSorted)
def Threshold(self, tol, fix_empty_rows=False):
r"""Threshold(SparseMatrix self, double tol, bool fix_empty_rows=False)"""
return _sparsemat.SparseMatrix_Threshold(self, tol, fix_empty_rows)
Threshold = _swig_new_instance_method(_sparsemat.SparseMatrix_Threshold)
def GetBlocks(self, blocks):
r"""GetBlocks(SparseMatrix self, mfem::Array2D< mfem::SparseMatrix * > & blocks)"""
return _sparsemat.SparseMatrix_GetBlocks(self, blocks)
GetBlocks = _swig_new_instance_method(_sparsemat.SparseMatrix_GetBlocks)
def GetSubMatrix(self, rows, cols, subm):
r"""GetSubMatrix(SparseMatrix self, intArray rows, intArray cols, DenseMatrix subm)"""
return _sparsemat.SparseMatrix_GetSubMatrix(self, rows, cols, subm)
GetSubMatrix = _swig_new_instance_method(_sparsemat.SparseMatrix_GetSubMatrix)
def SetColPtr(self, row):
r"""SetColPtr(SparseMatrix self, int const row)"""
return _sparsemat.SparseMatrix_SetColPtr(self, row)
SetColPtr = _swig_new_instance_method(_sparsemat.SparseMatrix_SetColPtr)
def ClearColPtr(self):
r"""ClearColPtr(SparseMatrix self)"""
return _sparsemat.SparseMatrix_ClearColPtr(self)
ClearColPtr = _swig_new_instance_method(_sparsemat.SparseMatrix_ClearColPtr)
def _Get_(self, col):
r"""_Get_(SparseMatrix self, int const col) -> double"""
return _sparsemat.SparseMatrix__Get_(self, col)
_Get_ = _swig_new_instance_method(_sparsemat.SparseMatrix__Get_)
def SearchRow(self, *args):
r"""
SearchRow(SparseMatrix self, int const col) -> double
SearchRow(SparseMatrix self, int const row, int const col) -> double &
"""
return _sparsemat.SparseMatrix_SearchRow(self, *args)
SearchRow = _swig_new_instance_method(_sparsemat.SparseMatrix_SearchRow)
def _Add_(self, *args):
r"""
_Add_(SparseMatrix self, int const col, double const a)
_Add_(SparseMatrix self, int const row, int const col, double const a)
"""
return _sparsemat.SparseMatrix__Add_(self, *args)
_Add_ = _swig_new_instance_method(_sparsemat.SparseMatrix__Add_)
def _Set_(self, *args):
r"""
_Set_(SparseMatrix self, int const col, double const a)
_Set_(SparseMatrix self, int const row, int const col, double const a)
"""
return _sparsemat.SparseMatrix__Set_(self, *args)
_Set_ = _swig_new_instance_method(_sparsemat.SparseMatrix__Set_)
def Set(self, i, j, a):
r"""Set(SparseMatrix self, int const i, int const j, double const a)"""
return _sparsemat.SparseMatrix_Set(self, i, j, a)
Set = _swig_new_instance_method(_sparsemat.SparseMatrix_Set)
def SetSubMatrix(self, rows, cols, subm, skip_zeros=1):
r"""SetSubMatrix(SparseMatrix self, intArray rows, intArray cols, DenseMatrix subm, int skip_zeros=1)"""
return _sparsemat.SparseMatrix_SetSubMatrix(self, rows, cols, subm, skip_zeros)
SetSubMatrix = _swig_new_instance_method(_sparsemat.SparseMatrix_SetSubMatrix)
def SetSubMatrixTranspose(self, rows, cols, subm, skip_zeros=1):
r"""SetSubMatrixTranspose(SparseMatrix self, intArray rows, intArray cols, DenseMatrix subm, int skip_zeros=1)"""
return _sparsemat.SparseMatrix_SetSubMatrixTranspose(self, rows, cols, subm, skip_zeros)
SetSubMatrixTranspose = _swig_new_instance_method(_sparsemat.SparseMatrix_SetSubMatrixTranspose)
def AddSubMatrix(self, rows, cols, subm, skip_zeros=1):
r"""AddSubMatrix(SparseMatrix self, intArray rows, intArray cols, DenseMatrix subm, int skip_zeros=1)"""
return _sparsemat.SparseMatrix_AddSubMatrix(self, rows, cols, subm, skip_zeros)
AddSubMatrix = _swig_new_instance_method(_sparsemat.SparseMatrix_AddSubMatrix)
def RowIsEmpty(self, row):
r"""RowIsEmpty(SparseMatrix self, int const row) -> bool"""
return _sparsemat.SparseMatrix_RowIsEmpty(self, row)
RowIsEmpty = _swig_new_instance_method(_sparsemat.SparseMatrix_RowIsEmpty)
def GetRow(self, row, cols, srow):
r"""GetRow(SparseMatrix self, int const row, intArray cols, Vector srow) -> int"""
return _sparsemat.SparseMatrix_GetRow(self, row, cols, srow)
GetRow = _swig_new_instance_method(_sparsemat.SparseMatrix_GetRow)
def SetRow(self, row, cols, srow):
r"""SetRow(SparseMatrix self, int const row, intArray cols, Vector srow)"""
return _sparsemat.SparseMatrix_SetRow(self, row, cols, srow)
SetRow = _swig_new_instance_method(_sparsemat.SparseMatrix_SetRow)
def AddRow(self, row, cols, srow):
r"""AddRow(SparseMatrix self, int const row, intArray cols, Vector srow)"""
return _sparsemat.SparseMatrix_AddRow(self, row, cols, srow)
AddRow = _swig_new_instance_method(_sparsemat.SparseMatrix_AddRow)
def ScaleRow(self, row, scale):
r"""ScaleRow(SparseMatrix self, int const row, double const scale)"""
return _sparsemat.SparseMatrix_ScaleRow(self, row, scale)
ScaleRow = _swig_new_instance_method(_sparsemat.SparseMatrix_ScaleRow)
def ScaleRows(self, sl):
r"""ScaleRows(SparseMatrix self, Vector sl)"""
return _sparsemat.SparseMatrix_ScaleRows(self, sl)
ScaleRows = _swig_new_instance_method(_sparsemat.SparseMatrix_ScaleRows)
def ScaleColumns(self, sr):
r"""ScaleColumns(SparseMatrix self, Vector sr)"""
return _sparsemat.SparseMatrix_ScaleColumns(self, sr)
ScaleColumns = _swig_new_instance_method(_sparsemat.SparseMatrix_ScaleColumns)
def __iadd__(self, B):
r"""__iadd__(SparseMatrix self, SparseMatrix B) -> SparseMatrix"""
val = _sparsemat.SparseMatrix___iadd__(self, B)
val.thisown = 0
return self
def Add(self, *args):
r"""
Add(SparseMatrix self, int const i, int const j, double const a)
Add(SparseMatrix self, double const a, SparseMatrix B)
"""
return _sparsemat.SparseMatrix_Add(self, *args)
Add = _swig_new_instance_method(_sparsemat.SparseMatrix_Add)
def __imul__(self, a):
r"""__imul__(SparseMatrix self, double a) -> SparseMatrix"""
val = _sparsemat.SparseMatrix___imul__(self, a)
val.thisown = 0
return self
def IsSymmetric(self):
r"""IsSymmetric(SparseMatrix self) -> double"""
return _sparsemat.SparseMatrix_IsSymmetric(self)
IsSymmetric = _swig_new_instance_method(_sparsemat.SparseMatrix_IsSymmetric)
def Symmetrize(self):
r"""Symmetrize(SparseMatrix self)"""
return _sparsemat.SparseMatrix_Symmetrize(self)
Symmetrize = _swig_new_instance_method(_sparsemat.SparseMatrix_Symmetrize)
def NumNonZeroElems(self):
r"""NumNonZeroElems(SparseMatrix self) -> int"""
return _sparsemat.SparseMatrix_NumNonZeroElems(self)
NumNonZeroElems = _swig_new_instance_method(_sparsemat.SparseMatrix_NumNonZeroElems)
def MaxNorm(self):
r"""MaxNorm(SparseMatrix self) -> double"""
return _sparsemat.SparseMatrix_MaxNorm(self)
MaxNorm = _swig_new_instance_method(_sparsemat.SparseMatrix_MaxNorm)
def CountSmallElems(self, tol):
r"""CountSmallElems(SparseMatrix self, double tol) -> int"""
return _sparsemat.SparseMatrix_CountSmallElems(self, tol)
CountSmallElems = _swig_new_instance_method(_sparsemat.SparseMatrix_CountSmallElems)
def CheckFinite(self):
r"""CheckFinite(SparseMatrix self) -> int"""
return _sparsemat.SparseMatrix_CheckFinite(self)
CheckFinite = _swig_new_instance_method(_sparsemat.SparseMatrix_CheckFinite)
def SetGraphOwner(self, ownij):
r"""SetGraphOwner(SparseMatrix self, bool ownij)"""
return _sparsemat.SparseMatrix_SetGraphOwner(self, ownij)
SetGraphOwner = _swig_new_instance_method(_sparsemat.SparseMatrix_SetGraphOwner)
def SetDataOwner(self, owna):
r"""SetDataOwner(SparseMatrix self, bool owna)"""
return _sparsemat.SparseMatrix_SetDataOwner(self, owna)
SetDataOwner = _swig_new_instance_method(_sparsemat.SparseMatrix_SetDataOwner)
def OwnsGraph(self):
r"""OwnsGraph(SparseMatrix self) -> bool"""
return _sparsemat.SparseMatrix_OwnsGraph(self)
OwnsGraph = _swig_new_instance_method(_sparsemat.SparseMatrix_OwnsGraph)
def OwnsData(self):
r"""OwnsData(SparseMatrix self) -> bool"""
return _sparsemat.SparseMatrix_OwnsData(self)
OwnsData = _swig_new_instance_method(_sparsemat.SparseMatrix_OwnsData)
def LoseData(self):
r"""LoseData(SparseMatrix self)"""
return _sparsemat.SparseMatrix_LoseData(self)
LoseData = _swig_new_instance_method(_sparsemat.SparseMatrix_LoseData)
def Swap(self, other):
r"""Swap(SparseMatrix self, SparseMatrix other)"""
return _sparsemat.SparseMatrix_Swap(self, other)
Swap = _swig_new_instance_method(_sparsemat.SparseMatrix_Swap)
__swig_destroy__ = _sparsemat.delete_SparseMatrix
def GetType(self):
r"""GetType(SparseMatrix self) -> mfem::Operator::Type"""
return _sparsemat.SparseMatrix_GetType(self)
GetType = _swig_new_instance_method(_sparsemat.SparseMatrix_GetType)
def GetIArray(self):
r"""GetIArray(SparseMatrix self) -> PyObject *"""
return _sparsemat.SparseMatrix_GetIArray(self)
GetIArray = _swig_new_instance_method(_sparsemat.SparseMatrix_GetIArray)
def GetJArray(self):
r"""GetJArray(SparseMatrix self) -> PyObject *"""
return _sparsemat.SparseMatrix_GetJArray(self)
GetJArray = _swig_new_instance_method(_sparsemat.SparseMatrix_GetJArray)
def GetDataArray(self):
r"""GetDataArray(SparseMatrix self) -> PyObject *"""
return _sparsemat.SparseMatrix_GetDataArray(self)
GetDataArray = _swig_new_instance_method(_sparsemat.SparseMatrix_GetDataArray)
def Print(self, *args):
r"""
Print(SparseMatrix self, std::ostream & out=out, int width_=4)
Print(SparseMatrix self, char const * file, int precision=16)
"""
return _sparsemat.SparseMatrix_Print(self, *args)
Print = _swig_new_instance_method(_sparsemat.SparseMatrix_Print)
def PrintGZ(self, file, precision=16):
r"""PrintGZ(SparseMatrix self, char const * file, int precision=16)"""
return _sparsemat.SparseMatrix_PrintGZ(self, file, precision)
PrintGZ = _swig_new_instance_method(_sparsemat.SparseMatrix_PrintGZ)
def PrintMatlab(self, *args):
r"""
PrintMatlab(SparseMatrix self, std::ostream & out=out)
PrintMatlab(SparseMatrix self, char const * file, int precision=16)
"""
return _sparsemat.SparseMatrix_PrintMatlab(self, *args)
PrintMatlab = _swig_new_instance_method(_sparsemat.SparseMatrix_PrintMatlab)
def PrintMatlabGZ(self, file, precision=16):
r"""PrintMatlabGZ(SparseMatrix self, char const * file, int precision=16)"""
return _sparsemat.SparseMatrix_PrintMatlabGZ(self, file, precision)
PrintMatlabGZ = _swig_new_instance_method(_sparsemat.SparseMatrix_PrintMatlabGZ)
def PrintMM(self, *args):
r"""
PrintMM(SparseMatrix self, std::ostream & out=out)
PrintMM(SparseMatrix self, char const * file, int precision=16)
"""
return _sparsemat.SparseMatrix_PrintMM(self, *args)
PrintMM = _swig_new_instance_method(_sparsemat.SparseMatrix_PrintMM)
def PrintMMGZ(self, file, precision=16):
r"""PrintMMGZ(SparseMatrix self, char const * file, int precision=16)"""
return _sparsemat.SparseMatrix_PrintMMGZ(self, file, precision)
PrintMMGZ = _swig_new_instance_method(_sparsemat.SparseMatrix_PrintMMGZ)
def PrintCSRGZ(self, file, precision=16):
r"""PrintCSRGZ(SparseMatrix self, char const * file, int precision=16)"""
return _sparsemat.SparseMatrix_PrintCSRGZ(self, file, precision)
PrintCSRGZ = _swig_new_instance_method(_sparsemat.SparseMatrix_PrintCSRGZ)
def PrintCSR(self, *args):
r"""
PrintCSR(SparseMatrix self, std::ostream & out)
PrintCSR(SparseMatrix self, char const * file, int precision=16)
PrintCSR(SparseMatrix self)
"""
return _sparsemat.SparseMatrix_PrintCSR(self, *args)
PrintCSR = _swig_new_instance_method(_sparsemat.SparseMatrix_PrintCSR)
def PrintCSR2GZ(self, file, precision=16):
r"""PrintCSR2GZ(SparseMatrix self, char const * file, int precision=16)"""
return _sparsemat.SparseMatrix_PrintCSR2GZ(self, file, precision)
PrintCSR2GZ = _swig_new_instance_method(_sparsemat.SparseMatrix_PrintCSR2GZ)
def PrintCSR2(self, *args):
r"""
PrintCSR2(SparseMatrix self, std::ostream & out)
PrintCSR2(SparseMatrix self, char const * file, int precision=16)
PrintCSR2(SparseMatrix self)
"""
return _sparsemat.SparseMatrix_PrintCSR2(self, *args)
PrintCSR2 = _swig_new_instance_method(_sparsemat.SparseMatrix_PrintCSR2)
def PrintInfoGZ(self, file, precision=16):
r"""PrintInfoGZ(SparseMatrix self, char const * file, int precision=16)"""
return _sparsemat.SparseMatrix_PrintInfoGZ(self, file, precision)
PrintInfoGZ = _swig_new_instance_method(_sparsemat.SparseMatrix_PrintInfoGZ)
def PrintInfo(self, *args):
r"""
PrintInfo(SparseMatrix self, std::ostream & out)
PrintInfo(SparseMatrix self, char const * file, int precision=16)
PrintInfo(SparseMatrix self)
"""
return _sparsemat.SparseMatrix_PrintInfo(self, *args)
PrintInfo = _swig_new_instance_method(_sparsemat.SparseMatrix_PrintInfo)
# Register SparseMatrix in _sparsemat:
_sparsemat.SparseMatrix_swigregister(SparseMatrix)
def __lshift__(os, mat):
r"""__lshift__(std::ostream & os, SparseMatrix mat) -> std::ostream &"""
return _sparsemat.__lshift__(os, mat)
__lshift__ = _sparsemat.__lshift__
def SparseMatrixFunction(S, f):
r"""SparseMatrixFunction(SparseMatrix S, double (*)(double) f)"""
return _sparsemat.SparseMatrixFunction(S, f)
SparseMatrixFunction = _sparsemat.SparseMatrixFunction
def TransposeAbstractSparseMatrix(A, useActualWidth):
r"""TransposeAbstractSparseMatrix(AbstractSparseMatrix A, int useActualWidth) -> SparseMatrix"""
return _sparsemat.TransposeAbstractSparseMatrix(A, useActualWidth)
TransposeAbstractSparseMatrix = _sparsemat.TransposeAbstractSparseMatrix
def TransposeMult(A, B):
r"""TransposeMult(SparseMatrix A, SparseMatrix B) -> SparseMatrix"""
return _sparsemat.TransposeMult(A, B)
TransposeMult = _sparsemat.TransposeMult
def MultAbstractSparseMatrix(A, B):
r"""MultAbstractSparseMatrix(AbstractSparseMatrix A, AbstractSparseMatrix B) -> SparseMatrix"""
return _sparsemat.MultAbstractSparseMatrix(A, B)
MultAbstractSparseMatrix = _sparsemat.MultAbstractSparseMatrix
def Mult_AtDA(A, D, OAtDA=None):
r"""Mult_AtDA(SparseMatrix A, Vector D, SparseMatrix OAtDA=None) -> SparseMatrix"""
return _sparsemat.Mult_AtDA(A, D, OAtDA)
Mult_AtDA = _sparsemat.Mult_AtDA
def OuterProduct(*args):
r"""
OuterProduct(DenseMatrix A, DenseMatrix B) -> DenseMatrix
OuterProduct(DenseMatrix A, SparseMatrix B) -> SparseMatrix
OuterProduct(SparseMatrix A, DenseMatrix B) -> SparseMatrix
OuterProduct(SparseMatrix A, SparseMatrix B) -> SparseMatrix
"""
return _sparsemat.OuterProduct(*args)
OuterProduct = _sparsemat.OuterProduct
| mfem/PyMFEM | mfem/_par/sparsemat.py | Python | bsd-3-clause | 42,261 |
# -*- coding: utf-8 -*-
# Copyright (c) 2011-2014 Berkeley Model United Nations. All rights reserved.
# Use of this source code is governed by a BSD License (see LICENSE).
import re
from rest_framework.serializers import ValidationError
def name(value):
    '''Matches names of people, countries, and other things.'''
if re.match(r'^[A-Za-z\s\.\-\'àèéìòóôù]+$', value) is None:
raise ValidationError('This field contains invalid characters.')
def address(value):
'''Matches street addresses.'''
if re.match(r'^[\w\s\.\-\'àèéìòóôù]+$', value) is None:
raise ValidationError('This field contains invalid characters.')
def numeric(value):
'''Matches numbers and spaces.'''
if re.match(r'^[\d\s]+$', value) is None:
raise ValidationError('This field can only contain numbers and spaces.')
def email(value):
'''Loosely matches email addresses.'''
if re.match(r'^[\w_.+-]+@[\w-]+\.[\w\-.]+$', value) is None:
raise ValidationError('This is an invalid email address.')
def phone_international(value):
'''Loosely matches phone numbers.'''
if re.match(r'^[\d\-x\s\+\(\)]+$', value) is None:
raise ValidationError('This is an invalid phone number.')
def phone_domestic(value):
'''Matches domestic phone numbers.'''
if re.match(r'^\(?(\d{3})\)?\s(\d{3})-(\d{4})(\sx\d{1,5})?$', value) is None:
raise ValidationError('This is an invalid phone number.')
def nonempty(value):
'''Requires that a field be non-empty.'''
if not value:
raise ValidationError('This field is required.')
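# --- Illustrative usage (editor's sketch, not part of the original module).
# The validators are plain callables, so DRF 3-style serializer fields can
# take them through the ``validators`` argument; the serializer and field
# names below are hypothetical.
from rest_framework import serializers

class ExampleContactSerializer(serializers.Serializer):
    full_name = serializers.CharField(validators=[name])
    email_address = serializers.CharField(validators=[email])
    phone = serializers.CharField(validators=[phone_international])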
| jmosky12/huxley | huxley/api/validators.py | Python | bsd-3-clause | 1,610 |
# -*- coding: utf-8 -*-
from django.core.mail.backends.base import BaseEmailBackend
from django.core.mail.message import sanitize_address
from django.conf import settings
import requests
import logging
logger = logging.getLogger(__name__)
class Newsletter2GoEmailBackend(BaseEmailBackend):
n2g_api_endpoint = 'https://www.newsletter2go.de/de/api/send/email/'
def send_messages(self, emails):
"""
Sends one or more EmailMessage objects and returns the number of email
messages sent.
"""
if not emails:
            return 0
num_sent = 0
for email in emails:
if not email.recipients():
continue
from_email = sanitize_address(email.from_email, email.encoding)
recipients = [sanitize_address(addr, email.encoding)
for addr in email.recipients()]
logger.debug('Sending email from {0} to {1}'.format(from_email, ', '.join(recipients)))
for recipient in recipients:
payload = {
'key': settings.NEWSLETTER2GO_API_KEY,
'to': recipient,
'from': from_email,
'subject': email.subject,
}
payload['html' if email.content_subtype == 'html' else 'text'] = email.body
response = requests.post(self.n2g_api_endpoint, payload)
response_json = response.json()
if response_json.get('status') == 200:
num_sent += 1
return num_sent
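# --- Illustrative usage (editor's sketch, not part of the original module).
# Once the backend is configured, Django's regular mail API transparently
# routes through the Newsletter2Go HTTP endpoint. The settings values are
# hypothetical placeholders.
#
#     # settings.py
#     EMAIL_BACKEND = 'newsletter2go.backends.Newsletter2GoEmailBackend'
#     NEWSLETTER2GO_API_KEY = 'your-api-key'
#
#     # application code
#     from django.core.mail import send_mail
#     send_mail('Subject', 'Body text', 'from@example.com', ['to@example.com'])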
| lukasklein/django-newsletter2go | newsletter2go/backends.py | Python | bsd-3-clause | 1,590 |
__author__ = 'Bohdan Mushkevych'
import unittest
from settings import enable_test_mode
enable_test_mode()
from db.model.raw_data import DOMAIN_NAME, TIMEPERIOD
from constants import PROCESS_SITE_YEARLY
from tests import monthly_fixtures
from tests import yearly_fixtures
from tests.test_abstract_worker import AbstractWorkerUnitTest
from workers.site_yearly_aggregator import SiteYearlyAggregator
class SiteYearlyAggregatorUnitTest(AbstractWorkerUnitTest):
def virtual_set_up(self):
super(SiteYearlyAggregatorUnitTest, self).constructor(baseclass=SiteYearlyAggregator,
process_name=PROCESS_SITE_YEARLY,
output_prefix='EXPECTED_SITE_YEARLY',
output_module=yearly_fixtures,
generate_output=False,
compare_results=True)
monthly_fixtures.clean_site_entries()
return monthly_fixtures.generated_site_entries()
def virtual_tear_down(self):
monthly_fixtures.clean_site_entries()
def _get_key(self, obj):
return obj[DOMAIN_NAME], obj[TIMEPERIOD]
def test_aggregation(self):
super(SiteYearlyAggregatorUnitTest, self).perform_aggregation()
if __name__ == '__main__':
unittest.main()
| eggsandbeer/scheduler | tests/test_site_yearly_aggregator.py | Python | bsd-3-clause | 1,460 |
import datetime
import httplib2
import itertools
from django.conf import settings
from django.db import connection
from django.db.models import Sum, Max
from apiclient.discovery import build
from elasticsearch.helpers import bulk_index
from oauth2client.client import OAuth2Credentials
import olympia.core.logger
from olympia import amo
from olympia.amo import search as amo_search
from olympia.addons.models import Addon
from olympia.amo.celery import task
from olympia.bandwagon.models import Collection
from olympia.reviews.models import Review
from olympia.users.models import UserProfile
from olympia.versions.models import Version
from . import search
from .models import (
AddonCollectionCount, CollectionCount, CollectionStats, DownloadCount,
ThemeUserCount, UpdateCount)
log = olympia.core.logger.getLogger('z.task')
@task
def update_addons_collections_downloads(data, **kw):
log.info("[%s] Updating addons+collections download totals." %
(len(data)))
query = (
"UPDATE addons_collections SET downloads=%s WHERE addon_id=%s "
"AND collection_id=%s;" * len(data))
with connection.cursor() as cursor:
cursor.execute(
query,
list(itertools.chain.from_iterable(
[var['sum'], var['addon'], var['collection']]
for var in data)))
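# (Editor's note, not in the original source.) The statement string above is
# repeated once per data row and the parameters are flattened to line up with
# the placeholders, e.g. for two rows:
#   query  == "UPDATE ... SET downloads=%s WHERE addon_id=%s AND collection_id=%s;" * 2
#   params == [sum0, addon0, collection0, sum1, addon1, collection1]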
@task
def update_collections_total(data, **kw):
log.info("[%s] Updating collections' download totals." %
(len(data)))
for var in data:
(Collection.objects.filter(pk=var['collection_id'])
.update(downloads=var['sum']))
def get_profile_id(service, domain):
"""
Fetch the profile ID for the given domain.
"""
accounts = service.management().accounts().list().execute()
account_ids = [a['id'] for a in accounts.get('items', ())]
for account_id in account_ids:
webproperties = service.management().webproperties().list(
accountId=account_id).execute()
webproperty_ids = [p['id'] for p in webproperties.get('items', ())]
for webproperty_id in webproperty_ids:
profiles = service.management().profiles().list(
accountId=account_id,
webPropertyId=webproperty_id).execute()
for p in profiles.get('items', ()):
# sometimes GA includes "http://", sometimes it doesn't.
if '://' in p['websiteUrl']:
name = p['websiteUrl'].partition('://')[-1]
else:
name = p['websiteUrl']
if name == domain:
return p['id']
@task
def update_google_analytics(date, **kw):
creds_data = getattr(settings, 'GOOGLE_ANALYTICS_CREDENTIALS', None)
if not creds_data:
log.critical('Failed to update global stats: '
'GOOGLE_ANALYTICS_CREDENTIALS not set')
return
creds = OAuth2Credentials(
*[creds_data[k] for k in
('access_token', 'client_id', 'client_secret',
'refresh_token', 'token_expiry', 'token_uri',
'user_agent')])
h = httplib2.Http()
creds.authorize(h)
service = build('analytics', 'v3', http=h)
domain = getattr(settings,
'GOOGLE_ANALYTICS_DOMAIN', None) or settings.DOMAIN
profile_id = get_profile_id(service, domain)
if profile_id is None:
log.critical('Failed to update global stats: could not access a Google'
' Analytics profile for ' + domain)
return
datestr = date.strftime('%Y-%m-%d')
try:
data = service.data().ga().get(ids='ga:' + profile_id,
start_date=datestr,
end_date=datestr,
metrics='ga:visits').execute()
# Storing this under the webtrends stat name so it goes on the
# same graph as the old webtrends data.
p = ['webtrends_DailyVisitors', data['rows'][0][0], date]
except Exception, e:
log.critical(
            'Fetching stats data for %s from Google Analytics failed: %s'
            % (datestr, e))
return
try:
cursor = connection.cursor()
cursor.execute('REPLACE INTO global_stats (name, count, date) '
'values (%s, %s, %s)', p)
except Exception, e:
log.critical('Failed to update global stats: (%s): %s' % (p, e))
else:
log.debug('Committed global stats details: (%s) has (%s) for (%s)'
% tuple(p))
finally:
cursor.close()
@task
def update_global_totals(job, date, **kw):
log.info('Updating global statistics totals (%s) for (%s)' % (job, date))
jobs = _get_daily_jobs(date)
jobs.update(_get_metrics_jobs(date))
num = jobs[job]()
q = """REPLACE INTO global_stats (`name`, `count`, `date`)
VALUES (%s, %s, %s)"""
p = [job, num or 0, date]
try:
cursor = connection.cursor()
cursor.execute(q, p)
except Exception, e:
log.critical('Failed to update global stats: (%s): %s' % (p, e))
else:
log.debug('Committed global stats details: (%s) has (%s) for (%s)'
% tuple(p))
finally:
cursor.close()
def _get_daily_jobs(date=None):
"""Return a dictionary of statistics queries.
If a date is specified and applies to the job it will be used. Otherwise
the date will default to the previous day.
"""
if not date:
date = datetime.date.today() - datetime.timedelta(days=1)
# Passing through a datetime would not generate an error,
# but would pass and give incorrect values.
if isinstance(date, datetime.datetime):
raise ValueError('This requires a valid date, not a datetime')
    # Testing on lte created date doesn't get you today's date; you need to
    # test less than the next date. That's because 2012-1-1 becomes 2012-1-1 00:00
next_date = date + datetime.timedelta(days=1)
date_str = date.strftime('%Y-%m-%d')
extra = dict(where=['DATE(created)=%s'], params=[date_str])
# If you're editing these, note that you are returning a function! This
# cheesy hackery was done so that we could pass the queries to celery
# lazily and not hammer the db with a ton of these all at once.
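    # (Editor's note, not in the original source.) For example the caller in
    # update_global_totals() does:
    #     jobs = _get_daily_jobs(date)
    #     num = jobs['user_count_new']()   # the query only runs here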
stats = {
# Add-on Downloads
'addon_total_downloads': lambda: DownloadCount.objects.filter(
date__lt=next_date).aggregate(sum=Sum('count'))['sum'],
'addon_downloads_new': lambda: DownloadCount.objects.filter(
date=date).aggregate(sum=Sum('count'))['sum'],
# Listed Add-on counts
'addon_count_new': Addon.objects.valid().extra(**extra).count,
# Listed Version counts
'version_count_new': Version.objects.filter(
channel=amo.RELEASE_CHANNEL_LISTED).extra(**extra).count,
# User counts
'user_count_total': UserProfile.objects.filter(
created__lt=next_date).count,
'user_count_new': UserProfile.objects.extra(**extra).count,
# Review counts
'review_count_total': Review.objects.filter(created__lte=date,
editorreview=0).count,
# We can't use "**extra" here, because this query joins on reviews
# itself, and thus raises the following error:
# "Column 'created' in where clause is ambiguous".
'review_count_new': Review.objects.filter(editorreview=0).extra(
where=['DATE(reviews.created)=%s'], params=[date_str]).count,
# Collection counts
'collection_count_total': Collection.objects.filter(
created__lt=next_date).count,
'collection_count_new': Collection.objects.extra(**extra).count,
'collection_addon_downloads': (
lambda: AddonCollectionCount.objects.filter(
date__lte=date).aggregate(sum=Sum('count'))['sum']),
}
# If we're processing today's stats, we'll do some extras. We don't do
# these for re-processed stats because they change over time (eg. add-ons
    # move from sandbox -> public).
if date == (datetime.date.today() - datetime.timedelta(days=1)):
stats.update({
'addon_count_nominated': Addon.objects.filter(
created__lte=date, status=amo.STATUS_NOMINATED,
disabled_by_user=0).count,
'addon_count_public': Addon.objects.filter(
created__lte=date, status=amo.STATUS_PUBLIC,
disabled_by_user=0).count,
'addon_count_pending': Version.objects.filter(
created__lte=date, files__status=amo.STATUS_PENDING).count,
'collection_count_private': Collection.objects.filter(
created__lte=date, listed=0).count,
'collection_count_public': Collection.objects.filter(
created__lte=date, listed=1).count,
'collection_count_editorspicks': Collection.objects.filter(
created__lte=date, type=amo.COLLECTION_FEATURED).count,
'collection_count_normal': Collection.objects.filter(
created__lte=date, type=amo.COLLECTION_NORMAL).count,
})
return stats
def _get_metrics_jobs(date=None):
"""Return a dictionary of statistics queries.
If a date is specified and applies to the job it will be used. Otherwise
the date will default to the last date metrics put something in the db.
"""
if not date:
date = UpdateCount.objects.aggregate(max=Max('date'))['max']
# If you're editing these, note that you are returning a function!
stats = {
'addon_total_updatepings': lambda: UpdateCount.objects.filter(
date=date).aggregate(sum=Sum('count'))['sum'],
'collector_updatepings': lambda: UpdateCount.objects.get(
addon=settings.ADDON_COLLECTOR_ID, date=date).count,
}
return stats
@task
def index_update_counts(ids, index=None, **kw):
index = index or search.get_alias()
es = amo_search.get_es()
qs = UpdateCount.objects.filter(id__in=ids)
if qs:
log.info('Indexing %s updates for %s.' % (qs.count(), qs[0].date))
data = []
try:
for update in qs:
data.append(search.extract_update_count(update))
bulk_index(es, data, index=index,
doc_type=UpdateCount.get_mapping_type(), refresh=True)
except Exception, exc:
index_update_counts.retry(args=[ids, index], exc=exc, **kw)
raise
@task
def index_download_counts(ids, index=None, **kw):
index = index or search.get_alias()
es = amo_search.get_es()
qs = DownloadCount.objects.filter(id__in=ids)
if qs:
log.info('Indexing %s downloads for %s.' % (qs.count(), qs[0].date))
try:
data = []
for dl in qs:
data.append(search.extract_download_count(dl))
bulk_index(es, data, index=index,
doc_type=DownloadCount.get_mapping_type(), refresh=True)
except Exception, exc:
index_download_counts.retry(args=[ids, index], exc=exc)
raise
@task
def index_collection_counts(ids, index=None, **kw):
index = index or search.get_alias()
es = amo_search.get_es()
qs = CollectionCount.objects.filter(collection__in=ids)
if qs:
log.info('Indexing %s addon collection counts: %s'
% (qs.count(), qs[0].date))
data = []
try:
for collection_count in qs:
collection = collection_count.collection_id
filters = dict(collection=collection,
date=collection_count.date)
data.append(search.extract_addon_collection(
collection_count,
AddonCollectionCount.objects.filter(**filters),
CollectionStats.objects.filter(**filters)))
bulk_index(es, data, index=index,
doc_type=CollectionCount.get_mapping_type(),
refresh=True)
except Exception, exc:
index_collection_counts.retry(args=[ids], exc=exc)
raise
@task
def index_theme_user_counts(ids, index=None, **kw):
index = index or search.get_alias()
es = amo_search.get_es()
qs = ThemeUserCount.objects.filter(id__in=ids)
if qs:
log.info('Indexing %s theme user counts for %s.'
% (qs.count(), qs[0].date))
data = []
try:
for user_count in qs:
data.append(search.extract_theme_user_count(user_count))
bulk_index(es, data, index=index,
doc_type=ThemeUserCount.get_mapping_type(), refresh=True)
except Exception, exc:
index_theme_user_counts.retry(args=[ids], exc=exc, **kw)
raise
| harikishen/addons-server | src/olympia/stats/tasks.py | Python | bsd-3-clause | 12,766 |
"""
The :mod:`sklearn.model_selection._search` includes utilities to fine-tune the
parameters of an estimator.
"""
from __future__ import print_function
from __future__ import division
# Author: Alexandre Gramfort <alexandre.gramfort@inria.fr>,
# Gael Varoquaux <gael.varoquaux@normalesup.org>
# Andreas Mueller <amueller@ais.uni-bonn.de>
# Olivier Grisel <olivier.grisel@ensta.org>
# License: BSD 3 clause
from abc import ABCMeta, abstractmethod
from collections import Mapping, namedtuple, Sized, defaultdict
from functools import partial, reduce
from itertools import product
import operator
import warnings
import numpy as np
from ..base import BaseEstimator, is_classifier, clone
from ..base import MetaEstimatorMixin
from ._split import check_cv
from ._validation import _fit_and_score
from ..exceptions import NotFittedError
from ..externals.joblib import Parallel, delayed
from ..externals import six
from ..utils import check_random_state
from ..utils.fixes import sp_version
from ..utils.fixes import rankdata
from ..utils.random import sample_without_replacement
from ..utils.validation import indexable, check_is_fitted
from ..utils.metaestimators import if_delegate_has_method
from ..metrics.scorer import check_scoring
__all__ = ['GridSearchCV', 'ParameterGrid', 'fit_grid_point',
'ParameterSampler', 'RandomizedSearchCV']
class ParameterGrid(object):
"""Grid of parameters with a discrete number of values for each.
Can be used to iterate over parameter value combinations with the
Python built-in function iter.
Read more in the :ref:`User Guide <search>`.
Parameters
----------
param_grid : dict of string to sequence, or sequence of such
The parameter grid to explore, as a dictionary mapping estimator
parameters to sequences of allowed values.
An empty dict signifies default parameters.
A sequence of dicts signifies a sequence of grids to search, and is
useful to avoid exploring parameter combinations that make no sense
or have no effect. See the examples below.
Examples
--------
>>> from sklearn.model_selection import ParameterGrid
>>> param_grid = {'a': [1, 2], 'b': [True, False]}
>>> list(ParameterGrid(param_grid)) == (
... [{'a': 1, 'b': True}, {'a': 1, 'b': False},
... {'a': 2, 'b': True}, {'a': 2, 'b': False}])
True
>>> grid = [{'kernel': ['linear']}, {'kernel': ['rbf'], 'gamma': [1, 10]}]
>>> list(ParameterGrid(grid)) == [{'kernel': 'linear'},
... {'kernel': 'rbf', 'gamma': 1},
... {'kernel': 'rbf', 'gamma': 10}]
True
>>> ParameterGrid(grid)[1] == {'kernel': 'rbf', 'gamma': 1}
True
See also
--------
:class:`GridSearchCV`:
Uses :class:`ParameterGrid` to perform a full parallelized parameter
search.
"""
def __init__(self, param_grid):
if isinstance(param_grid, Mapping):
# wrap dictionary in a singleton list to support either dict
# or list of dicts
param_grid = [param_grid]
self.param_grid = param_grid
def __iter__(self):
"""Iterate over the points in the grid.
Returns
-------
params : iterator over dict of string to any
Yields dictionaries mapping each estimator parameter to one of its
allowed values.
"""
for p in self.param_grid:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(p.items())
if not items:
yield {}
else:
keys, values = zip(*items)
for v in product(*values):
params = dict(zip(keys, v))
yield params
def __len__(self):
"""Number of points on the grid."""
# Product function that can handle iterables (np.product can't).
product = partial(reduce, operator.mul)
return sum(product(len(v) for v in p.values()) if p else 1
for p in self.param_grid)
def __getitem__(self, ind):
"""Get the parameters that would be ``ind``th in iteration
Parameters
----------
ind : int
The iteration index
Returns
-------
params : dict of string to any
Equal to list(self)[ind]
"""
# This is used to make discrete sampling without replacement memory
# efficient.
for sub_grid in self.param_grid:
# XXX: could memoize information used here
if not sub_grid:
if ind == 0:
return {}
else:
ind -= 1
continue
# Reverse so most frequent cycling parameter comes first
keys, values_lists = zip(*sorted(sub_grid.items())[::-1])
sizes = [len(v_list) for v_list in values_lists]
total = np.product(sizes)
if ind >= total:
# Try the next grid
ind -= total
else:
out = {}
for key, v_list, n in zip(keys, values_lists, sizes):
ind, offset = divmod(ind, n)
out[key] = v_list[offset]
return out
raise IndexError('ParameterGrid index out of range')
class ParameterSampler(object):
"""Generator on parameters sampled from given distributions.
Non-deterministic iterable over random candidate combinations for hyper-
parameter search. If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Note that before SciPy 0.16, the ``scipy.stats.distributions`` do not
accept a custom RNG instance and always use the singleton RNG from
``numpy.random``. Hence setting ``random_state`` will not guarantee a
deterministic iteration whenever ``scipy.stats`` distributions are used to
define the parameter search space. Deterministic behavior is however
guaranteed from SciPy 0.16 onwards.
Read more in the :ref:`User Guide <search>`.
Parameters
----------
param_distributions : dict
Dictionary where the keys are parameters and values
are distributions from which a parameter is to be sampled.
Distributions either have to provide a ``rvs`` function
to sample from them, or can be given as a list of values,
where a uniform distribution is assumed.
n_iter : integer
Number of parameter settings that are produced.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
Returns
-------
params : dict of string to any
**Yields** dictionaries mapping each estimator parameter to
as sampled value.
Examples
--------
>>> from sklearn.model_selection import ParameterSampler
>>> from scipy.stats.distributions import expon
>>> import numpy as np
>>> np.random.seed(0)
>>> param_grid = {'a':[1, 2], 'b': expon()}
>>> param_list = list(ParameterSampler(param_grid, n_iter=4))
>>> rounded_list = [dict((k, round(v, 6)) for (k, v) in d.items())
... for d in param_list]
>>> rounded_list == [{'b': 0.89856, 'a': 1},
... {'b': 0.923223, 'a': 1},
... {'b': 1.878964, 'a': 2},
... {'b': 1.038159, 'a': 2}]
True
"""
def __init__(self, param_distributions, n_iter, random_state=None):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def __iter__(self):
# check if all distributions are given as lists
# in this case we want to sample without replacement
all_lists = np.all([not hasattr(v, "rvs")
for v in self.param_distributions.values()])
rnd = check_random_state(self.random_state)
if all_lists:
# look up sampled parameter settings in parameter grid
param_grid = ParameterGrid(self.param_distributions)
grid_size = len(param_grid)
if grid_size < self.n_iter:
raise ValueError(
"The total space of parameters %d is smaller "
"than n_iter=%d. For exhaustive searches, use "
"GridSearchCV." % (grid_size, self.n_iter))
for i in sample_without_replacement(grid_size, self.n_iter,
random_state=rnd):
yield param_grid[i]
else:
# Always sort the keys of a dictionary, for reproducibility
items = sorted(self.param_distributions.items())
for _ in six.moves.range(self.n_iter):
params = dict()
for k, v in items:
if hasattr(v, "rvs"):
if sp_version < (0, 16):
params[k] = v.rvs()
else:
params[k] = v.rvs(random_state=rnd)
else:
params[k] = v[rnd.randint(len(v))]
yield params
def __len__(self):
"""Number of points that will be sampled."""
return self.n_iter
def fit_grid_point(X, y, estimator, parameters, train, test, scorer,
verbose, error_score='raise', **fit_params):
"""Run fit on one set of parameters.
Parameters
----------
X : array-like, sparse matrix or list
Input data.
y : array-like or None
Targets for input data.
estimator : estimator object
        An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
parameters : dict
Parameters to be set on estimator for this grid point.
train : ndarray, dtype int or bool
Boolean mask or indices for training set.
test : ndarray, dtype int or bool
Boolean mask or indices for test set.
scorer : callable or None.
If provided must be a scorer callable object / function with signature
``scorer(estimator, X, y)``.
verbose : int
Verbosity level.
**fit_params : kwargs
        Additional parameters passed to the fit function of the estimator.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Returns
-------
score : float
Score of this parameter setting on given training / test split.
parameters : dict
The parameters that have been evaluated.
n_samples_test : int
Number of test samples in this split.
"""
score, n_samples_test, _ = _fit_and_score(estimator, X, y, scorer, train,
test, verbose, parameters,
fit_params, error_score)
return score, parameters, n_samples_test
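# --- Illustrative usage (editor's sketch, not part of the original module).
# fit_grid_point evaluates a single parameter setting on one train/test
# split; the estimator, indices and parameter values are hypothetical.
#
#     import numpy as np
#     from sklearn import datasets, svm
#     from sklearn.metrics.scorer import check_scoring
#
#     iris = datasets.load_iris()
#     clf = svm.SVC()
#     train, test = np.arange(100), np.arange(100, 150)
#     score, params, n_test = fit_grid_point(
#         iris.data, iris.target, clf, {'C': 10.0}, train, test,
#         check_scoring(clf), verbose=0)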
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for v in p.values():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be one-dimensional.")
check = [isinstance(v, k) for k in (list, tuple, np.ndarray)]
if True not in check:
raise ValueError("Parameter values should be a list.")
if len(v) == 0:
raise ValueError("Parameter values should be a non-empty "
"list.")
# XXX Remove in 0.20
class _CVScoreTuple (namedtuple('_CVScoreTuple',
('parameters',
'mean_validation_score',
'cv_validation_scores'))):
# A raw namedtuple is very memory efficient as it packs the attributes
    # in a struct to get rid of the __dict__ of attributes; in particular it
    # does not copy the string for the keys on each instance.
    # By deriving a namedtuple class just to introduce the __repr__ method we
    # would also reintroduce the __dict__ on the instance. We avoid that by
    # telling the Python interpreter that this subclass uses static __slots__
    # instead of dynamic attributes. Furthermore we don't need any additional
    # slot in the subclass, so we set __slots__ to the empty tuple.
__slots__ = ()
def __repr__(self):
"""Simple custom repr to summarize the main info"""
return "mean: {0:.5f}, std: {1:.5f}, params: {2}".format(
self.mean_validation_score,
np.std(self.cv_validation_scores),
self.parameters)
class BaseSearchCV(six.with_metaclass(ABCMeta, BaseEstimator,
MetaEstimatorMixin)):
"""Base class for hyper parameter search with cross-validation."""
@abstractmethod
def __init__(self, estimator, scoring=None,
fit_params=None, n_jobs=1, iid=True,
refit=True, cv=None, verbose=0, pre_dispatch='2*n_jobs',
error_score='raise'):
self.scoring = scoring
self.estimator = estimator
self.n_jobs = n_jobs
self.fit_params = fit_params if fit_params is not None else {}
self.iid = iid
self.refit = refit
self.cv = cv
self.verbose = verbose
self.pre_dispatch = pre_dispatch
self.error_score = error_score
@property
def _estimator_type(self):
return self.estimator._estimator_type
def score(self, X, y=None):
"""Returns the score on the given data, if the estimator has been refit.
This uses the score defined by ``scoring`` where provided, and the
``best_estimator_.score`` method otherwise.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Input data, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
Returns
-------
score : float
"""
if self.scorer_ is None:
raise ValueError("No score function explicitly defined, "
"and the estimator doesn't provide one %s"
% self.best_estimator_)
return self.scorer_(self.best_estimator_, X, y)
def _check_is_fitted(self, method_name):
if not self.refit:
raise NotFittedError(('This GridSearchCV instance was initialized '
'with refit=False. %s is '
'available only after refitting on the best '
'parameters. ') % method_name)
else:
check_is_fitted(self, 'best_estimator_')
@if_delegate_has_method(delegate='estimator')
def predict(self, X):
"""Call predict on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict``.
Parameters
        ----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('predict')
return self.best_estimator_.predict(X)
@if_delegate_has_method(delegate='estimator')
def predict_proba(self, X):
"""Call predict_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_proba``.
Parameters
        ----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('predict_proba')
return self.best_estimator_.predict_proba(X)
@if_delegate_has_method(delegate='estimator')
def predict_log_proba(self, X):
"""Call predict_log_proba on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``predict_log_proba``.
Parameters
        ----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('predict_log_proba')
return self.best_estimator_.predict_log_proba(X)
@if_delegate_has_method(delegate='estimator')
def decision_function(self, X):
"""Call decision_function on the estimator with the best found parameters.
Only available if ``refit=True`` and the underlying estimator supports
``decision_function``.
Parameters
        ----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('decision_function')
return self.best_estimator_.decision_function(X)
@if_delegate_has_method(delegate='estimator')
def transform(self, X):
"""Call transform on the estimator with the best found parameters.
Only available if the underlying estimator supports ``transform`` and
``refit=True``.
Parameters
        ----------
X : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('transform')
return self.best_estimator_.transform(X)
@if_delegate_has_method(delegate='estimator')
def inverse_transform(self, Xt):
"""Call inverse_transform on the estimator with the best found params.
Only available if the underlying estimator implements
``inverse_transform`` and ``refit=True``.
Parameters
        ----------
Xt : indexable, length n_samples
Must fulfill the input assumptions of the
underlying estimator.
"""
self._check_is_fitted('inverse_transform')
        return self.best_estimator_.inverse_transform(Xt)
def _fit(self, X, y, labels, parameter_iterable):
"""Actual fitting, performing the search over parameters."""
estimator = self.estimator
cv = check_cv(self.cv, y, classifier=is_classifier(estimator))
self.scorer_ = check_scoring(self.estimator, scoring=self.scoring)
X, y, labels = indexable(X, y, labels)
n_splits = cv.get_n_splits(X, y, labels)
if self.verbose > 0 and isinstance(parameter_iterable, Sized):
n_candidates = len(parameter_iterable)
print("Fitting {0} folds for each of {1} candidates, totalling"
" {2} fits".format(n_splits, n_candidates,
n_candidates * n_splits))
base_estimator = clone(self.estimator)
pre_dispatch = self.pre_dispatch
out = Parallel(
n_jobs=self.n_jobs, verbose=self.verbose,
pre_dispatch=pre_dispatch
)(delayed(_fit_and_score)(clone(base_estimator), X, y, self.scorer_,
train, test, self.verbose, parameters,
self.fit_params, return_parameters=True,
error_score=self.error_score)
for parameters in parameter_iterable
for train, test in cv.split(X, y, labels))
test_scores, test_sample_counts, _, parameters = zip(*out)
candidate_params = parameters[::n_splits]
n_candidates = len(candidate_params)
test_scores = np.array(test_scores,
dtype=np.float64).reshape(n_candidates,
n_splits)
        # NOTE test_sample_counts (weights) remain the same for all candidates
test_sample_counts = np.array(test_sample_counts[:n_splits],
dtype=np.int)
        # Compute the (weighted) mean and std for all the candidates
weights = test_sample_counts if self.iid else None
means = np.average(test_scores, axis=1, weights=weights)
stds = np.sqrt(np.average((test_scores - means[:, np.newaxis]) ** 2,
axis=1, weights=weights))
results = dict()
for split_i in range(n_splits):
results["test_split%d_score" % split_i] = test_scores[:, split_i]
results["test_mean_score"] = means
results["test_std_score"] = stds
ranks = np.asarray(rankdata(-means, method='min'), dtype=np.int32)
best_index = np.flatnonzero(ranks == 1)[0]
best_parameters = candidate_params[best_index]
results["test_rank_score"] = ranks
        # Use one np.ma.MaskedArray and mask all the places where the param is not
# applicable for that candidate. Use defaultdict as each candidate may
# not contain all the params
param_results = defaultdict(partial(np.ma.masked_all, (n_candidates,),
dtype=object))
for cand_i, params in enumerate(candidate_params):
for name, value in params.items():
# An all masked empty array gets created for the key
# `"param_%s" % name` at the first occurence of `name`.
# Setting the value at an index also unmasks that index
param_results["param_%s" % name][cand_i] = value
results.update(param_results)
# Store a list of param dicts at the key 'params'
results['params'] = candidate_params
self.results_ = results
self.best_index_ = best_index
self.n_splits_ = n_splits
if self.refit:
# fit the best estimator using the entire dataset
# clone first to work around broken estimators
best_estimator = clone(base_estimator).set_params(
**best_parameters)
if y is not None:
best_estimator.fit(X, y, **self.fit_params)
else:
best_estimator.fit(X, **self.fit_params)
self.best_estimator_ = best_estimator
return self
@property
def best_params_(self):
check_is_fitted(self, 'results_')
return self.results_['params'][self.best_index_]
@property
def best_score_(self):
check_is_fitted(self, 'results_')
return self.results_['test_mean_score'][self.best_index_]
@property
def grid_scores_(self):
warnings.warn(
"The grid_scores_ attribute was deprecated in version 0.18"
" in favor of the more elaborate results_ attribute."
" The grid_scores_ attribute will not be available from 0.20",
DeprecationWarning)
check_is_fitted(self, 'results_')
grid_scores = list()
for i, (params, mean, std) in enumerate(zip(
self.results_['params'],
self.results_['test_mean_score'],
self.results_['test_std_score'])):
scores = np.array(list(self.results_['test_split%d_score' % s][i]
for s in range(self.n_splits_)),
dtype=np.float64)
grid_scores.append(_CVScoreTuple(params, mean, scores))
return grid_scores
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
Important members are fit, predict.
GridSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated grid-search over a parameter grid.
Read more in the :ref:`User Guide <grid_search>`.
Parameters
----------
estimator : estimator object.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
        Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this GridSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Examples
--------
>>> from sklearn import svm, datasets
>>> from sklearn.model_selection import GridSearchCV
>>> iris = datasets.load_iris()
>>> parameters = {'kernel':('linear', 'rbf'), 'C':[1, 10]}
>>> svr = svm.SVC()
>>> clf = GridSearchCV(svr, parameters)
>>> clf.fit(iris.data, iris.target)
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
GridSearchCV(cv=None, error_score=...,
estimator=SVC(C=1.0, cache_size=..., class_weight=..., coef0=...,
decision_function_shape=None, degree=..., gamma=...,
kernel='rbf', max_iter=-1, probability=False,
random_state=None, shrinking=True, tol=...,
verbose=False),
fit_params={}, iid=..., n_jobs=1,
param_grid=..., pre_dispatch=..., refit=...,
scoring=..., verbose=...)
>>> sorted(clf.results_.keys())
... # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
['param_C', 'param_kernel', 'params', 'test_mean_score',...
'test_rank_score', 'test_split0_score', 'test_split1_score',...
'test_split2_score', 'test_std_score']
Attributes
----------
results_ : dict of numpy (masked) ndarrays
A dict with keys as column headers and values as columns, that can be
imported into a pandas ``DataFrame``.
For instance the below given table
+------------+-----------+------------+-----------------+---+---------+
|param_kernel|param_gamma|param_degree|test_split0_score|...|...rank..|
+============+===========+============+=================+===+=========+
| 'poly' | -- | 2 | 0.8 |...| 2 |
+------------+-----------+------------+-----------------+---+---------+
| 'poly' | -- | 3 | 0.7 |...| 4 |
+------------+-----------+------------+-----------------+---+---------+
| 'rbf' | 0.1 | -- | 0.8 |...| 3 |
+------------+-----------+------------+-----------------+---+---------+
| 'rbf' | 0.2 | -- | 0.9 |...| 1 |
+------------+-----------+------------+-----------------+---+---------+
will be represented by a ``results_`` dict of::
{
'param_kernel': masked_array(data = ['poly', 'poly', 'rbf', 'rbf'],
mask = [False False False False]...)
'param_gamma': masked_array(data = [-- -- 0.1 0.2],
mask = [ True True False False]...),
'param_degree': masked_array(data = [2.0 3.0 -- --],
mask = [False False True True]...),
'test_split0_score' : [0.8, 0.7, 0.8, 0.9],
'test_split1_score' : [0.82, 0.5, 0.7, 0.78],
'test_mean_score' : [0.81, 0.60, 0.75, 0.82],
'test_std_score' : [0.02, 0.01, 0.03, 0.03],
'test_rank_score' : [2, 4, 3, 1],
'params' : [{'kernel': 'poly', 'degree': 2}, ...],
}
NOTE that the key ``'params'`` is used to store a list of parameter
settings dict for all the parameter candidates.
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
best_index_ : int
The index (of the ``results_`` arrays) which corresponds to the best
candidate parameter setting.
The dict at ``search.results_['params'][search.best_index_]`` gives
        the parameter setting for the best model, i.e. the one that gives
        the highest mean score (``search.best_score_``).
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
n_splits_ : int
The number of cross-validation splits (folds/iterations).
Notes
    -----
The parameters selected are those that maximize the score of the left out
data, unless an explicit score is passed in which case it is used instead.
If `n_jobs` was set to a value higher than one, the data is copied for each
point in the grid (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
    --------
:class:`ParameterGrid`:
generates all the combinations of a hyperparameter grid.
:func:`sklearn.model_selection.train_test_split`:
utility function to split the data into a development set usable
for fitting a GridSearchCV instance and an evaluation set for
its final evaluation.
:func:`sklearn.metrics.make_scorer`:
Make a scorer from a performance metric or loss function.
"""
def __init__(self, estimator, param_grid, scoring=None, fit_params=None,
n_jobs=1, iid=True, refit=True, cv=None, verbose=0,
pre_dispatch='2*n_jobs', error_score='raise'):
super(GridSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
self.param_grid = param_grid
_check_param_grid(param_grid)
def fit(self, X, y=None, labels=None):
"""Run fit with all sets of parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
"""
return self._fit(X, y, labels, ParameterGrid(self.param_grid))
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" and a "score" method.
It also implements "predict", "predict_proba", "decision_function",
"transform" and "inverse_transform" if they are implemented in the
estimator used.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
It is highly recommended to use continuous distributions for continuous
parameters.
Read more in the :ref:`User Guide <randomized_parameter_search>`.
Parameters
----------
estimator : estimator object.
        An object of that type is instantiated for each grid point.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, default=10
Number of parameter settings that are sampled. n_iter trades
off runtime vs quality of the solution.
scoring : string, callable or None, default=None
A string (see model evaluation documentation) or
a scorer callable object / function with signature
``scorer(estimator, X, y)``.
If ``None``, the ``score`` method of the estimator is used.
fit_params : dict, optional
Parameters to pass to the fit method.
n_jobs : int, default=1
Number of jobs to run in parallel.
pre_dispatch : int, or string, optional
Controls the number of jobs that get dispatched during parallel
execution. Reducing this number can be useful to avoid an
explosion of memory consumption when more jobs get dispatched
than CPUs can process. This parameter can be:
- None, in which case all the jobs are immediately
created and spawned. Use this for lightweight and
fast-running jobs, to avoid delays due to on-demand
spawning of the jobs
- An int, giving the exact number of total jobs that are
spawned
- A string, giving an expression as a function of n_jobs,
as in '2*n_jobs'
iid : boolean, default=True
If True, the data is assumed to be identically distributed across
the folds, and the loss minimized is the total loss per sample,
and not the mean loss across the folds.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross validation,
- integer, to specify the number of folds in a `(Stratified)KFold`,
- An object to be used as a cross-validation generator.
- An iterable yielding train, test splits.
For integer/None inputs, if the estimator is a classifier and ``y`` is
either binary or multiclass, :class:`StratifiedKFold` is used. In all
other cases, :class:`KFold` is used.
        Refer to the :ref:`User Guide <cross_validation>` for the various
cross-validation strategies that can be used here.
refit : boolean, default=True
Refit the best estimator with the entire dataset.
If "False", it is impossible to make predictions using
this RandomizedSearchCV instance after fitting.
verbose : integer
Controls the verbosity: the higher, the more messages.
random_state : int or RandomState
Pseudo random number generator state used for random uniform sampling
from lists of possible values instead of scipy.stats distributions.
error_score : 'raise' (default) or numeric
Value to assign to the score if an error occurs in estimator fitting.
If set to 'raise', the error is raised. If a numeric value is given,
FitFailedWarning is raised. This parameter does not affect the refit
step, which will always raise the error.
Attributes
----------
results_ : dict of numpy (masked) ndarrays
A dict with keys as column headers and values as columns, that can be
imported into a pandas ``DataFrame``.
For instance the below given table
+--------------+-------------+-------------------+---+---------------+
| param_kernel | param_gamma | test_split0_score |...|test_rank_score|
+==============+=============+===================+===+===============+
| 'rbf' | 0.1 | 0.8 |...| 2 |
+--------------+-------------+-------------------+---+---------------+
| 'rbf' | 0.2 | 0.9 |...| 1 |
+--------------+-------------+-------------------+---+---------------+
| 'rbf' | 0.3 | 0.7 |...| 1 |
+--------------+-------------+-------------------+---+---------------+
will be represented by a ``results_`` dict of::
{
            'param_kernel' : masked_array(data = ['rbf', 'rbf', 'rbf'],
mask = False),
'param_gamma' : masked_array(data = [0.1 0.2 0.3], mask = False),
'test_split0_score' : [0.8, 0.9, 0.7],
'test_split1_score' : [0.82, 0.5, 0.7],
'test_mean_score' : [0.81, 0.7, 0.7],
'test_std_score' : [0.02, 0.2, 0.],
'test_rank_score' : [3, 1, 1],
'params' : [{'kernel' : 'rbf', 'gamma' : 0.1}, ...],
}
NOTE that the key ``'params'`` is used to store a list of parameter
settings dict for all the parameter candidates.
best_estimator_ : estimator
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if refit=False.
best_score_ : float
Score of best_estimator on the left out data.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
best_index_ : int
The index (of the ``results_`` arrays) which corresponds to the best
candidate parameter setting.
The dict at ``search.results_['params'][search.best_index_]`` gives
        the parameter setting for the best model, i.e. the one that gives
        the highest mean score (``search.best_score_``).
scorer_ : function
Scorer function used on the held out data to choose the best
parameters for the model.
n_splits_ : int
The number of cross-validation splits (folds/iterations).
Notes
-----
The parameters selected are those that maximize the score of the held-out
data, according to the scoring parameter.
If `n_jobs` was set to a value higher than one, the data is copied for each
    parameter setting (and not `n_jobs` times). This is done for efficiency
reasons if individual jobs take very little time, but may raise errors if
the dataset is large and not enough memory is available. A workaround in
this case is to set `pre_dispatch`. Then, the memory is copied only
`pre_dispatch` many times. A reasonable value for `pre_dispatch` is `2 *
n_jobs`.
See Also
--------
:class:`GridSearchCV`:
Does exhaustive search over a grid of parameters.
:class:`ParameterSampler`:
        A generator over parameter settings, constructed from
param_distributions.
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
fit_params=None, n_jobs=1, iid=True, refit=True, cv=None,
verbose=0, pre_dispatch='2*n_jobs', random_state=None,
error_score='raise'):
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
super(RandomizedSearchCV, self).__init__(
estimator=estimator, scoring=scoring, fit_params=fit_params,
n_jobs=n_jobs, iid=iid, refit=refit, cv=cv, verbose=verbose,
pre_dispatch=pre_dispatch, error_score=error_score)
def fit(self, X, y=None, labels=None):
"""Run fit on the estimator with randomly drawn parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
            Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape = [n_samples] or [n_samples, n_output], optional
Target relative to X for classification or regression;
None for unsupervised learning.
labels : array-like, with shape (n_samples,), optional
Group labels for the samples used while splitting the dataset into
train/test set.
"""
sampled_params = ParameterSampler(self.param_distributions,
self.n_iter,
random_state=self.random_state)
return self._fit(X, y, labels, sampled_params)
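# --- Illustrative usage (editor's sketch, not part of the original module;
# GridSearchCV above carries a doctest while RandomizedSearchCV does not,
# so this mirrors it). X_train and y_train are hypothetical.
#
#     from scipy.stats import expon
#     from sklearn.svm import SVC
#     search = RandomizedSearchCV(
#         SVC(), {'C': expon(scale=10), 'gamma': expon(scale=0.1)},
#         n_iter=20, random_state=0)
#     search.fit(X_train, y_train)
#     search.best_params_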
| ClimbsRocks/scikit-learn | sklearn/model_selection/_search.py | Python | bsd-3-clause | 44,853 |
# -*- coding: utf-8 -*-
import sys
import django
from django.utils import six
try:
from django.utils.text import truncate_words
except ImportError:
# django >=1.5
from django.utils.text import Truncator
from django.utils.functional import allow_lazy
def truncate_words(s, num, end_text='...'):
truncate = end_text and ' %s' % end_text or ''
return Truncator(s).words(num, truncate=truncate)
truncate_words = allow_lazy(truncate_words, six.text_type)
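# (Editor's note, not in the original source.) Both code paths behave the
# same, e.g.:
#     truncate_words('the quick brown fox', 2)  # -> 'the quick ...'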
DJANGO_1_4 = django.VERSION < (1, 5)
DJANGO_1_5 = django.VERSION < (1, 6)
DJANGO_1_6 = django.VERSION < (1, 7)
DJANGO_1_7 = django.VERSION < (1, 8)
DJANGO_1_8 = django.VERSION < (1, 9)
if not six.PY3:
fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
# copied from django.utils._os (not present in Django 1.4)
def upath(path):
"""
Always return a unicode path.
"""
if six.PY2 and not isinstance(path, six.text_type):
return path.decode(fs_encoding)
return path
# copied from django-cms (for compatibility with Django 1.4)
try:
from django.utils.encoding import force_unicode
def python_2_unicode_compatible(klass):
"""
A decorator that defines __unicode__ and __str__ methods under Python 2.
Under Python 3 it does nothing.
To support Python 2 and 3 with a single code base, define a __str__ method
returning text and apply this decorator to the class.
"""
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
return klass
except ImportError:
force_unicode = lambda s: str(s)
from django.utils.encoding import python_2_unicode_compatible
def get_delete_permission(opts):
try:
from django.contrib.auth import get_permission_codename
return '%s.%s' % (opts.app_label,
get_permission_codename('delete', opts))
except ImportError:
return '%s.%s' % (opts.app_label,
opts.get_delete_permission())
| lory87/django-filer | filer/utils/compatibility.py | Python | bsd-3-clause | 2,062 |
# -*- coding: utf-8 -*-
#
# © 2012 Scott Reynolds
# Author: Scott Reynolds <scott@scottreynolds.us>
#
"""Redboy: Key representation"""
# @TODO: Really this class is silly. It's really just an __init__ and a
# str(). Seems obtuse and useless.
import uuid
class Key(object):
"""
A key determines how to extract the data from Redis. Maintains binary
safe representation
"""
def __init__(self, pool_name, prefix="", key=None):
"""Create a key that connects to the pool identified by pool_name with
        the prefix and a string key. The key can be None, in which case a
        uuid will be used in its place."""
key = key or uuid.uuid4().hex
self.pool_name = pool_name
self.prefix = prefix
self.key = key
def _attrs(self):
"""Get attributes of this key."""
return dict((attr, getattr(self, attr)) for attr in
('pool_name', 'prefix', 'key',))
def __str__(self):
return self.prefix + self.key
def __repr__(self):
"""Return a printable representation of this key."""
return str(self._attrs())
def __unicode__(self):
"""Return a unicode string of this key."""
return unicode(str(self))
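# --- Illustrative usage (editor's sketch, not part of the original module;
# the pool name and prefix are hypothetical).
#
#     key = Key('sessions', prefix='session:', key='42')
#     str(key)                                    # -> 'session:42'
#     auto = Key('sessions', prefix='session:')   # key defaults to uuid4().hex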
| SupermanScott/Redboy | redboy/key.py | Python | bsd-3-clause | 1,232 |
from django import forms
from django.forms.forms import BoundField
from .helpers import LMIForAllClient
from .fields import MultiCharField
class FieldSet(object):
"""
Taken from stackoverflow.com/questions/10366745/django-form-field-grouping
Helper class to group BoundField objects together.
"""
def __init__(self, form, fields, legend='', cls=None):
self.form = form
self.legend = legend
self.fields = fields
self.cls = cls
def __iter__(self):
for name in self.fields:
field = self.form.fields[name]
yield BoundField(self.form, field, name)
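# --- Illustrative usage (editor's sketch, not part of the original module).
# JobDescriptionsForm below builds one FieldSet per keyword; a view or
# template can then walk the groups and the BoundFields inside them. The
# keyword value is hypothetical and the lookup hits the LMI-for-All API.
#
#     form = JobDescriptionsForm(keywords=['driver'])
#     for fieldset in form.fieldsets:
#         print(fieldset.legend)            # the keyword that made the group
#         for bound_field in fieldset:
#             print(bound_field.label)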
class NoColonForm(forms.Form):
"""
Removes the default colons from form labels.
"""
def __init__(self, *args, **kwargs):
kwargs.setdefault('label_suffix', '')
super().__init__(*args, **kwargs)
class BaseLMIForm(NoColonForm):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.lmi_client = LMIForAllClient()
class SectorForm(NoColonForm):
SECTOR_INPUT_COUNT = 3
sector = MultiCharField(
count=SECTOR_INPUT_COUNT,
label="How would you describe the types of jobs you could do?",
help_text=" eg customer services, security, data entry, driver",
require_all_fields=False,
error_messages={'required': 'Enter at least one job role', },
)
class JobDescriptionsForm(BaseLMIForm):
def __init__(self, *args, **kwargs):
        keywords = kwargs.pop('keywords')
super().__init__(*args, **kwargs)
self.fieldsets = []
self._add_fields_from_keywords(keywords)
def _add_fields_from_keywords(self, keywords):
for keyword in keywords:
if keyword:
soc_codes = []
lmi_data = self.lmi_client.keyword_search(keyword)
count = 6
for item in lmi_data[:count]:
soc_code = str(item['soc'])
if soc_code not in soc_codes:
soc_codes.append(soc_code)
field = forms.BooleanField(
widget=forms.CheckboxInput,
label=item['title'],
help_text=item['description'],
required=False,
)
self.fields[soc_code] = field
self.fieldsets.append(FieldSet(
self, list(soc_codes), keyword))
def clean(self):
cleaned_data = super().clean()
if not any(cleaned_data.values()):
raise forms.ValidationError(
"Please select at least one job title",
code='invalid'
)
return cleaned_data
| lm-tools/situational | situational/apps/sectors/forms.py | Python | bsd-3-clause | 2,817 |
# -*- coding: utf-8 -*-
from flask import request
class View(object):
def __init__(self, core):
self.core = core
def __call__(self, *args, **kwargs):
method = request.method.lower()
handler = getattr(self, method, None)
if callable(handler):
return handler(request=request, *args, **kwargs)
else:
return "Bad Request", 403
class Application(object):
def __init__(self, core):
self.core = core
def get_urls(self):
"""
Returns a list of tuples: (route, View)
"""
return []
def get_plugins(self):
"""
Returns a list of plugin classes
"""
return []
| ojii/ircbotframework | ircbotframework/app.py | Python | bsd-3-clause | 719 |
from base import Phase
preparation = Phase('Preparation', 'Initializing connections, fetching data etc.')
volume_creation = Phase('Volume creation', 'Creating the volume to bootstrap onto')
volume_preparation = Phase('Volume preparation', 'Formatting the bootstrap volume')
volume_mounting = Phase('Volume mounting', 'Mounting bootstrap volume')
os_installation = Phase('OS installation', 'Installing the operating system')
package_installation = Phase('Package installation', 'Installing software')
system_modification = Phase('System modification', 'Modifying configuration files, adding resources, etc.')
system_cleaning = Phase('System cleaning', 'Removing sensitive data, temporary files and other leftovers')
volume_unmounting = Phase('Volume unmounting', 'Unmounting the bootstrap volume')
image_registration = Phase('Image registration', 'Uploading/Registering with the provider')
cleaning = Phase('Cleaning', 'Removing temporary files')
order = [preparation,
volume_creation,
volume_preparation,
volume_mounting,
os_installation,
package_installation,
system_modification,
system_cleaning,
volume_unmounting,
image_registration,
cleaning,
]
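# A minimal sketch of how 'order' can be used to compare phases, assuming
# callers only need their relative position in the bootstrap sequence:
#
#     def comes_before(a, b):
#         return order.index(a) < order.index(b)
#
#     assert comes_before(volume_mounting, volume_unmounting)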
| brianspeir/Vanilla | vendor/bootstrap-vz/common/phases.py | Python | bsd-3-clause | 1,251 |
import toolz
from toolz.curried import take, first, second, sorted, merge_with
def test_take():
assert list(take(2)([1, 2, 3])) == [1, 2]
def test_first():
assert first is toolz.itertoolz.core.first
def test_merge_with():
assert merge_with(sum)({1: 1}, {1: 2}) == {1: 3}
def test_sorted():
assert sorted(key=second)([(1, 2), (2, 1)]) == [(2, 1), (1, 2)]
| obmarg/toolz | toolz/tests/test_curried.py | Python | bsd-3-clause | 378 |
#!/usr/bin/env python
from __future__ import print_function
import argparse
import os
import sys
import subprocess
import re
parser = argparse.ArgumentParser(description="""Extract version info from git and
generate a version header file. The working directory is expected to be
the root of Firmware.""")
parser.add_argument('filename', metavar='version.h', help='Header output file')
parser.add_argument('-v', '--verbose', dest='verbose', action='store_true',
help='Verbose output', default=False)
parser.add_argument('--validate', dest='validate', action='store_true',
help='Validate the tag format', default=False)
args = parser.parse_args()
filename = args.filename
verbose = args.verbose
validate = args.validate
try:
    with open(filename, 'r') as fp_header:
        old_header = fp_header.read()
except IOError:
    old_header = ''
# Generate the header file content
header = """
/* Auto Magically Generated file */
/* Do not edit! */
#pragma once
"""
# PX4
git_tag = subprocess.check_output('git describe --always --tags --dirty'.split(),
stderr=subprocess.STDOUT).decode('utf-8').strip()
if validate:
if verbose:
print("testing git tag: "+git_tag)
# remove optional '-dirty' at the end
git_tag_test = re.sub(r'-dirty$', '', git_tag)
# remove optional -<num_commits>-g<commit_hash> at the end (in case we are not on a tagged commit)
git_tag_test = re.sub(r'-[0-9]+-g[0-9a-fA-F]+$', '', git_tag_test)
# now check the version format
m = re.match(r'v([0-9]+)\.([0-9]+)\.[0-9]+(((-dev)|(-alpha[0-9]+)|(-beta[0-9]+)|(-rc[0-9]+))|'\
r'(-[0-9]+\.[0-9]+\.[0-9]+((-dev)|(-alpha[0-9]+)|(-beta[0-9]+)|([-]?rc[0-9]+))?))?$', git_tag_test)
if m:
# format matches, check the major and minor numbers
major = int(m.group(1))
minor = int(m.group(2))
if major < 1 or (major == 1 and minor < 9):
print("")
print("Error: PX4 version too low, expected at least v1.9.0")
print("Check the git tag (current tag: '{:}')".format(git_tag_test))
print("")
sys.exit(1)
else:
print("")
print("Error: the git tag '{:}' does not match the expected format.".format(git_tag_test))
print("")
print("The expected format is 'v<PX4 version>[-<custom version>]'")
print(" <PX4 version>: v<major>.<minor>.<patch>[-rc<rc>|-beta<beta>|-alpha<alpha>|-dev]")
print(" <custom version>: <major>.<minor>.<patch>[-rc<rc>|-beta<beta>|-alpha<alpha>|-dev]")
print("Examples:")
print(" v1.9.0-rc3 (preferred)")
print(" v1.9.0-beta1")
print(" v1.9.0-1.0.0")
print(" v1.9.0-1.0.0-alpha2")
print("See also https://dev.px4.io/master/en/setup/building_px4.html#firmware_version")
print("")
sys.exit(1)
git_version = subprocess.check_output('git rev-parse --verify HEAD'.split(),
stderr=subprocess.STDOUT).decode('utf-8').strip()
try:
git_branch_name = subprocess.check_output('git symbolic-ref -q --short HEAD'.split(),
stderr=subprocess.STDOUT).decode('utf-8').strip()
except subprocess.CalledProcessError:
git_branch_name = ''
git_version_short = git_version[0:16]
header += """
#define PX4_GIT_VERSION_STR "{git_version}"
#define PX4_GIT_VERSION_BINARY 0x{git_version_short}
#define PX4_GIT_TAG_STR "{git_tag}"
#define PX4_GIT_BRANCH_NAME "{git_branch_name}"
""".format(git_tag=git_tag,
git_version=git_version,
git_version_short=git_version_short,
git_branch_name=git_branch_name)
# ECL
if (os.path.exists('src/lib/ecl/.git')):
    ecl_git_tag = subprocess.check_output('git describe --always --tags --dirty'.split(),
                                          cwd='src/lib/ecl', stderr=subprocess.STDOUT).decode('utf-8').strip()
ecl_git_version = subprocess.check_output('git rev-parse --verify HEAD'.split(),
cwd='src/lib/ecl', stderr=subprocess.STDOUT).decode('utf-8').strip()
ecl_git_version_short = ecl_git_version[0:16]
header += """
#define ECL_LIB_GIT_VERSION_STR "{ecl_git_version}"
#define ECL_LIB_GIT_VERSION_BINARY 0x{ecl_git_version_short}
""".format(ecl_git_version=ecl_git_version,
ecl_git_version_short=ecl_git_version_short)
# Mavlink
if (os.path.exists('mavlink/include/mavlink/v2.0/.git')):
mavlink_git_version = subprocess.check_output('git rev-parse --verify HEAD'.split(),
cwd='mavlink/include/mavlink/v2.0', stderr=subprocess.STDOUT).decode('utf-8').strip()
mavlink_git_version_short = mavlink_git_version[0:16]
header += """
#define MAVLINK_LIB_GIT_VERSION_STR "{mavlink_git_version}"
#define MAVLINK_LIB_GIT_VERSION_BINARY 0x{mavlink_git_version_short}
""".format(mavlink_git_version=mavlink_git_version,
mavlink_git_version_short=mavlink_git_version_short)
# NuttX
if (os.path.exists('platforms/nuttx/NuttX/nuttx/.git')):
nuttx_git_tag = subprocess.check_output('git describe --always --tags --match nuttx-* --dirty'.split(),
cwd='platforms/nuttx/NuttX/nuttx', stderr=subprocess.STDOUT).decode('utf-8').strip().replace("nuttx-","v")
nuttx_git_tag = re.sub('-.*','.0',nuttx_git_tag)
nuttx_git_version = subprocess.check_output('git rev-parse --verify HEAD'.split(),
cwd='platforms/nuttx/NuttX/nuttx', stderr=subprocess.STDOUT).decode('utf-8').strip()
nuttx_git_version_short = nuttx_git_version[0:16]
header += """
#define NUTTX_GIT_VERSION_STR "{nuttx_git_version}"
#define NUTTX_GIT_VERSION_BINARY 0x{nuttx_git_version_short}
#define NUTTX_GIT_TAG_STR "{nuttx_git_tag}"
""".format(nuttx_git_version=nuttx_git_version,
nuttx_git_version_short=nuttx_git_version_short,
nuttx_git_tag=nuttx_git_tag)
if old_header != header:
if verbose:
print('Updating header {}'.format(filename))
    with open(filename, 'w') as fp_header:
        fp_header.write(header)
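# Typical invocation, run from the root of Firmware (the output path below
# is only an example):
#
#     python px_update_git_header.py build/px4_version.h --validate --verbose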
| krbeverx/Firmware | src/lib/version/px_update_git_header.py | Python | bsd-3-clause | 6,103 |
# pylint: disable-msg=W0614,W0401,W0611,W0622
__docformat__ = 'restructuredtext'
from datetime import datetime
import numpy as np
from pandas.version import version as __version__
from pandas.info import __doc__
from pandas.core.api import *
from pandas.io.parsers import parseCSV, parseText, parseExcel
from pandas.stats.api import *
from numpy.testing import Tester
class NoseWrapper(Tester):
'''
    This is simply a monkey patch for numpy.testing.Tester: it changes
    extra_argv from its default of None to ['--exe'] so that the tests run
    the same way across platforms.
'''
def test(self, label='fast', verbose=1, extra_argv=['--exe'], doctests=False,
coverage=False):
''' Run tests for module using nose
%(test_header)s
doctests : boolean
If True, run doctests in module, default False
coverage : boolean
If True, report coverage of NumPy code, default False
(Requires the coverage module:
http://nedbatchelder.com/code/modules/coverage.html)
'''
# cap verbosity at 3 because nose becomes *very* verbose beyond that
verbose = min(verbose, 3)
from numpy.testing import utils
utils.verbose = verbose
if doctests:
print "Running unit tests and doctests for %s" % self.package_name
else:
print "Running unit tests for %s" % self.package_name
self._show_system_info()
# reset doctest state on every run
import doctest
doctest.master = None
argv, plugins = self.prepare_test_args(label, verbose, extra_argv,
doctests, coverage)
from numpy.testing.noseclasses import NumpyTestProgram
t = NumpyTestProgram(argv=argv, exit=False, plugins=plugins)
return t.result
test = NoseWrapper().test
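# A minimal usage sketch for the wrapper above:
#
#     import pandas
#     pandas.test(label='fast', doctests=True)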
| willgrass/pandas | pandas/__init__.py | Python | bsd-3-clause | 1,913 |
__import__("pkg_resources").declare_namespace(__name__)
from infi.clickhouse_orm.database import *
from infi.clickhouse_orm.engines import *
from infi.clickhouse_orm.fields import *
from infi.clickhouse_orm.funcs import *
from infi.clickhouse_orm.migrations import *
from infi.clickhouse_orm.models import *
from infi.clickhouse_orm.query import *
from infi.clickhouse_orm.system_models import *
from inspect import isclass
__all__ = [c.__name__ for c in locals().values() if isclass(c)]
| Infinidat/infi.clickhouse_orm | src/infi/clickhouse_orm/__init__.py | Python | bsd-3-clause | 490 |
# -*- coding: utf-8 -*-
'''auto ordering call chain test mixins'''
from inspect import ismodule
from twoq.support import port
class ARandomQMixin(object):
def test_choice(self):
self.assertEqual(len(list(self.qclass(1, 2, 3, 4, 5, 6).choice())), 1)
def test_sample(self):
self.assertEqual(len(self.qclass(1, 2, 3, 4, 5, 6).sample(3).end()), 3)
def test_shuffle(self):
self.assertEqual(
len(self.qclass(1, 2, 3, 4, 5, 6).shuffle()),
len([5, 4, 6, 3, 1, 2]),
)
class ACombineQMixin(object):
# def test_combinations(self):
# foo = self.qclass('ABCD').combinations(2).value(),
# self.assertEqual(
# foo[0],
# [('A', 'B'), ('A', 'C'), ('A', 'D'), ('B', 'C'), ('B', 'D'),
# ('C', 'D')],
# foo,
# )
#
# def test_permutations(self):
# foo = self.qclass('ABCD').permutations(2).value()
# self.assertEqual(
# foo[0],
# [('A', 'B'), ('A', 'C'), ('A', 'D'), ('B', 'A'), ('B', 'C'),
# ('B', 'D'), ('C', 'A'), ('C', 'B'), ('C', 'D'), ('D', 'A'),
# ('D', 'B'), ('D', 'C')],
# foo,
# )
def test_product(self):
foo = self.qclass('ABCD', 'xy').product().value()
self.assertEqual(
foo,
[('A', 'x'), ('A', 'y'), ('B', 'x'), ('B', 'y'), ('C', 'x'),
('C', 'y'), ('D', 'x'), ('D', 'y')],
foo,
)
class AOrderQMixin(ARandomQMixin, ACombineQMixin):
'''combination mixin'''
    def test_group(self):
from math import floor
self.assertEqual(
self.qclass(1.3, 2.1, 2.4).tap(lambda x: floor(x)).group().end(),
[[1.0, [1.3]], [2.0, [2.1, 2.4]]]
)
self.assertEqual(
self.qclass(1.3, 2.1, 2.4).group().end(),
[[1.3, [1.3]], [2.1, [2.1]], [2.4, [2.4]]],
)
def test_grouper(self):
self.assertEqual(
self.qclass(
'moe', 'larry', 'curly', 30, 40, 50, True
).grouper(2, 'x').end(),
[('moe', 'larry'), ('curly', 30), (40, 50), (True, 'x')]
)
def test_reversed(self):
self.assertEqual(
self.qclass(5, 4, 3, 2, 1).reverse().end(), [1, 2, 3, 4, 5],
)
def test_sort(self):
from math import sin
self.assertEqual(
self.qclass(1, 2, 3, 4, 5, 6).tap(
lambda x: sin(x)
).sort().end(),
[5, 4, 6, 3, 1, 2],
)
self.assertEqual(
self.qclass(4, 6, 65, 3, 63, 2, 4).sort().end(),
[2, 3, 4, 4, 6, 63, 65],
)
__all__ = sorted(name for name, obj in port.items(locals()) if not any([
name.startswith('_'), ismodule(obj), name in ['ismodule', 'port']
]))
del ismodule
| lcrees/twoq | twoq/tests/auto/ordering.py | Python | bsd-3-clause | 2,838 |
#-------------------------------------------------------------------------------
#
# Copyright (c) 2005, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in enthought/LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
# Author: David C. Morrill
# Date: 10/18/2005
#
#-------------------------------------------------------------------------------
""" Pyface 'DockSizer' support.
This package implements the sizer associated with a Pyface DockWindow
component. The sizer manages the layout of the DockWindow child controls
and the notebook tabs and dragbars associated with the DockWindow.
"""
#-------------------------------------------------------------------------------
# Imports:
#-------------------------------------------------------------------------------
import wx, sys
from enthought.traits.api \
import HasPrivateTraits, Instance, Str, Int, List, Enum, Tuple, Any, \
Range, Property, Callable, Constant, Event, Undefined, Bool, \
cached_property
from enthought.traits.ui.dock_window_theme \
import dock_window_theme
from enthought.traits.ui.wx.helper \
import BufferDC
from enthought.pyface.api import SystemMetrics
from enthought.pyface.image_resource \
import ImageResource
from enthought.util.wx.drag_and_drop \
import PythonDropSource
from enthought.pyface.timer.api \
import do_later, do_after
from idockable \
import IDockable
from ifeature_tool \
import IFeatureTool
# Define version dependent values:
wx_26 = (wx.__version__[:3] == '2.6')
is_mac = (sys.platform == 'darwin')
#-------------------------------------------------------------------------------
# Constants:
#-------------------------------------------------------------------------------
# Standard font text height:
text_dy = 13
# Maximum allowed length of a tab label:
MaxTabLength = 30
# Size of a drag bar (in pixels):
DragBarSize = 14
# Images sizes (in pixels):
CloseTabSize = 10
CloseDragSize = 7
# Tab drawing states:
TabInactive = 0
TabActive = 1
TabHover = 2
NormalStates = ( TabInactive, TabActive )
NotActiveStates = ( TabInactive, TabHover )
# Feature overlay colors:
FeatureBrushColor = ( 255, 255, 255 )
FeaturePenColor = ( 92, 92, 92 )
# Color used to update the screen while dragging a splitter bar:
DragColor = ( 96, 96, 96 )
# Color used to update the screen while showing a docking operation in progress:
DockColorBrush = ( 255, 0, 0, 96 )
# Drop Info kinds:
DOCK_TOP = 0
DOCK_BOTTOM = 1
DOCK_LEFT = 2
DOCK_RIGHT = 3
DOCK_TAB = 4
DOCK_TABADD = 5
DOCK_BAR = 6
DOCK_NONE = 7
DOCK_SPLITTER = 8
DOCK_EXPORT = 9
# Splitter states:
SPLIT_VLEFT = 0
SPLIT_VMIDDLE = 1
SPLIT_VRIGHT = 2
SPLIT_HTOP = 3
SPLIT_HMIDDLE = 4
SPLIT_HBOTTOM = 5
# Empty clipping area:
no_clip = ( 0, 0, 0, 0 )
# Valid sequence types:
SequenceType = ( list, tuple )
# Tab scrolling directions:
SCROLL_LEFT = 1
SCROLL_RIGHT = 2
SCROLL_TO = 3
# Feature modes:
FEATURE_NONE = -1 # Has no features
FEATURE_NORMAL = 0 # Has normal features
FEATURE_CHANGED = 1 # Has changed or new features
FEATURE_DROP = 2 # Has drag data compatible drop features
FEATURE_DISABLED = 3 # Has feature icon, but is currently disabled
FEATURE_VISIBLE = 4 # Has visible features (mouseover mode)
FEATURE_DROP_VISIBLE = 5 # Has visible drop features (mouseover mode)
FEATURE_PRE_NORMAL = 6 # Has normal features (but has not been drawn yet)
FEATURE_EXTERNAL_DRAG = 256 # A drag started in another DockWindow is active
# Feature sets:
NO_FEATURE_ICON = ( FEATURE_NONE, FEATURE_DISABLED, FEATURE_VISIBLE,
FEATURE_DROP_VISIBLE )
FEATURES_VISIBLE = ( FEATURE_VISIBLE, FEATURE_DROP_VISIBLE )
FEATURE_END_DROP = ( FEATURE_DROP, FEATURE_VISIBLE, FEATURE_DROP_VISIBLE )
NORMAL_FEATURES = ( FEATURE_NORMAL, FEATURE_DISABLED )
#-------------------------------------------------------------------------------
# Global data:
#-------------------------------------------------------------------------------
# Standard font used by the DockWindow:
standard_font = None
# The list of available DockWindowFeatures:
features = []
#-------------------------------------------------------------------------------
# Trait definitions:
#-------------------------------------------------------------------------------
# Bounds (i.e. x, y, dx, dy):
Bounds = Tuple( Int, Int, Int, Int )
# Docking drag bar style:
DockStyle = Enum( 'horizontal', 'vertical', 'tab', 'fixed' )
#-------------------------------------------------------------------------------
# Adds a new DockWindowFeature class to the list of available features:
#-------------------------------------------------------------------------------
def add_feature ( feature_class ):
""" Adds a new DockWindowFeature class to the list of available features.
"""
global features
result = (feature_class not in features)
if result:
features.append( feature_class )
# Mark the feature class as having been installed:
if feature_class.state == 0:
feature_class.state = 1
return result
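# A minimal sketch, assuming 'MyFeature' is a DockWindowFeature subclass
# carrying the class-level 'state' attribute updated above:
#
#     if add_feature( MyFeature ):
#         pass   # first installation; MyFeature.state advanced from 0 to 1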
#-------------------------------------------------------------------------------
# Sets the standard font to use for a specified device context:
#-------------------------------------------------------------------------------
def set_standard_font ( dc ):
""" Sets the standard font to use for a specified device context.
"""
global standard_font
if standard_font is None:
standard_font = wx.SystemSettings_GetFont( wx.SYS_DEFAULT_GUI_FONT )
dc.SetFont( standard_font )
return dc
#-------------------------------------------------------------------------------
# Clears a window to the standard background color:
#-------------------------------------------------------------------------------
def clear_window ( window ):
""" Clears a window to the standard background color.
"""
bg_color = SystemMetrics().dialog_background_color
bg_color = wx.Colour(bg_color[0]*255, bg_color[1]*255, bg_color[2]*255)
dx, dy = window.GetSizeTuple()
dc = wx.PaintDC( window )
dc.SetBrush( wx.Brush( bg_color, wx.SOLID ) )
dc.SetPen( wx.TRANSPARENT_PEN )
dc.DrawRectangle( 0, 0, dx, dy )
#-------------------------------------------------------------------------------
# Gets a temporary device context for a specified window to draw in:
#-------------------------------------------------------------------------------
def get_dc ( window ):
""" Gets a temporary device context for a specified window to draw in.
"""
if is_mac:
dc = wx.ClientDC( window )
x, y = window.GetPositionTuple()
dx, dy = window.GetSizeTuple()
while True:
window = window.GetParent()
if window is None:
break
xw, yw = window.GetPositionTuple()
dxw, dyw = window.GetSizeTuple()
dx, dy = min( dx, dxw - x ), min( dy, dyw - y )
x += xw
y += yw
dc.SetClippingRegion( 0, 0, dx, dy )
return ( dc, 0, 0 )
x, y = window.ClientToScreenXY( 0, 0 )
return ( wx.ScreenDC(), x, y )
#-------------------------------------------------------------------------------
# 'DockImages' class:
#-------------------------------------------------------------------------------
class DockImages ( HasPrivateTraits ):
#---------------------------------------------------------------------------
# Trait definitions:
#---------------------------------------------------------------------------
# Image for closing a tab:
close_tab = Instance( ImageResource, ImageResource( 'close_tab' ) )
# Image for closing a drag bar:
close_drag = Instance( ImageResource, ImageResource( 'close_drag' ) )
#---------------------------------------------------------------------------
    # Initializes the object:
#---------------------------------------------------------------------------
def __init__ ( self, **traits ):
""" Initializes the object.
"""
super( DockImages, self ).__init__( **traits )
self._lazy_init_done = False
def init ( self ):
""" Initializes the parts of the object that depend on the toolkit
selection.
"""
# See if it has already been done.
if self._lazy_init_done:
return
self._lazy_init_done = True
self._close_tab = self.close_tab.create_image().ConvertToBitmap()
self._close_drag = self.close_drag.create_image().ConvertToBitmap()
self._splitter_images = [
ImageResource( name ).create_image().ConvertToBitmap()
for name in [ 'sv_left', 'sv_middle', 'sv_right',
'sh_top', 'sh_middle', 'sh_bottom' ]
]
self._tab_scroller_images = [
ImageResource( name ).create_image().ConvertToBitmap()
for name in [ 'tab_scroll_l', 'tab_scroll_r', 'tab_scroll_lr' ]
]
self._tab_scroller_dx = self._tab_scroller_images[0].GetWidth()
self._tab_scroller_dy = self._tab_scroller_images[0].GetHeight()
self._feature_images = [
ImageResource( name ).create_image().ConvertToBitmap()
for name in [ 'tab_feature_normal', 'tab_feature_changed',
'tab_feature_drop', 'tab_feature_disabled',
'bar_feature_normal', 'bar_feature_changed',
'bar_feature_drop', 'bar_feature_disabled' ]
]
self._tab_feature_width = self._feature_images[0].GetWidth()
self._tab_feature_height = self._feature_images[0].GetHeight()
        self._bar_feature_width = self._feature_images[4].GetWidth()
        self._bar_feature_height = self._feature_images[4].GetHeight()
#---------------------------------------------------------------------------
# Returns the splitter image to use for a specified splitter state:
#---------------------------------------------------------------------------
def get_splitter_image ( self, state ):
""" Returns the splitter image to use for a specified splitter state.
"""
return self._splitter_images[ state ]
#---------------------------------------------------------------------------
# Returns the feature image to use for a specified feature state:
#---------------------------------------------------------------------------
def get_feature_image ( self, state, is_tab = True ):
""" Returns the feature image to use for a specified feature state.
"""
if is_tab:
return self._feature_images[ state ]
        return self._feature_images[ state + 4 ]
# Creates a singleton instance of the class:
DockImages = DockImages()
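# A minimal usage sketch: 'init' defers bitmap loading until the wx toolkit
# is ready, so callers are expected to do something like:
#
#     DockImages.init()
#     bitmap = DockImages.get_splitter_image( SPLIT_HTOP )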
#-------------------------------------------------------------------------------
# 'DockItem' class:
#-------------------------------------------------------------------------------
class DockItem ( HasPrivateTraits ):
#---------------------------------------------------------------------------
# Trait definitions:
#---------------------------------------------------------------------------
# The parent of this item:
parent = Any
# The DockWindow that owns this item:
owner = Property( depends_on = 'parent' )
# Bounds of the item:
bounds = Bounds
# Current width of the item:
width = Int( -1 )
# Current height of the item:
height = Int( -1 )
# Bounds of the item's drag bar or tab:
drag_bounds = Bounds
# The current tab state:
tab_state = Any
# The tab displayable version of the control's UI name:
tab_name = Property( depends_on = 'name' )
# Width of the item's tab:
tab_width = Property( depends_on = 'control, tab_state, tab_name' )
# The DockWindowTheme for this item's DockWindow:
theme = Property
# The theme for the current tab state:
tab_theme = Property
# The current feature mode:
feature_mode = Enum( FEATURE_NONE, FEATURE_NORMAL, FEATURE_CHANGED,
FEATURE_DROP, FEATURE_VISIBLE, FEATURE_DROP_VISIBLE,
FEATURE_DISABLED, FEATURE_PRE_NORMAL )
# The position where the feature popup should appear:
feature_popup_position = Property
# The list of features for this item:
features = List
# The list of drag data compatible drop features for this item:
drop_features = List
# Current active set of features:
active_features = Property
# The name of this item (implemented in subclasses):
# name = Str
# The control associated with this item (implemented in subclasses):
# control = Instance( wx.Control )
    def __init__(self, **kw):
        super(DockItem, self).__init__(**kw)
    #---------------------------------------------------------------------------
    # Implementation of the 'owner' property:
    #---------------------------------------------------------------------------
    @cached_property
    def _get_owner ( self ):
        if self.parent is None:
            return None
        return self.parent.owner
#---------------------------------------------------------------------------
# Implementation of the 'tab_name' property:
#---------------------------------------------------------------------------
@cached_property
def _get_tab_name ( self ):
name = self.name
if len( name ) > MaxTabLength:
name = '%s...%s' % ( name[ : MaxTabLength - 23 ], name[ -20: ] )
return name
#---------------------------------------------------------------------------
# Implementation of the 'tab_width' property:
#---------------------------------------------------------------------------
@cached_property
def _get_tab_width ( self ):
if self.control is None:
return 0
self._is_tab = True
# Calculate the size needed by the theme and margins:
theme = self.tab_theme
tw = (theme.image_slice.xleft + theme.image_slice.xright +
theme.content.left + theme.content.right)
# Add feature marker width:
if self.feature_mode != FEATURE_NONE:
tw += DockImages._tab_feature_width + 3
# Add text width:
dc = set_standard_font( wx.ClientDC( self.control ) )
tw += dc.GetTextExtent( self.tab_name )[0]
# Add custom image width:
image = self.get_image()
if image is not None:
tw += (image.GetWidth() + 3)
# Add close button width:
if self.closeable:
tw += (CloseTabSize + 6)
# Return the computed width:
return tw
#---------------------------------------------------------------------------
# Implementation of the 'theme' property:
#---------------------------------------------------------------------------
def _get_theme ( self ):
if self.control is None:
return dock_window_theme()
return self.control.GetParent().owner.theme
#---------------------------------------------------------------------------
# Implementation of the 'tab_theme' property:
#---------------------------------------------------------------------------
def _get_tab_theme ( self ):
if self.tab_state == TabInactive:
return self.theme.tab_inactive
if self.tab_state == TabActive:
return self.theme.tab_active
return self.theme.tab_hover
#---------------------------------------------------------------------------
# Implementation of the 'active_features' property:
#---------------------------------------------------------------------------
def _get_active_features ( self ):
if len( self.drop_features ) > 0:
return self.drop_features
return self.features
#---------------------------------------------------------------------------
# Implementation of the 'feature_popup_position' property:
#---------------------------------------------------------------------------
def _get_feature_popup_position ( self ):
x, y, dx, dy = self.drag_bounds
return wx.Point( x + 5, y + 3 )
#---------------------------------------------------------------------------
# Returns whether or not the item is at a specified window position:
#---------------------------------------------------------------------------
def is_at ( self, x, y, bounds = None ):
""" Returns whether or not the item is at a specified window position.
"""
if bounds is None:
bounds = self.bounds
bx, by, bdx, bdy = bounds
return ((bx <= x < (bx + bdx)) and (by <= y < (by + bdy)))
#---------------------------------------------------------------------------
# Returns whether or not an event is within a specified bounds:
#---------------------------------------------------------------------------
def is_in ( self, event, x, y, dx, dy ):
""" Returns whether or not an event is within a specified bounds.
"""
return ((x <= event.GetX() < (x + dx)) and
(y <= event.GetY() < (y + dy)))
#---------------------------------------------------------------------------
# Sets the control's drag bounds:
#---------------------------------------------------------------------------
def set_drag_bounds ( self, x, y, dx, dy ):
""" Sets the control's drag bounds.
"""
bx, by, bdx, bdy = self.bounds
self.drag_bounds = ( x, y, min( x + dx, bx + bdx ) - x, dy )
#---------------------------------------------------------------------------
# Gets the cursor to use when the mouse is over the item:
#---------------------------------------------------------------------------
def get_cursor ( self, event ):
""" Gets the cursor to use when the mouse is over the item.
"""
if self._is_tab and (not self._is_in_close( event )):
return wx.CURSOR_ARROW
return wx.CURSOR_HAND
#---------------------------------------------------------------------------
# Gets the DockInfo object for a specified window position:
#---------------------------------------------------------------------------
def dock_info_at ( self, x, y, tdx, is_control ):
""" Gets the DockInfo object for a specified window position.
"""
if self.is_at( x, y, self.drag_bounds ):
x, y, dx, dy = self.drag_bounds
control = self
if self._is_tab:
if is_control:
kind = DOCK_TABADD
tab_bounds = ( x, y, dx, dy )
else:
kind = DOCK_TAB
tab_bounds = ( x - (tdx / 2), y, tdx, dy )
else:
if is_control:
kind = DOCK_TABADD
tab_bounds = ( x, y, self.tab_width, dy )
else:
kind = DOCK_TAB
control = None
tab_bounds = ( x + self.tab_width, y, tdx, dy )
return DockInfo( kind = kind,
tab_bounds = tab_bounds,
region = self.parent,
control = control )
return None
#---------------------------------------------------------------------------
# Prepares for drawing into a device context:
#---------------------------------------------------------------------------
def begin_draw ( self, dc, ox = 0, oy = 0 ):
""" Prepares for drawing into a device context.
"""
self._save_clip = dc.GetClippingBox()
x, y, dx, dy = self.bounds
dc.SetClippingRegion( x + ox, y + oy, dx, dy )
#---------------------------------------------------------------------------
# Terminates drawing into a device context:
#---------------------------------------------------------------------------
def end_draw ( self, dc ):
""" Terminates drawing into a device context.
"""
dc.DestroyClippingRegion()
if self._save_clip != no_clip:
dc.SetClippingRegion( *self._save_clip )
self._save_clip = None
#---------------------------------------------------------------------------
# Handles the left mouse button being pressed:
#---------------------------------------------------------------------------
def mouse_down ( self, event ):
""" Handles the left mouse button being pressed.
"""
self._xy = ( event.GetX(), event.GetY() )
self._closing = self._is_in_close( event )
self._dragging = False
#---------------------------------------------------------------------------
# Handles the left mouse button being released:
#---------------------------------------------------------------------------
def mouse_up ( self, event ):
""" Handles the left mouse button being released.
"""
# Handle the user closing a control:
if self._closing:
if self._is_in_close( event ):
self.close()
# Handle the completion of a dragging operation:
elif self._dragging:
window = event.GetEventObject()
dock_info, self._dock_info = self._dock_info, None
self.mark_bounds( False )
control = self
# Check to see if the user is attempting to drag an entire notebook
# region:
if event.AltDown():
control = self.parent
# If the parent is not a notebook, then use the parent's parent:
if (isinstance( control, DockRegion ) and
(not control.is_notebook)):
control = control.parent
# Make sure the target is not contained within the notebook
# group we are trying to move:
region = dock_info.region
while region is not None:
if region is control:
# If it is, the operation is invalid, abort:
return
region = region.parent
# Check to see if the user is attempting to copy the control:
elif event.ControlDown():
owner = window.owner
control = owner.handler.dock_control_for(
*(owner.handler_args + ( window, control )) )
# Complete the docking maneuver:
dock_info.dock( control, window )
# Handle the user clicking on a notebook tab to select it:
elif (self._is_tab and
self.is_at( event.GetX(), event.GetY(), self.drag_bounds )):
self.parent.tab_clicked( self )
#---------------------------------------------------------------------------
# Handles the mouse moving while the left mouse button is pressed:
#---------------------------------------------------------------------------
def mouse_move ( self, event ):
""" Handles the mouse moving while the left mouse button is pressed.
"""
# Exit if control is 'fixed' or a 'close' is pending:
if self._closing or self.locked or (self.style == 'fixed'):
return
window = event.GetEventObject()
# Check to see if we are in 'drag mode' yet:
if not self._dragging:
x, y = self._xy
if (abs( x - event.GetX() ) + abs( y - event.GetY() )) < 3:
return
self._dragging = True
self._dock_info = no_dock_info
self._dock_size = self.tab_width
self.mark_bounds( True )
# Get the window and DockInfo object associated with the event:
cur_dock_info = self._dock_info
self._dock_info = dock_info = \
window.GetSizer().DockInfoAt( event.GetX(), event.GetY(),
self._dock_size, event.ShiftDown() )
# If the DockInfo has not changed, then no update is needed:
if ((cur_dock_info.kind == dock_info.kind) and
(cur_dock_info.region is dock_info.region) and
(cur_dock_info.bounds == dock_info.bounds) and
(cur_dock_info.tab_bounds == dock_info.tab_bounds)):
return
# Make sure the new DockInfo is legal:
region = self.parent
if ((not event.ControlDown()) and
(dock_info.region is region) and
((len( region.contents ) <= 1) or
(DOCK_TAB <= dock_info.kind <= DOCK_BAR) and
(dock_info.control is self))):
self._dock_info = no_dock_info
window.owner.set_cursor( wx.CURSOR_SIZING )
return
# Draw the new region:
dock_info.draw( window, self._drag_bitmap )
# If this is the start of an export (i.e. drag and drop) request:
if ((dock_info.kind == DOCK_EXPORT) and
(self.export != '') and
(self.dockable is not None)):
# Begin the drag and drop operation:
self.mark_bounds( False )
window.owner.set_cursor( wx.CURSOR_ARROW )
window.owner.release_mouse()
try:
window._dragging = True
if (PythonDropSource( window, self ).result in
( wx.DragNone, wx.DragCancel )):
window.owner.handler.open_view_for( self )
finally:
window._dragging = False
else:
# Update the mouse pointer as required:
cursor = wx.CURSOR_SIZING
if dock_info.kind == DOCK_BAR:
cursor = wx.CURSOR_HAND
window.owner.set_cursor( cursor )
#---------------------------------------------------------------------------
# Handles the mouse hovering over the item:
#---------------------------------------------------------------------------
def hover_enter ( self, event ):
""" Handles the mouse hovering over the item.
"""
if self._is_tab and (self.tab_state != TabActive):
self._redraw_tab( TabHover )
#---------------------------------------------------------------------------
# Handles the mouse exiting from hovering over the item:
#---------------------------------------------------------------------------
def hover_exit ( self, event ):
""" Handles the mouse exiting from hovering over the item.
"""
if self._is_tab and (self.tab_state != TabActive):
self._redraw_tab( TabInactive )
#---------------------------------------------------------------------------
# Marks/Unmarks the bounds of the bounding DockWindow:
#---------------------------------------------------------------------------
def mark_bounds ( self, begin ):
""" Marks/Unmarks the bounds of the bounding DockWindow.
"""
window = self.control.GetParent()
if begin:
dc, x, y = get_dc( window )
dx, dy = window.GetSize()
dc2 = wx.MemoryDC()
self._drag_bitmap = wx.EmptyBitmap( dx, dy )
dc2.SelectObject( self._drag_bitmap )
dc2.Blit( 0, 0, dx, dy, dc, x, y )
try:
dc3 = wx.GCDC( dc2 )
dc3.SetBrush( wx.Brush( wx.Colour( 158, 166, 255, 64 ) ) )
dc3.SetPen( wx.TRANSPARENT_PEN )
dc3.DrawRectangle( 0, 0, dx, dy )
except AttributeError:
pass
dc.Blit( x, y, dx, dy, dc2, 0, 0 )
else:
self._drag_bitmap = None
if is_mac:
top_level_window_for( window ).Refresh()
else:
window.Refresh()
def get_bg_color(self):
""" Gets the background color
"""
color = SystemMetrics().dialog_background_color
return wx.Colour( color[0]*255, color[1]*255, color[2]*255 )
#---------------------------------------------------------------------------
# Fills a specified region with the control's background color:
#---------------------------------------------------------------------------
def fill_bg_color ( self, dc, x, y, dx, dy ):
""" Fills a specified region with the control's background color.
"""
dc.SetPen( wx.TRANSPARENT_PEN )
dc.SetBrush( wx.Brush( self.get_bg_color() ) )
dc.DrawRectangle( x, y, dx, dy )
#---------------------------------------------------------------------------
# Draws a notebook tab:
#---------------------------------------------------------------------------
    def draw_tab ( self, dc, state ):
        """ Draws a notebook tab.
        """
        global text_dy
x0, y0, dx, dy = self.drag_bounds
tab_color = self.get_bg_color()
if state == TabActive:
pass
elif state == TabInactive:
r,g,b = tab_color.Get()
tab_color.Set(max(0, r-20), max(0, g-20), max(0, b-20))
else:
r,g,b = tab_color.Get()
tab_color.Set(min(255, r+20), min(255, g+20), min(255, b+20))
self._is_tab = True
self.tab_state = state
theme = self.tab_theme
slice = theme.image_slice
bdc = BufferDC( dc, dx, dy )
self.fill_bg_color(bdc, 0, 0, dx, dy)
if state == TabActive:
# fill the tab bg with the desired color
brush = wx.Brush(tab_color)
bdc.SetBrush(brush)
bdc.SetPen(wx.TRANSPARENT_PEN)
bdc.DrawRectangle(0, 0, dx, dy)
            # Draw the left, top, and right side of a rectangle around the tab
pen = wx.Pen(wx.SystemSettings_GetColour(wx.SYS_COLOUR_BTNSHADOW))
bdc.SetPen(pen)
bdc.DrawLine(0,dy,0,0) #up
bdc.DrawLine(0,0,dx,0) #right
bdc.DrawLine(dx-1,0,dx-1,dy) #down
pen = wx.Pen(wx.SystemSettings_GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
bdc.SetPen(pen)
bdc.DrawLine(1,dy,1,1)
bdc.DrawLine(1,1,dx-2,1)
bdc.DrawLine(dx-2,1,dx-2,dy)
else:
# fill the tab bg with the desired color
brush = wx.Brush(tab_color)
bdc.SetBrush(brush)
bdc.SetPen(wx.TRANSPARENT_PEN)
bdc.DrawRectangle(0, 3, dx, dy)
            # Draw the left, top, and right side of a rectangle around the tab
pen = wx.Pen(wx.SystemSettings_GetColour(wx.SYS_COLOUR_BTNSHADOW))
bdc.SetPen(pen)
bdc.DrawLine(0,dy,0,3)
bdc.DrawLine(0,3,dx-1,3)
bdc.DrawLine(dx-1,3,dx-1,dy)
# Compute the initial drawing position:
name = self.tab_name
tdx, text_dy = dc.GetTextExtent( name )
tc = theme.content
ox, oy = theme.label.left, theme.label.top
y = (oy + ((dy + slice.xtop + tc.top - slice.xbottom - tc.bottom -
text_dy) / 2))
x = ox + slice.xleft + tc.left
mode = self.feature_mode
if mode == FEATURE_PRE_NORMAL:
mode = self.set_feature_mode( False )
# Draw the feature 'trigger' icon (if necessary):
if mode != FEATURE_NONE:
if mode not in FEATURES_VISIBLE:
bdc.DrawBitmap( DockImages.get_feature_image( mode ), x, y,
True )
x += (DockImages._tab_feature_width + 3)
# Draw the image (if necessary):
image = self.get_image()
if image is not None:
bdc.DrawBitmap( image, x, y, True )
x += (image.GetWidth() + 3)
# Draw the text label:
bdc.DrawText( name, x, y + 1 )
# Draw the close button (if necessary):
if self.closeable:
bdc.DrawBitmap( DockImages._close_tab, x + tdx + 5, y + 2, True )
# Copy the buffer to the display:
bdc.copy( x0, y0 )
#---------------------------------------------------------------------------
# Draws a fixed drag bar:
#---------------------------------------------------------------------------
def draw_fixed ( self, dc ):
""" Draws a fixed drag bar.
"""
pass
#---------------------------------------------------------------------------
# Draws a horizontal drag bar:
#---------------------------------------------------------------------------
def draw_horizontal ( self, dc ):
""" Draws a horizontal drag bar.
"""
self._is_tab = False
x, y, dx, dy = self.drag_bounds
self.fill_bg_color( dc, x, y, dx, dy )
        pen = wx.Pen(wx.SystemSettings_GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
dc.SetPen(pen)
dc.DrawLine(x, y, x+dx, y)
dc.DrawLine(x, y+2, x+dx, y+2)
#---------------------------------------------------------------------------
# Draws a vertical drag bar:
#---------------------------------------------------------------------------
def draw_vertical ( self, dc ):
""" Draws a vertical drag bar.
"""
self._is_tab = False
x, y, dx, dy = self.drag_bounds
self.fill_bg_color( dc, x, y, dx, dy )
        pen = wx.Pen(wx.SystemSettings_GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
dc.SetPen(pen)
dc.DrawLine(x, y, x, y+dy)
dc.DrawLine(x+2, y, x+2, y+dy)
#---------------------------------------------------------------------------
# Redraws the control's tab:
#---------------------------------------------------------------------------
def _redraw_tab ( self, state = None ):
if state is None:
state = self.tab_state
region = self.parent
if region is not None:
dc = set_standard_font( wx.ClientDC( self.control.GetParent() ) )
if region.is_notebook:
dc.SetClippingRegion( *region._tab_clip_bounds )
self.draw_tab( dc, state )
dc.DestroyClippingRegion()
else:
self.draw_tab( dc, state )
#---------------------------------------------------------------------------
# Redraws the control's drag bar:
#---------------------------------------------------------------------------
def _redraw_bar ( self ):
dc = wx.ClientDC( self.control )
getattr( self, 'draw_' + self.style )( dc )
#---------------------------------------------------------------------------
# Redraws the control's tab or bar:
#---------------------------------------------------------------------------
def _redraw_control ( self ):
if self._is_tab:
self._redraw_tab()
else:
self._redraw_bar()
#---------------------------------------------------------------------------
# Returns the bounds of the close button (if any):
#---------------------------------------------------------------------------
def _close_bounds ( self ):
global text_dy
if self.closeable and self._is_tab:
x, y, dx, dy = self.drag_bounds
theme = self.tab_theme
slice = theme.image_slice
tc = theme.content
ox, oy = theme.label.left, theme.label.top
# fixme: x calculation seems to be off by -1...
return ( x + dx + ox - slice.xright - tc.right - CloseTabSize,
y + oy + ((dy + slice.xtop + tc.top - slice.xbottom -
tc.bottom - text_dy) / 2) + 3,
CloseTabSize, CloseTabSize )
return ( 0, 0, 0, 0 )
#---------------------------------------------------------------------------
# Returns whether a specified window position is over the close button:
#---------------------------------------------------------------------------
def _is_in_close ( self, event ):
return self.is_in( event, *self._close_bounds() )
#---------------------------------------------------------------------------
# Sets/Returns the 'normal' feature mode for the control based on the
# number of currently active features:
#---------------------------------------------------------------------------
def set_feature_mode ( self, changed = True ):
if (not changed) or (self.feature_mode != FEATURE_PRE_NORMAL):
mode = FEATURE_DROP
features = self.drop_features
if len( features ) == 0:
mode = FEATURE_NORMAL
features = self.features
for feature in features:
if feature.bitmap is not None:
if changed:
self.feature_mode = FEATURE_CHANGED
else:
self.feature_mode = mode
break
else:
self.feature_mode = FEATURE_DISABLED
return self.feature_mode
#---------------------------------------------------------------------------
# Returns whether or not a specified window position is over the feature
# 'trigger' icon, and if so, triggers display of the feature icons:
#---------------------------------------------------------------------------
def feature_activate ( self, event, drag_object = Undefined ):
global text_dy
if (self.feature_mode in NO_FEATURE_ICON) or (not self._is_tab):
return False
# In 'drag' mode, we may get the same coordinate over and over again.
# We don't want to restart the timer, so exit now:
exy = ( event.GetX(), event.GetY() )
if self._feature_popup_xy == exy:
return True
x, y, dx, dy = self.drag_bounds
idx = DockImages._tab_feature_width
idy = DockImages._tab_feature_height
theme = self.tab_theme
slice = theme.image_slice
tc = theme.content
ox, oy = theme.label.left, theme.label.top
y += (oy + ((dy + slice.xtop + tc.top - slice.xbottom - tc.bottom -
text_dy) / 2))
x += ox + slice.xleft + tc.left
result = self.is_in( event, x, y, idx, idy )
# If the pointer is over the feature 'trigger' icon, save the event for
# the popup processing:
if result:
# If this is part of a drag operation, prepare for drag mode:
if drag_object is not Undefined:
self.pre_drag( drag_object, FEATURE_EXTERNAL_DRAG )
# Schedule the popup for later:
self._feature_popup_xy = exy
do_after( 100, self._feature_popup )
return result
#---------------------------------------------------------------------------
# Resets any pending feature popup:
#---------------------------------------------------------------------------
def reset_feature_popup ( self ):
self._feature_popup_xy = None
#---------------------------------------------------------------------------
# Pops up the current features if a feature popup is still pending:
#---------------------------------------------------------------------------
def _feature_popup ( self ):
if self._feature_popup_xy is not None:
# Set the new feature mode:
if self.feature_mode == FEATURE_DROP:
self.feature_mode = FEATURE_DROP_VISIBLE
else:
self.feature_mode = FEATURE_VISIBLE
self.owner.feature_bar_popup( self )
self._feature_popup_xy = None
else:
self.post_drag( FEATURE_EXTERNAL_DRAG )
#---------------------------------------------------------------------------
# Finishes the processing of a feature popup:
#---------------------------------------------------------------------------
def feature_bar_closed ( self ):
if self.feature_mode == FEATURE_DROP_VISIBLE:
self.feature_mode = FEATURE_DROP
else:
self.feature_mode = FEATURE_NORMAL
do_later( self._redraw_control )
#---------------------------------------------------------------------------
# Handles all pre-processing before a feature is dragged:
#---------------------------------------------------------------------------
def pre_drag_all ( self, object ):
""" Prepare all DockControls in the associated DockWindow for being
dragged over.
"""
for control in self.dock_controls:
control.pre_drag( object )
self.pre_drag( object )
def pre_drag ( self, object, tag = 0 ):
""" Prepare this DockControl for being dragged over.
"""
if (self.visible and
(self.feature_mode != FEATURE_NONE) and
(self._feature_mode is None)):
if isinstance( object, IFeatureTool ):
if (object.feature_can_drop_on( self.object ) or
object.feature_can_drop_on_dock_control( self )):
from feature_tool import FeatureTool
self.drop_features = [
FeatureTool( dock_control = self ) ]
else:
self.drop_features = [ f for f in self.features
if f.can_drop( object ) and
(f.bitmap is not None) ]
self._feature_mode = self.feature_mode + tag
if len( self.drop_features ) > 0:
self.feature_mode = FEATURE_DROP
else:
self.feature_mode = FEATURE_DISABLED
self._redraw_control()
#---------------------------------------------------------------------------
# Handles all post-processing after a feature has been dragged:
#---------------------------------------------------------------------------
def post_drag_all ( self ):
""" Restore all DockControls in the associated DockWindow after a drag
operation is completed.
"""
for control in self.dock_controls:
control.post_drag()
self.post_drag()
def post_drag ( self, tag = 0 ):
""" Restore this DockControl after a drag operation is completed.
"""
if ((self._feature_mode is None) or (tag == 0) or
((self._feature_mode & tag) != 0)):
self.drop_features = []
if self.feature_mode != FEATURE_NONE:
if self._feature_mode is not None:
self.feature_mode = self._feature_mode & (~tag)
self._feature_mode = None
else:
self.set_feature_mode( False )
self._redraw_control()
#-------------------------------------------------------------------------------
# 'DockSplitter' class:
#-------------------------------------------------------------------------------
class DockSplitter ( DockItem ):
#---------------------------------------------------------------------------
# Trait definitions:
#---------------------------------------------------------------------------
# Style of the splitter bar:
style = Enum( 'horizontal', 'vertical' )
# Index of the splitter within its parent:
index = Int
# Current state of the splitter (i.e. its position relative to the things
# it splits):
state = Property
#---------------------------------------------------------------------------
# Override the definition of the inherited 'theme' property:
#---------------------------------------------------------------------------
def _get_theme ( self ):
return self.parent.control.GetParent().owner.theme
#---------------------------------------------------------------------------
# Draws the contents of the splitter:
#---------------------------------------------------------------------------
def draw ( self, dc ):
""" Draws the contents of the splitter.
"""
if (self._live_drag is False) and (self._first_bounds is not None):
x, y, dx, dy = self._first_bounds
else:
x, y, dx, dy = self.bounds
image = DockImages.get_splitter_image( self.state )
idx, idy = image.GetWidth(), image.GetHeight()
self.fill_bg_color( dc, x, y, dx, dy )
if self.style == 'horizontal':
# Draw a line the same color as the system button shadow, which
            # should be a darkish color in the user's color scheme
pen = wx.Pen(wx.SystemSettings_GetColour(wx.SYS_COLOUR_BTNSHADOW))
dc.SetPen(pen)
dc.DrawLine(x+idx+1,y+dy/2,x+dx-2,y+dy/2)
iy = y+2
ix = x
# sets the hittable area for changing the cursor to be the height of
# the image
dx = idx
else:
# Draw a line the same color as the system button shadow, which
# should be a darkish color in the users color scheme
pen = wx.Pen(wx.SystemSettings_GetColour(wx.SYS_COLOUR_BTNSHADOW))
dc.SetPen(pen)
dc.DrawLine(x+dx/2,y+idy+1,x+dx/2,y+dy-2)
iy = y
ix = x + 2
# sets the hittable area for changing the cursor to be the width of
# the image
dy = idy
dc.DrawBitmap( image, ix, iy, True )
self._hot_spot = ( x, y, dx, dy )
#---------------------------------------------------------------------------
# Gets the cursor to use when the mouse is over the splitter bar:
#---------------------------------------------------------------------------
def get_cursor ( self, event ):
""" Gets the cursor to use when the mouse is over the splitter bar.
"""
if (self._hot_spot is None) or self.is_in( event, *self._hot_spot ):
return wx.CURSOR_ARROW
if self.style == 'horizontal':
return wx.CURSOR_SIZENS
return wx.CURSOR_SIZEWE
#---------------------------------------------------------------------------
# Returns a copy of the splitter 'structure', minus the actual content:
#---------------------------------------------------------------------------
def get_structure ( self ):
""" Returns a copy of the splitter 'structure', minus the actual
content.
"""
return self.clone_traits( [ '_last_bounds' ] )
#---------------------------------------------------------------------------
# Handles the left mouse button being pressed:
#---------------------------------------------------------------------------
def mouse_down ( self, event ):
""" Handles the left mouse button being pressed.
"""
self._live_drag = event.ControlDown()
self._click_pending = ((self._hot_spot is not None) and
self.is_in( event, *self._hot_spot ))
if not self._click_pending:
self._xy = ( event.GetX(), event.GetY() )
self._max_bounds = self.parent.get_splitter_bounds( self )
self._first_bounds = self.bounds
if not self._live_drag:
self._draw_bounds( event, self.bounds )
#---------------------------------------------------------------------------
# Handles the left mouse button being released:
#---------------------------------------------------------------------------
def mouse_up ( self, event ):
""" Handles the left mouse button being released.
"""
if self._click_pending:
hx, hy, hdx, hdy = self._hot_spot
if not self.is_in( event, hx, hy, hdx, hdy ):
return
if self.style == 'horizontal':
if event.GetX() < (hx + (hdx / 2)):
self.collapse(True)
else:
self.collapse(False)
else:
if event.GetY() < (hy + (hdy / 2)):
self.collapse(True)
else:
self.collapse(False)
else:
self._last_bounds, self._first_bounds = self._first_bounds, None
if not self._live_drag:
self._draw_bounds( event )
self.parent.update_splitter( self, event.GetEventObject() )
#---------------------------------------------------------------------------
# Handles the mouse moving while the left mouse button is pressed:
#---------------------------------------------------------------------------
def mouse_move ( self, event ):
""" Handles the mouse moving while the left mouse button is pressed.
"""
if not self._click_pending:
x, y, dx, dy = self._first_bounds
mx, my, mdx, mdy = self._max_bounds
if self.style == 'horizontal':
y = y + event.GetY() - self._xy[1]
y = min( max( y, my ), my + mdy - dy )
else:
x = x + event.GetX() - self._xy[0]
x = min( max( x, mx ), mx + mdx - dx )
bounds = ( x, y, dx, dy )
if bounds != self.bounds:
self.bounds = bounds
if self._live_drag:
self.parent.update_splitter( self, event.GetEventObject() )
else:
self._draw_bounds( event, bounds )
#---------------------------------------------------------------------------
# Collapse/expands a splitter
#---------------------------------------------------------------------------
def collapse ( self, forward ):
""" Move the splitter has far as possible in one direction. 'forward'
is a boolean: True=right/down, False=left/up.
If the splitter is already collapsed, restores it to its previous
position.
"""
is_horizontal = (self.style == 'horizontal')
x, y, dx, dy = self.bounds
if self._last_bounds is not None:
if is_horizontal:
y = self._last_bounds[1]
else:
x = self._last_bounds[0]
state = self.state
contents = self.parent.visible_contents
ix1, iy1, idx1, idy1 = contents[ self.index ].bounds
ix2, iy2, idx2, idy2 = contents[ self.index + 1 ].bounds
if is_horizontal:
if state != SPLIT_HMIDDLE:
if ((y == self.bounds[1]) or
(y < iy1) or
((y + dy) > (iy2 + idy2))):
y = (iy1 + iy2 + idy2 - dy) / 2
else:
self._last_bounds = self.bounds
if forward:
y = iy1
else:
y = iy2 + idy2 - dy
elif state != SPLIT_VMIDDLE:
if ((x == self.bounds[0]) or
(x < ix1) or
((x + dx) > (ix2 + idx2))):
x = (ix1 + ix2 + idx2 - dx) / 2
else:
self._last_bounds = self.bounds
if forward:
x = ix2 + idx2 - dx
else:
x = ix1
self.bounds = ( x, y, dx, dy )
#---------------------------------------------------------------------------
# Handles the mouse hovering over the item:
#---------------------------------------------------------------------------
def hover_enter ( self, event ):
""" Handles the mouse hovering over the item.
"""
pass
#---------------------------------------------------------------------------
# Handles the mouse exiting from hovering over the item:
#---------------------------------------------------------------------------
def hover_exit ( self, event ):
""" Handles the mouse exiting from hovering over the item.
"""
pass
#---------------------------------------------------------------------------
# Draws the splitter bar in a new position while it is being dragged:
#---------------------------------------------------------------------------
def _draw_bounds ( self, event, bounds = None ):
""" Draws the splitter bar in a new position while it is being dragged.
"""
# Set up the drawing environment:
window = event.GetEventObject()
dc, x0, y0 = get_dc( window )
dc.SetLogicalFunction( wx.XOR )
dc.SetPen( wx.TRANSPARENT_PEN )
dc.SetBrush( wx.Brush( wx.Colour( *DragColor ), wx.SOLID ) )
is_horizontal = (self.style == 'horizontal')
nx = ox = None
# Draw the new bounds (if any):
if bounds is not None:
ax = ay = adx = ady = 0
nx, ny, ndx, ndy = bounds
if is_horizontal:
ady = (ndy - 6)
ay = ady / 2
else:
adx = (ndx - 6)
ax = adx / 2
nx += ax
ny += ay
ndx -= adx
ndy -= ady
if self._bounds is not None:
ax = ay = adx = ady = 0
ox, oy, odx, ody = self._bounds
if is_horizontal:
ady = (ody - 6)
ay = ady / 2
else:
adx = (odx - 6)
ax = adx / 2
ox += ax
oy += ay
odx -= adx
ody -= ady
if nx is not None:
tx, ty, tdx, tdy = nx, ny, ndx, ndy
if ox is not None:
if is_horizontal:
yoy = oy - ty
if 0 <= yoy < tdy:
tdy = yoy
elif -ody < yoy <= 0:
ty = oy + ody
tdy = tdy - ody - yoy
else:
xox = ox - tx
if 0 <= xox < tdx:
tdx = xox
elif -odx < xox <= 0:
tx = ox + odx
tdx = tdx - odx - xox
dc.DrawRectangle( tx + x0, ty + y0, tdx, tdy )
# Erase the old bounds (if any):
if ox is not None:
if nx is not None:
if is_horizontal:
yoy = ny - oy
if 0 <= yoy < ody:
ody = yoy
elif -ndy < yoy <= 0:
oy = ny + ndy
ody = ody - ndy - yoy
else:
xox = nx - ox
if 0 <= xox < odx:
odx = xox
elif -ndx < xox <= 0:
ox = nx + ndx
odx = odx - ndx - xox
dc.DrawRectangle( ox + x0, oy + y0, odx, ody )
if is_mac:
window.Refresh(rect=wx.Rect(ox + x0, oy + y0, odx, ody))
# Save the new bounds for the next call:
self._bounds = bounds
#---------------------------------------------------------------------------
# Implementation of the 'state' property:
#---------------------------------------------------------------------------
def _get_state ( self ):
contents = self.parent.contents
x, y, dx, dy = self.bounds
ix1, iy1, idx1, idy1 = contents[ self.index ].bounds
ix2, iy2, idx2, idy2 = contents[ self.index + 1 ].bounds
if self.style == 'horizontal':
if y == iy1:
return SPLIT_HTOP
if (y + dy) == (iy2 + idy2):
return SPLIT_HBOTTOM
return SPLIT_HMIDDLE
else:
if x == ix1:
return SPLIT_VLEFT
if (x + dx) == (ix2 + idx2):
return SPLIT_VRIGHT
return SPLIT_VMIDDLE
#-------------------------------------------------------------------------------
# 'DockControl' class:
#-------------------------------------------------------------------------------
class DockControl ( DockItem ):
#---------------------------------------------------------------------------
# Trait definitions:
#---------------------------------------------------------------------------
# The control this object describes:
control = Instance( wx.Window, allow_none = True )
# The number of global DockWindowFeatures that were available the last time
# the feature set was checked:
num_features = Int
# A feature associated with the DockControl has been changed:
feature_changed = Event
# The image to display for this control:
image = Instance( ImageResource, allow_none = True )
# The UI name of this control:
name = Str
# Has the user set the name of the control?
user_name = Bool( False )
# The object (if any) associated with this control:
object = Property
# The id of this control:
id = Str
# Style of drag bar/tab:
style = DockStyle
# Has the user set the style for this control:
user_style = Bool( False )
# Category of control when it is dragged out of the DockWindow:
export = Str
# Is the control visible?
visible = Bool( True )
# Is the control's drag bar locked?
locked = Bool( False )
# Can the control be resized?
resizable = Bool( True )
# Can the control be closed?
closeable = Bool( False )
# Function to call when a DockControl is requesting to be closed:
on_close = Callable
# (Optional) object that allows the control to be docked with a different
# DockWindow:
dockable = Instance( IDockable, allow_none = True )
# List of all other DockControl's in the same DockWindow:
dock_controls = Property
# Event fired when the control's notebook tab is activated by the user:
activated = Event
#---------------------------------------------------------------------------
# Calculates the minimum size of the control:
#---------------------------------------------------------------------------
def calc_min ( self, use_size = False ):
""" Calculates the minimum size of the control.
"""
self.check_features()
dx, dy = self.width, self.height
if self.control is not None:
if wx_26:
size = self.control.GetBestFittingSize()
else:
size = self.control.GetEffectiveMinSize()
dx = size.GetWidth()
dy = size.GetHeight()
if self.width < 0:
self.width, self.height = dx, dy
if use_size and (self.width >= 0):
return ( self.width, self.height )
return ( dx, dy )
#---------------------------------------------------------------------------
# Layout the contents of the control based on the specified bounds:
#---------------------------------------------------------------------------
def recalc_sizes ( self, x, y, dx, dy ):
""" Layout the contents of the region based on the specified bounds.
"""
self.width = dx = max( 0, dx )
self.height = dy = max( 0, dy )
self.bounds = ( x, y, dx, dy )
# Note: All we really want to do is the 'SetDimensions' call, but the
# other code is needed for Linux/GTK which will not correctly process
# the SetDimensions call if the min size is larger than the specified
# size. So we temporarily set its min size to (0,0), do the
# SetDimensions, then restore the original min size. The restore is
# necessary so that DockWindow itself will correctly draw the 'drag'
# box when performing a docking maneuver...
control = self.control
min_size = control.GetMinSize()
control.SetMinSize( wx.Size( 0, 0 ) )
control.SetDimensions( x, y, dx, dy )
control.SetMinSize( min_size )
#---------------------------------------------------------------------------
# Checks to make sure that all applicable DockWindowFeatures have been
# applied:
#---------------------------------------------------------------------------
def check_features ( self ):
""" Checks to make sure that all applicable DockWindowFeatures have been
applied.
"""
global features
mode = self.feature_mode
n = len( features )
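# Only the features added globally since the last check (indices
# num_features .. n - 1) need to be considered for this control: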
if ((self.num_features < n) and
(self.control is not None) and
isinstance( self.control.GetParent().GetSizer(), DockSizer )):
for i in range( self.num_features, n ):
feature_class = features[i]
feature = feature_class.new_feature_for( self )
if feature is not None:
if not isinstance( feature, SequenceType ):
feature = [ feature ]
self.features.extend( list( feature ) )
if mode == FEATURE_NONE:
self.feature_mode = FEATURE_PRE_NORMAL
if feature_class.state != 1:
for item in feature:
item.disable()
else:
self._tab_width = None
if mode in NORMAL_FEATURES:
self.set_feature_mode()
self.num_features = n
#---------------------------------------------------------------------------
# Sets the visibility of the control:
#---------------------------------------------------------------------------
def set_visibility ( self, visible ):
""" Sets the visibility of the control.
"""
if self.control is not None:
self.control.Show( visible )
#---------------------------------------------------------------------------
# Returns all DockControl objects contained in the control:
#---------------------------------------------------------------------------
def get_controls ( self, visible_only = True ):
""" Returns all DockControl objects contained in the control.
"""
if visible_only and (not self.visible):
return []
return [ self ]
#---------------------------------------------------------------------------
# Gets the image (if any) associated with the control:
#---------------------------------------------------------------------------
def get_image ( self ):
""" Gets the image (if any) associated with the control.
"""
if self._image is None:
if self.image is not None:
self._image = self.image.create_image().ConvertToBitmap()
return self._image
#---------------------------------------------------------------------------
# Hides or shows the control:
#---------------------------------------------------------------------------
def show ( self, visible = True, layout = True ):
""" Hides or shows the control.
"""
if visible != self.visible:
self.visible = visible
self._layout( layout )
#---------------------------------------------------------------------------
# Activates a control (i.e. makes it the active page within its containing
# notebook):
#---------------------------------------------------------------------------
def activate ( self, layout = True ):
""" Activates a control (i.e. makes it the active page within its
containing notebook).
"""
if self.parent is not None:
self.parent.activate( self, layout )
#---------------------------------------------------------------------------
# Closes the control:
#---------------------------------------------------------------------------
def close ( self, layout = True, force = False ):
""" Closes the control.
"""
control = self.control
if control is not None:
window = control.GetParent()
if self.on_close is not None:
# Ask the handler if it is OK to close the control:
if self.on_close( self, force ) is False:
# If not OK to close it, we're done:
return
elif self.dockable is not None:
# Ask the IDockable handler if it is OK to close the control:
if self.dockable.dockable_close( self, force ) is False:
# If not OK to close it, we're done:
return
else:
# No close handler, just destroy the widget ourselves:
control.Destroy()
# Reset all features:
self.reset_features()
# Remove the DockControl from the sizer:
self.parent.remove( self )
# Mark the DockControl as closed (i.e. has no associated widget or
# parent):
self.control = self.parent = None
# If a screen update is requested, lay everything out again now:
if layout:
window.Layout()
window.Refresh()
#---------------------------------------------------------------------------
# Returns the object at a specified window position:
#---------------------------------------------------------------------------
def object_at ( self, x, y ):
""" Returns the object at a specified window position.
"""
return None
#---------------------------------------------------------------------------
# Returns a copy of the control 'structure', minus the actual content:
#---------------------------------------------------------------------------
def get_structure ( self ):
""" Returns a copy of the control 'structure', minus the actual content.
"""
return self.clone_traits( [
'id', 'name', 'user_name', 'style', 'user_style', 'visible',
'locked', 'closeable', 'resizable', 'width', 'height'
] )
#---------------------------------------------------------------------------
# Toggles the 'lock' status of the control:
#---------------------------------------------------------------------------
def toggle_lock ( self ):
""" Toggles the 'lock' status of the control.
"""
self.locked = not self.locked
#---------------------------------------------------------------------------
# Prints the contents of the control:
#---------------------------------------------------------------------------
def dump ( self, indent ):
""" Prints the contents of the control.
"""
print ('%sControl( %08X, name = %s, id = %s,\n%s'
'style = %s, locked = %s,\n%s'
'closeable = %s, resizable = %s, visible = %s\n%s'
'width = %d, height = %d )' % (
' ' * indent, id( self ), self.name, self.id,
' ' * (indent + 9), self.style, self.locked,
' ' * (indent + 9), self.closeable, self.resizable, self.visible,
' ' * (indent + 9), self.width, self.height ))
#---------------------------------------------------------------------------
# Draws the contents of the control:
#---------------------------------------------------------------------------
def draw ( self, dc ):
""" Draws the contents of the control.
"""
pass
#---------------------------------------------------------------------------
# Sets a new name for the control:
#---------------------------------------------------------------------------
def set_name ( self, name, layout = True ):
""" Sets a new name for the control.
"""
if name != self.name:
self.name = name
self._layout( layout )
#---------------------------------------------------------------------------
# Resets the state of the tab:
#---------------------------------------------------------------------------
def reset_tab ( self ):
""" Resets the state of the tab.
"""
self.reset_features()
self._layout()
#---------------------------------------------------------------------------
# Resets all currently defined features:
#---------------------------------------------------------------------------
def reset_features ( self ):
""" Resets all currently defined features.
"""
for feature in self.features:
feature.dispose()
self.features = []
self.num_features = 0
#---------------------------------------------------------------------------
# Forces the containing DockWindow to be laid out:
#---------------------------------------------------------------------------
def _layout ( self, layout = True ):
""" Forces the containing DockWindow to be laid out.
"""
if layout and (self.control is not None):
do_later( self.control.GetParent().owner.update_layout )
#---------------------------------------------------------------------------
# Handles the 'activated' event being fired:
#---------------------------------------------------------------------------
def _activated_fired(self):
""" Notifies the active dockable that the control's tab is being
activated.
"""
if self.dockable is not None:
self.dockable.dockable_tab_activated(self, True)
#---------------------------------------------------------------------------
# Handles the 'feature_changed' trait being changed:
#---------------------------------------------------------------------------
def _feature_changed ( self ):
""" Handles the 'feature_changed' trait being changed
"""
self.set_feature_mode()
#---------------------------------------------------------------------------
# Handles the 'control' trait being changed:
#---------------------------------------------------------------------------
def _control_changed ( self, old, new ):
""" Handles the 'control' trait being changed.
"""
self._tab_width = None
if old is not None:
old._dock_control = None
if new is not None:
new._dock_control = self
self.reset_tab()
#---------------------------------------------------------------------------
# Handles the 'name' trait being changed:
#---------------------------------------------------------------------------
def _name_changed ( self ):
""" Handles the 'name' trait being changed.
"""
self._tab_width = self._tab_name = None
#---------------------------------------------------------------------------
# Handles the 'style' trait being changed:
#---------------------------------------------------------------------------
def _style_changed ( self ):
""" Handles the 'style' trait being changed.
"""
if self.parent is not None:
self.parent._is_notebook = None
#---------------------------------------------------------------------------
# Handles the 'image' trait being changed:
#---------------------------------------------------------------------------
def _image_changed ( self ):
""" Handles the 'image' trait being changed.
"""
self._image = None
#---------------------------------------------------------------------------
# Handles the 'visible' trait being changed:
#---------------------------------------------------------------------------
def _visible_changed ( self ):
""" Handles the 'visible' trait being changed.
"""
if self.parent is not None:
self.parent.show_hide( self )
#---------------------------------------------------------------------------
# Handles the 'dockable' trait being changed:
#---------------------------------------------------------------------------
def _dockable_changed ( self, dockable ):
""" Handles the 'dockable' trait being changed.
"""
if dockable is not None:
dockable.dockable_bind( self )
#---------------------------------------------------------------------------
# Implementation of the 'object' property:
#---------------------------------------------------------------------------
def _get_object ( self ):
return getattr( self.control, '_object', None )
#---------------------------------------------------------------------------
# Implementation of the DockControl's property:
#---------------------------------------------------------------------------
def _get_dock_controls ( self ):
# Get all of the DockControls in the parent DockSizer:
controls = self.control.GetParent().GetSizer().GetContents(
).get_controls( False )
# Remove ourself from the list:
try:
controls.remove( self )
except ValueError:
pass
return controls
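# A minimal usage sketch (illustrative only; 'my_window' stands for an
# already-created wx.Window supplied by the caller). A client typically
# wraps its widget in a DockControl and places it in a DockRegion:
#
#     control = DockControl( control   = my_window,
#                            name      = 'My View',
#                            style     = 'tab',
#                            closeable = True )
#     region  = DockRegion( contents = [ control ] )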
#-------------------------------------------------------------------------------
# 'DockGroup' class:
#-------------------------------------------------------------------------------
class DockGroup ( DockItem ):
#---------------------------------------------------------------------------
# Trait definitions:
#---------------------------------------------------------------------------
# The contents of the group:
contents = List
# The UI name of this group:
name = Property
# Style of drag bar/tab:
style = Property
# Are the contents of the group resizable?
resizable = Property
# Category of control when it is dragged out of the DockWindow:
export = Constant( '' )
# Is the group visible?
visible = Property
# Content items which are visible:
visible_contents = Property
# Can the control be closed?
closeable = Property
# The control associated with this group:
control = Property
# Is the group locked?
locked = Property
# Has the initial layout been performed?
initialized = Bool( False )
#---------------------------------------------------------------------------
# Implementation of the 'name' property:
#---------------------------------------------------------------------------
def _get_name ( self ):
controls = self.get_controls()
n = len( controls )
if n == 0:
return ''
if n == 1:
return controls[0].name
return '%s [%d]' % ( controls[0].name, n )
#---------------------------------------------------------------------------
# Implementation of the 'visible' property:
#---------------------------------------------------------------------------
def _get_visible ( self ):
for item in self.contents:
if item.visible:
return True
return False
#---------------------------------------------------------------------------
# Implementation of the 'visible_contents' property:
#---------------------------------------------------------------------------
def _get_visible_contents ( self ):
return [ item for item in self.contents if item.visible ]
#---------------------------------------------------------------------------
# Implementation of the 'closeable' property:
#---------------------------------------------------------------------------
def _get_closeable ( self ):
for item in self.contents:
if not item.closeable:
return False
return True
#---------------------------------------------------------------------------
# Implementation of the 'style' property:
#---------------------------------------------------------------------------
def _get_style ( self ):
# Make sure there is at least one item in the group:
if len( self.contents ) > 0:
# Return the first item's style:
return self.contents[0].style
# Otherwise, return a default style for an empty group:
return 'horizontal'
#---------------------------------------------------------------------------
# Implementation of the 'resizable' property:
#---------------------------------------------------------------------------
def _get_resizable ( self ):
if self._resizable is None:
self._resizable = False
for control in self.get_controls():
if control.resizable:
self._resizable = True
break
return self._resizable
#---------------------------------------------------------------------------
# Implementation of the 'control' property:
#---------------------------------------------------------------------------
def _get_control ( self ):
if len( self.contents ) == 0:
return None
return self.contents[0].control
#---------------------------------------------------------------------------
# Implementation of the 'locked' property:
#---------------------------------------------------------------------------
def _get_locked ( self ):
return self.contents[0].locked
#---------------------------------------------------------------------------
# Handles 'initialized' being changed:
#---------------------------------------------------------------------------
def _initialized_changed( self ):
""" Handles 'initialized' being changed.
"""
for item in self.contents:
if isinstance( item, DockGroup ):
item.initialized = self.initialized
#---------------------------------------------------------------------------
# Hides or shows the contents of the group:
#---------------------------------------------------------------------------
def show ( self, visible = True, layout = True ):
""" Hides or shows the contents of the group.
"""
for item in self.contents:
item.show( visible, False )
if layout:
window = self.control.GetParent()
window.Layout()
window.Refresh()
#---------------------------------------------------------------------------
# Replaces a specified DockControl by another:
#---------------------------------------------------------------------------
def replace_control ( self, old, new ):
""" Replaces a specified DockControl by another.
"""
for i, item in enumerate( self.contents ):
if isinstance( item, DockControl ):
if item is old:
self.contents[i] = new
new.parent = self
return True
elif item.replace_control( old, new ):
return True
return False
#---------------------------------------------------------------------------
# Returns all DockControl objects contained in the group:
#---------------------------------------------------------------------------
def get_controls ( self, visible_only = True ):
""" Returns all DockControl objects contained in the group.
"""
if visible_only:
contents = self.visible_contents
else:
contents = self.contents
result = []
for item in contents:
result.extend( item.get_controls( visible_only ) )
return result
#---------------------------------------------------------------------------
# Gets the image (if any) associated with the group:
#---------------------------------------------------------------------------
def get_image ( self ):
""" Gets the image (if any) associated with the group.
"""
if len( self.contents ) == 0:
return None
return self.contents[0].get_image()
#---------------------------------------------------------------------------
# Gets the cursor to use when the mouse is over the item:
#---------------------------------------------------------------------------
def get_cursor ( self, event ):
""" Gets the cursor to use when the mouse is over the item.
"""
return wx.CURSOR_ARROW
#---------------------------------------------------------------------------
# Toggles the 'lock' status of every control in the group:
#---------------------------------------------------------------------------
def toggle_lock ( self ):
""" Toggles the 'lock' status of every control in the group.
"""
for item in self.contents:
item.toggle_lock()
#---------------------------------------------------------------------------
# Closes the group:
#---------------------------------------------------------------------------
def close ( self, layout = True, force = False ):
""" Closes the control.
"""
window = self.control.control.GetParent()
for item in self.contents[:]:
item.close( False, force = force )
if layout:
window.Layout()
window.Refresh()
#-------------------------------------------------------------------------------
# 'DockRegion' class:
#-------------------------------------------------------------------------------
class DockRegion ( DockGroup ):
#---------------------------------------------------------------------------
# Trait definitions:
#---------------------------------------------------------------------------
# Index of the currently active 'contents' DockControl:
active = Int
# Is the region drawn as a notebook or not:
is_notebook = Property
# Index of the tab scroll image to use (-1 = No tab scroll):
tab_scroll_index = Int( -1 )
# The index of the current leftmost visible tab:
left_tab = Int
# The current maximum value for 'left_tab':
max_tab = Int
# Contents have been modified property:
modified = Property
#---------------------------------------------------------------------------
# Calculates the minimum size of the region:
#---------------------------------------------------------------------------
def calc_min ( self, use_size = False ):
""" Calculates the minimum size of the region.
"""
tab_dx = tdx = tdy = 0
contents = self.visible_contents
theme = self.theme
if self.is_notebook:
for item in contents:
dx, dy = item.calc_min( use_size )
tdx = max( tdx, dx )
tdy = max( tdy, dy )
tab_dx += item.tab_width
tis = theme.tab.image_slice
tc = theme.tab.content
tdx = max( tdx, tab_dx ) + (tis.xleft + tis.xright +
tc.left + tc.right)
tdy += (theme.tab_active.image_slice.dy +
tis.xtop + tis.xbottom + tc.top + tc.bottom)
elif len( contents ) > 0:
item = contents[0]
tdx, tdy = item.calc_min( use_size )
if not item.locked:
if item.style == 'horizontal':
tdy += theme.horizontal_drag.image_slice.dy
elif item.style == 'vertical':
tdx += theme.vertical_drag.image_slice.dx
if self.width < 0:
self.width = tdx
self.height = tdy
return ( tdx, tdy )
#---------------------------------------------------------------------------
# Layout the contents of the region based on the specified bounds:
#---------------------------------------------------------------------------
def recalc_sizes ( self, x, y, dx, dy ):
""" Layout the contents of the region based on the specified bounds.
"""
self.width = dx = max( 0, dx )
self.height = dy = max( 0, dy )
self.bounds = ( x, y, dx, dy )
theme = self.theme
contents = self.visible_contents
if self.is_notebook:
tis = theme.tab.image_slice
tc = theme.tab.content
th = theme.tab_active.image_slice.dy
# Layout the region out as a notebook:
x += tis.xleft + tc.left
tx0 = tx = x + theme.tab.label.left
dx -= (tis.xleft + tis.xright + tc.left + tc.right)
ady = dy - th
dy = ady - tis.xtop - tis.xbottom - tc.top - tc.bottom
iy = y + tis.xtop + tc.top
if theme.tabs_at_top:
iy += th
else:
y += ady
for item in contents:
item.recalc_sizes( x, iy, dx, dy )
tdx = item.tab_width
item.set_drag_bounds( tx, y, tdx, th )
tx += tdx
# Calculate the default tab clipping bounds:
cdx = dx + tc.left + tc.right
self._tab_clip_bounds = ( tx0, y, cdx, th )
# Do we need to enable tab scrolling?
xr = tx0 + cdx
if tx > xr:
# Scrolling needed, calculate maximum tab index for scrolling:
self.max_tab = 1
n = len( contents ) - 1
xr -= DockImages._tab_scroller_dx
for i in range( n, -1, -1 ):
xr -= contents[i].tab_width
if xr < tx0:
self.max_tab = min( i + 1, n )
break
# Set the new leftmost tab index:
self.left_tab = min( self.left_tab, self.max_tab )
# Determine which tab scroll image to use:
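# The expression below packs both scroll directions into one image index:
# (scroll-one-way + 2 * scroll-other-way) - 1, which yields -1 when no
# scrolling is possible and 0, 1 or 2 to select the matching bitmap in
# DockImages._tab_scroller_images: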
self.tab_scroll_index = ((self.left_tab < self.max_tab) +
(2 * (self.left_tab > 0))) - 1
# Now adjust each tab's bounds accordingly:
if self.left_tab > 0:
adx = contents[ self.left_tab ].drag_bounds[0] - tx0
for item in contents:
dbx, dby, dbdx, dbdy = item.drag_bounds
item.set_drag_bounds( dbx - adx, dby, item.tab_width,
dbdy )
# Exclude the scroll buttons from the tab clipping region:
self._tab_clip_bounds = ( tx0, y, cdx -
DockImages._tab_scroller_dx, th )
else:
self.tab_scroll_index = -1
self.left_tab = 0
else:
# Lay the region out as a drag bar:
item = contents[0]
drag_bounds = ( 0, 0, 0, 0 )
if not item.locked:
if item.style == 'horizontal':
db_dy = theme.horizontal_drag.image_slice.dy
drag_bounds = ( x, y, dx, db_dy )
y += db_dy
dy -= db_dy
elif item.style == 'vertical':
db_dx = theme.vertical_drag.image_slice.dx
drag_bounds = ( x, y, db_dx, dy )
x += db_dx
dx -= db_dx
item.recalc_sizes( x, y, dx, dy )
item.set_drag_bounds( *drag_bounds )
# Make sure all of the contained controls have the right visibility:
self._set_visibility()
#---------------------------------------------------------------------------
# Adds a new control before or after a specified control:
#---------------------------------------------------------------------------
def add ( self, control, before = None, after = None, activate = True ):
""" Adds a new control before a specified control.
"""
contents = self.contents
if control.parent is self:
contents.remove( control )
if before is None:
if after is None:
i = len( contents )
else:
i = contents.index( after ) + 1
else:
i = contents.index( before )
contents.insert( i, control )
if activate:
self.active = i
#---------------------------------------------------------------------------
# Removes a specified item:
#---------------------------------------------------------------------------
def remove ( self, item ):
""" Removes a specified item.
"""
contents = self.contents
i = contents.index( item )
if isinstance( item, DockGroup ) and (len( item.contents ) == 1):
item = item.contents[0]
if isinstance( item, DockRegion ):
contents[ i: i + 1 ] = item.contents[:]
else:
contents[ i ] = item
else:
del contents[ i ]
# Change the active selection only if 'item' is in closing mode,
# or was dragged to a new location.
# If this entire dock region is being closed, then all contained
# dock items will be removed and we do not want to change 'active'
# selection.
if item._closing or item._dragging:
if (self.active > i) or (self.active >= len( contents )):
self.active -= 1
# If the active item was removed, then 'active' stays
# unchanged, but it reflects the index of the next page in
# the dock region. Since _active_changed won't be fired now,
# we fire the 'activated' event on the next page.
elif (i == self.active):
control = self.contents[ i ]
if isinstance( control, DockControl ):
control.activated = True
if self.parent is not None:
if len( contents ) == 0:
self.parent.remove( self )
elif ((len( contents ) == 1) and
isinstance( self.parent, DockRegion )):
self.parent.remove( self )
#---------------------------------------------------------------------------
# Returns a copy of the region 'structure', minus the actual content:
#---------------------------------------------------------------------------
def get_structure ( self ):
""" Returns a copy of the region 'structure', minus the actual content.
"""
return self.clone_traits( [ 'active', 'width', 'height' ] ).set(
contents = [ item.get_structure() for item in self.contents ] )
#---------------------------------------------------------------------------
# Toggles the 'lock' status of every control in the group:
#---------------------------------------------------------------------------
def toggle_lock ( self ):
""" Toggles the 'lock' status of every control in the group.
"""
super( DockRegion, self ).toggle_lock()
self._is_notebook = None
#---------------------------------------------------------------------------
# Draws the contents of the region:
#---------------------------------------------------------------------------
def draw ( self, dc ):
""" Draws the contents of the region.
"""
if self._visible is not False:
self.begin_draw( dc )
if self.is_notebook:
# fixme: There seems to be a case where 'draw' is called before
# 'recalc_sizes' (which defines '_tab_clip_bounds'), so we need
# to check to make sure it is defined. If not, it seems safe to
# exit immediately, since in all known cases, the bounds are
# ( 0, 0, 0, 0 ), so there is nothing to draw anyways. The
# question is why 'recalc_sizes' is not being called first.
if self._tab_clip_bounds is None:
self.end_draw( dc )
return
self.fill_bg_color( dc, *self.bounds )
if self.active >= len(self.contents):
# on some platforms, if the active tab was destroyed
# the new active tab may not have been set yet
self.active = len(self.contents) - 1
self._draw_notebook( dc )
active = self.active
# Draw the scroll buttons (if necessary):
x, y, dx, dy = self._tab_clip_bounds
index = self.tab_scroll_index
if index >= 0:
dc.DrawBitmap( DockImages._tab_scroller_images[ index ],
x + dx, y + 2, True )
# Draw all the inactive tabs first:
dc.SetClippingRegion( x, y, dx, dy )
last_inactive = -1
for i, item in enumerate( self.contents ):
if (i != active) and item.visible:
last_inactive = i
state = item.tab_state
if state not in NotActiveStates:
state = TabInactive
item.draw_tab( dc, state )
# Draw the active tab last:
self.contents[ active ].draw_tab( dc, TabActive )
# If the last inactive tab drawn is also the rightmost tab and
# the theme has a 'tab right edge' image, draw the image just
# to the right of the last tab:
if last_inactive > active:
if item.tab_state == TabInactive:
bitmap = self.theme.tab_inactive_edge_bitmap
else:
bitmap = self.theme.tab_hover_edge_bitmap
if bitmap is not None:
x, y, dx, dy = item.drag_bounds
dc.DrawBitmap( bitmap, x + dx, y, True )
else:
item = self.visible_contents[0]
if not item.locked:
getattr( item, 'draw_' + item.style )( dc )
self.end_draw( dc )
# Draw each of the items contained in the region:
for item in self.contents:
if item.visible:
item.draw( dc )
#---------------------------------------------------------------------------
# Returns the object at a specified window position:
#---------------------------------------------------------------------------
def object_at ( self, x, y ):
""" Returns the object at a specified window position.
"""
if (self._visible is not False) and self.is_at( x, y ):
if self.is_notebook and (self.tab_scroll_index >= 0):
cx, cy, cdx, cdy = self._tab_clip_bounds
if self.is_at( x, y, ( cx + cdx, cy + 2,
DockImages._tab_scroller_dx,
DockImages._tab_scroller_dy ) ):
return self
for item in self.visible_contents:
if item.is_at( x, y, item.drag_bounds ):
return item
object = item.object_at( x, y )
if object is not None:
return object
return None
#---------------------------------------------------------------------------
# Gets the DockInfo object for a specified window position:
#---------------------------------------------------------------------------
def dock_info_at ( self, x, y, tdx, is_control ):
""" Gets the DockInfo object for a specified window position.
"""
# Check to see if the point is in our drag bar:
info = super( DockRegion, self ).dock_info_at( x, y, tdx, is_control )
if info is not None:
return info
# If we are not visible, or the point is not contained in us, give up:
if (self._visible is False) or (not self.is_at( x, y )):
return None
# Check to see if the point is in the drag bars of any controls:
contents = self.visible_contents
for item in contents:
object = item.dock_info_at( x, y, tdx, is_control )
if object is not None:
return object
# If we are in 'notebook mode' check to see if the point is in the
# empty region outside of any tabs:
lx, ty, dx, dy = self.bounds
if self.is_notebook:
item = contents[-1]
ix, iy, idx, idy = item.drag_bounds
if (x > (ix + idx)) and (iy <= y < (iy + idy)):
return DockInfo( kind = DOCK_TAB,
tab_bounds = ( ix + idx, iy, tdx, idy ),
region = self )
# Otherwise, figure out which edge the point is closest to, and
# return a DockInfo object describing that edge:
left = x - lx
right = lx + dx - 1 - x
top = y - ty
bottom = ty + dy - 1 - y
choice = min( left, right, top, bottom )
mdx = dx / 3
mdy = dy / 3
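# Each edge zone extends one third of the way across the region; whichever
# edge the point is nearest determines the docking side: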
if choice == left:
return DockInfo( kind = DOCK_LEFT,
bounds = ( lx, ty, mdx, dy ),
region = self )
if choice == right:
return DockInfo( kind = DOCK_RIGHT,
bounds = ( lx + dx - mdx, ty, mdx, dy ),
region = self )
if choice == top:
return DockInfo( kind = DOCK_TOP,
bounds = ( lx, ty, dx, mdy ),
region = self )
return DockInfo( kind = DOCK_BOTTOM,
bounds = ( lx, ty + dy - mdy, dx, mdy ),
region = self )
#---------------------------------------------------------------------------
# Handles a contained notebook tab being clicked:
#---------------------------------------------------------------------------
def tab_clicked ( self, control ):
""" Handles a contained notebook tab being clicked.
"""
# Find the page that was clicked and mark it as active:
i = self.contents.index( control )
if i != self.active:
self.active = i
# Recalculate the tab layout:
self.recalc_sizes( *self.bounds )
# Force the notebook to be redrawn:
control.control.GetParent().RefreshRect( wx.Rect( *self.bounds ) )
# Fire the 'activated' event on the control:
if isinstance( control, DockControl ):
control.activated = True
#---------------------------------------------------------------------------
# Handles the user clicking an active scroll button:
#---------------------------------------------------------------------------
def scroll ( self, type, left_tab = 0 ):
""" Handles the user clicking an active scroll button.
"""
if type == SCROLL_LEFT:
left_tab = min( self.left_tab + 1, self.max_tab )
elif type == SCROLL_RIGHT:
left_tab = max( self.left_tab - 1, 0 )
if left_tab != self.left_tab:
# Calculate the amount we need to adjust each tab by:
contents = self.visible_contents
adx = (contents[ left_tab ].drag_bounds[0] -
contents[ self.left_tab ].drag_bounds[0])
# Set the new leftmost tab index:
self.left_tab = left_tab
# Determine which tab scroll image to use:
self.tab_scroll_index = ((left_tab < self.max_tab) +
(2 * (left_tab > 0))) - 1
# Now adjust each tab's bounds accordingly:
for item in contents:
dbx, dby, dbdx, dbdy = item.drag_bounds
item.set_drag_bounds( dbx - adx, dby, item.tab_width, dbdy )
# Finally, force a redraw of the affected part of the window:
x, y, dx, dy = self._tab_clip_bounds
item.control.GetParent().RefreshRect(
wx.Rect( x, y, dx + DockImages._tab_scroller_dx, dy ) )
#---------------------------------------------------------------------------
# Handles the left mouse button being pressed:
#---------------------------------------------------------------------------
def mouse_down ( self, event ):
""" Handles the left mouse button being pressed.
"""
self._scroll = self._get_scroll_button( event )
#---------------------------------------------------------------------------
# Handles the left mouse button being released:
#---------------------------------------------------------------------------
def mouse_up ( self, event ):
""" Handles the left mouse button being released.
"""
if ((self._scroll is not None) and
(self._scroll == self._get_scroll_button( event ))):
self.scroll( self._scroll )
else:
super( DockRegion, self ).mouse_up( event )
#---------------------------------------------------------------------------
# Handles the mouse moving while the left mouse button is pressed:
#---------------------------------------------------------------------------
def mouse_move ( self, event ):
""" Handles the mouse moving while the left mouse button is pressed.
"""
pass
#---------------------------------------------------------------------------
# Sets the visibility of the region:
#---------------------------------------------------------------------------
def set_visibility ( self, visible ):
""" Sets the visibility of the region.
"""
self._visible = visible
active = self.active
for i, item in enumerate( self.contents ):
item.set_visibility( visible and (i == active) )
#---------------------------------------------------------------------------
# Activates a specified control (i.e. makes it the current notebook tab):
#---------------------------------------------------------------------------
def activate ( self, control, layout = True ):
""" Activates a specified control (i.e. makes it the current notebook
tab).
"""
if control.visible and self.is_notebook:
active = self.contents.index( control )
if active != self.active:
self.active = active
self.make_active_tab_visible()
window = control.control.GetParent()
if layout:
do_later( window.owner.update_layout )
else:
window.RefreshRect( wx.Rect( *self.bounds ) )
else:
# Fire the activated event for the control.
if isinstance( control, DockControl ):
control.activated = True
#---------------------------------------------------------------------------
# Makes sure the active control's tab is completely visible (if possible):
#---------------------------------------------------------------------------
def make_active_tab_visible ( self ):
""" Makes sure the active control's tab is completely visible (if
possible).
"""
active = self.active
if active < self.left_tab:
self.scroll( SCROLL_TO, active )
else:
x, y, dx, dy = self.contents[ active ].drag_bounds
if not self.is_at( x + dx - 1, y + dy - 1, self._tab_clip_bounds ):
self.scroll( SCROLL_TO, min( active, self.max_tab ) )
#---------------------------------------------------------------------------
# Handles a contained DockControl item being hidden or shown:
#---------------------------------------------------------------------------
def show_hide ( self, control ):
""" Handles a contained DockControl item being hidden or shown.
"""
i = self.contents.index( control )
if i == self.active:
self._update_active()
elif (self.active < 0) and control.visible:
self.active = i
self._is_notebook = None
#---------------------------------------------------------------------------
# Prints the contents of the region:
#---------------------------------------------------------------------------
def dump ( self, indent ):
""" Prints the contents of the region.
"""
print ('%sRegion( %08X, active = %s, width = %d, height = %d )' % (
' ' * indent, id( self ), self.active, self.width, self.height ))
for item in self.contents:
item.dump( indent + 3 )
#---------------------------------------------------------------------------
# Returns which scroll button (if any) the pointer is currently over:
#---------------------------------------------------------------------------
def _get_scroll_button ( self, event ):
""" Returns which scroll button (if any) the pointer is currently over.
"""
x, y, dx, dy = self._tab_clip_bounds
if self.is_in( event, x + dx, y + 2, DockImages._tab_scroller_dx,
DockImages._tab_scroller_dy ):
if (event.GetX() - (x + dx)) < (DockImages._tab_scroller_dx / 2):
return SCROLL_LEFT
return SCROLL_RIGHT
return None
#---------------------------------------------------------------------------
# Updates the currently active page after a change:
#---------------------------------------------------------------------------
def _update_active ( self, active = None ):
""" Updates the currently active page after a change.
"""
if active is None:
active = self.active
contents = self.contents
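# Look forward from the current position first, then backward, and make the
# first visible page the new active one: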
for i in (range( active, len( contents ) ) +
range( active - 1, -1, -1 )):
if contents[ i ].visible:
self.active = i
return
self.active = -1
#---------------------------------------------------------------------------
# Handles the 'active' trait being changed:
#---------------------------------------------------------------------------
def _active_changed ( self, old, new ):
self._set_visibility()
# Set the correct tab state for each tab:
for i, item in enumerate( self.contents ):
item.tab_state = NormalStates[ i == new ]
n = len( self.contents )
if 0 <= old < n:
# Notify the previously active dockable that the control's tab is
# being deactivated:
control = self.contents[ old ]
if (isinstance( control, DockControl ) and
(control.dockable is not None)):
control.dockable.dockable_tab_activated( control, False )
if 0 <= new < n:
# Notify the new dockable that the control's tab is being
# activated:
control = self.contents[ new ]
if isinstance( control, DockControl ):
control.activated = True
#---------------------------------------------------------------------------
# Handles the 'contents' trait being changed:
#---------------------------------------------------------------------------
def _contents_changed ( self ):
""" Handles the 'contents' trait being changed.
"""
self._is_notebook = None
for item in self.contents:
item.parent = self
self.calc_min( True )
self.modified = True
def _contents_items_changed ( self, event ):
""" Handles the 'contents' trait being changed.
"""
self._is_notebook = None
for item in event.added:
item.parent = self
self.calc_min( True )
self.modified = True
#---------------------------------------------------------------------------
# Set the proper visibility for all contained controls:
#---------------------------------------------------------------------------
def _set_visibility ( self ):
""" Set the proper visiblity for all contained controls.
"""
active = self.active
for i, item in enumerate( self.contents ):
item.set_visibility( i == active )
#---------------------------------------------------------------------------
# Implementation of the 'modified' property:
#---------------------------------------------------------------------------
def _set_modified ( self, value ):
if self.parent is not None:
self.parent.modified = True
#---------------------------------------------------------------------------
# Implementation of the 'is_notebook' property:
#---------------------------------------------------------------------------
def _get_is_notebook ( self ):
if self._is_notebook is None:
contents = self.visible_contents
n = len( contents )
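# More than one visible item is always drawn as a notebook; a single item is
# drawn as a notebook only when its style is 'tab':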
self._is_notebook = (n > 1)
if n == 1:
self._is_notebook = (contents[0].style == 'tab')
return self._is_notebook
#---------------------------------------------------------------------------
# Draws the notebook body:
#---------------------------------------------------------------------------
def _draw_notebook ( self, dc ):
""" Draws the notebook body.
"""
theme = self.theme
tab_height = theme.tab_active.image_slice.dy
x, y, dx, dy = self.bounds
self.fill_bg_color( dc, x, y, dx, dy )
# Draws a box around the frame containing the tab contents, starting
# below the tab
pen = wx.Pen(wx.SystemSettings_GetColour(wx.SYS_COLOUR_BTNSHADOW))
dc.SetPen(pen)
dc.DrawRectangle(x, y+tab_height, dx, dy-tab_height)
# draw highlight
pen = wx.Pen(wx.SystemSettings_GetColour(wx.SYS_COLOUR_BTNHIGHLIGHT))
dc.SetPen(pen)
dc.DrawLine(x+1, y+tab_height+1, x+dx-1, y+tab_height+1)
# Erases the line under the active tab
x0 = x + self.tab_theme.label.left
x1 = x0
for i in range(self.active+1):
x0 = x1 + 1
x1 += self.contents[i].tab_width
dc.SetPen(wx.Pen(self.get_bg_color()))
dc.DrawLine(x0, y+tab_height, x1, y+tab_height)
dc.DrawLine(x0, y+tab_height+1, x1, y+tab_height+1)
#-------------------------------------------------------------------------------
# 'DockSection' class:
#-------------------------------------------------------------------------------
class DockSection ( DockGroup ):
#---------------------------------------------------------------------------
# Trait definitions:
#---------------------------------------------------------------------------
# Is this a row (or a column)?
is_row = Bool( True )
# Bounds of any splitter bars associated with the region:
splitters = List( DockSplitter )
# The DockWindow that owns this section (set on top level section only):
dock_window = Instance( 'enthought.pyface.dock.dock_window.DockWindow' )
# Contents of the section have been modified property:
modified = Property
#---------------------------------------------------------------------------
# Re-implementation of the 'owner' property:
#---------------------------------------------------------------------------
@cached_property
def _get_owner ( self ):
if self.dock_window is not None:
return self.dock_window
if self.parent is None:
return None
return self.parent.owner
#---------------------------------------------------------------------------
# Calculates the minimum size of the section:
#---------------------------------------------------------------------------
def calc_min ( self, use_size = False ):
""" Calculates the minimum size of the section.
"""
tdx = tdy = 0
contents = self.visible_contents
n = len( contents )
if self.is_row:
# allow 10 pixels for the splitter
sdx = 10
for item in contents:
dx, dy = item.calc_min( use_size )
tdx += dx
tdy = max( tdy, dy )
if self.resizable:
tdx += ((n - 1) * sdx)
else:
tdx += ((n + 1) * 3)
tdy += 6
else:
# allow 10 pixels for the splitter
sdy = 10
for item in contents:
dx, dy = item.calc_min( use_size )
tdx = max( tdx, dx )
tdy += dy
if self.resizable:
tdy += ((n - 1) * sdy)
else:
tdx += 6
tdy += ((n + 1) * 3)
if self.width < 0:
self.width = tdx
self.height = tdy
return ( tdx, tdy )
#---------------------------------------------------------------------------
# Perform initial layout of the section based on the specified bounds:
#---------------------------------------------------------------------------
def initial_recalc_sizes ( self, x, y, dx, dy ):
""" Layout the contents of the section based on the specified bounds.
"""
self.width = dx = max( 0, dx )
self.height = dy = max( 0, dy )
self.bounds = ( x, y, dx, dy )
# If none of the contents are resizable, use the fixed layout method
if not self.resizable:
self.recalc_sizes_fixed( x, y, dx, dy )
return
contents = self.visible_contents
n = len( contents ) - 1
splitters = []
# Find out how much space is available.
splitter_size = 10
sizes = []
if self.is_row:
total = dx - (n * splitter_size)
else:
total = dy - (n * splitter_size)
# Get requested sizes from the items.
for item in contents:
size = -1.0
for dock_control in item.get_controls():
dockable = dock_control.dockable
if dockable is not None and dockable.element is not None:
if self.is_row:
size = max( size, dockable.element.width )
else:
size = max( size, dockable.element.height )
sizes.append( size )
# Allocate requested space.
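# Requested sizes are interpreted as follows: a value >= 1 is an absolute
# pixel size (clamped to the space still available), a value in [0, 1) is a
# fraction of the total, and a negative value means no preference. For
# example, with total = 400, a request of 0.25 becomes 100 pixels, while a
# request of 150 is used as-is.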
avail = total
remain = 0
for i, sz in enumerate( sizes ):
if avail <= 0:
break
if sz >= 0:
if sz >= 1:
sz = min( sz, avail )
else:
sz *= total
sz = int( sz )
sizes[i] = sz
avail -= sz
else:
remain += 1
# Allocate the remainder to those parts that didn't request a width.
if remain > 0:
remain = int( avail / remain )
for i, sz in enumerate( sizes ):
if sz < 0:
sizes[i] = remain
# If all requested a width, allocate the remainder to the last item.
else:
sizes[-1] += avail
# Resize contents and add splitters
if self.is_row:
for i, item in enumerate( contents ):
idx = int( sizes[i] )
item.recalc_sizes( x, y, idx, dy )
x += idx
if i < n:
splitters.append(
DockSplitter( bounds = ( x, y, splitter_size, dy ),
style = 'vertical',
parent = self,
index = i ) )
x += splitter_size
else:
for i, item in enumerate( contents ):
idy = int( sizes[i] )
item.recalc_sizes( x, y, dx, idy )
y += idy
if i < n:
splitters.append(
DockSplitter( bounds = ( x, y, dx, splitter_size ),
style = 'horizontal',
parent = self,
index = i ) )
y += splitter_size
# Preserve the current internal '_last_bounds' for all splitters if
# possible:
cur_splitters = self.splitters
for i in range( min( len( splitters ), len( cur_splitters ) ) ):
splitters[i]._last_bounds = cur_splitters[i]._last_bounds
# Save the new set of splitter bars:
self.splitters = splitters
# Set the visibility for all contained items:
self._set_visibility()
#---------------------------------------------------------------------------
# Layout the contents of the section based on the specified bounds:
#---------------------------------------------------------------------------
def recalc_sizes ( self, x, y, dx, dy ):
""" Layout the contents of the section based on the specified bounds.
"""
# Check if we need to perform initial layout
if not self.initialized:
self.initial_recalc_sizes( x, y, dx, dy )
self.initialized = True
return
self.width = dx = max( 0, dx )
self.height = dy = max( 0, dy )
self.bounds = ( x, y, dx, dy )
# If none of the contents are resizable, use the fixed layout method:
if not self.resizable:
self.recalc_sizes_fixed( x, y, dx, dy )
return
contents = self.visible_contents
n = len( contents ) - 1
splitters = []
# Perform a horizontal layout:
if self.is_row:
# allow 10 pixels for the splitter
sdx = 10
dx -= (n * sdx)
cdx = 0
# Calculate the current and minimum width:
for item in contents:
cdx += item.width
cdx = max( 1, cdx )
# Calculate the delta between the current and new width:
delta = remaining = dx - cdx
# Allocate the change (plus or minus) proportionally based on each
# item's current size:
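# For example, three items of widths 100, 200 and 100 (cdx = 400) absorbing
# delta = +40 grow by 10, 20 and 10 pixels respectively; the last item always
# receives whatever remains, so rounding can neither lose nor invent pixels.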
for i, item in enumerate( contents ):
if i < n:
idx = int( round( float( item.width * delta ) / cdx ) )
else:
idx = remaining
remaining -= idx
idx += item.width
item.recalc_sizes( x, y, idx, dy )
x += idx
# Define the splitter bar between adjacent items:
if i < n:
splitters.append(
DockSplitter( bounds = ( x, y, sdx, dy ),
style = 'vertical',
parent = self,
index = i ) )
x += sdx
# Perform a vertical layout:
else:
# allow 10 pixels for the splitter
sdy = 10
dy -= (n * sdy)
cdy = 0
# Calculate the current and minimum height:
for item in contents:
cdy += item.height
cdy = max( 1, cdy )
# Calculate the delta between the current and new height:
delta = remaining = dy - cdy
# Allocate the change (plus or minus) proportionally based on each
# item's current size:
for i, item in enumerate( contents ):
if i < n:
idy = int( round( float( item.height * delta ) / cdy ) )
else:
idy = remaining
remaining -= idy
idy += item.height
item.recalc_sizes( x, y, dx, idy )
y += idy
# Define the splitter bar between adjacent items:
if i < n:
splitters.append(
DockSplitter( bounds = ( x, y, dx, sdy ),
style = 'horizontal',
parent = self,
index = i ) )
y += sdy
# Preserve the current internal '_last_bounds' for all splitters if
# possible:
cur_splitters = self.splitters
for i in range( min( len( splitters ), len( cur_splitters ) ) ):
splitters[i]._last_bounds = cur_splitters[i]._last_bounds
# Save the new set of splitter bars:
self.splitters = splitters
# Set the visibility for all contained items:
self._set_visibility()
#---------------------------------------------------------------------------
# Layout the contents of the section based on the specified bounds using
# the minimum requested size for each item:
#---------------------------------------------------------------------------
def recalc_sizes_fixed ( self, x, y, dx, dy ):
""" Layout the contents of the section based on the specified bounds
using the minimum requested size for each item.
"""
self.splitters = []
x += 3
y += 3
dx = max( 0, dx - 3 )
dy = max( 0, dy - 3 )
# Perform a horizontal layout:
if self.is_row:
# Allocate the space for each item based on its minimum size until
# the space runs out:
for item in self.visible_contents:
idx, idy = item.calc_min()
idx = min( dx, idx )
idy = min( dy, idy )
dx = max( 0, dx - idx - 3 )
item.recalc_sizes( x, y, idx, idy )
x += idx + 3
# Perform a vertical layout:
else:
# Allocate the space for each item based on its minimum size until
# the space runs out:
for item in self.visible_contents:
idx, idy = item.calc_min()
idx = min( dx, idx )
idy = min( dy, idy )
dy = max( 0, dy - idy - 3 )
item.recalc_sizes( x, y, idx, idy )
y += idy + 3
# Set the visibility for all contained items:
self._set_visibility()
#---------------------------------------------------------------------------
# Draws the contents of the section:
#---------------------------------------------------------------------------
def draw ( self, dc ):
""" Draws the contents of the section.
"""
if self._visible is not False:
contents = self.visible_contents
x, y, dx, dy = self.bounds
self.fill_bg_color( dc, x, y, dx, dy )
for item in contents:
item.draw( dc )
self.begin_draw( dc )
for item in self.splitters:
item.draw( dc )
self.end_draw( dc )
#---------------------------------------------------------------------------
# Returns the object at a specified window position:
#---------------------------------------------------------------------------
def object_at ( self, x, y, force = False ):
""" Returns the object at a specified window position.
"""
if self._visible is not False:
for item in self.splitters:
if item.is_at( x, y ):
return item
for item in self.visible_contents:
object = item.object_at( x, y )
if object is not None:
return object
if force and self.is_at( x, y ):
return self
return None
#---------------------------------------------------------------------------
# Gets the DockInfo object for a specified window position:
#---------------------------------------------------------------------------
def dock_info_at ( self, x, y, tdx, is_control, force = False ):
""" Gets the DockInfo object for a specified window position.
"""
# Check to see if the point is in our drag bar:
info = super( DockSection, self ).dock_info_at( x, y, tdx, is_control )
if info is not None:
return info
if self._visible is False:
return None
for item in self.splitters:
if item.is_at( x, y ):
return DockInfo( kind = DOCK_SPLITTER )
for item in self.visible_contents:
object = item.dock_info_at( x, y, tdx, is_control )
if object is not None:
return object
# Check to see if we must return a DockInfo object:
if not force:
return None
# Otherwise, figure out which edge the point is closest to, and
# return a DockInfo object describing that edge:
lx, ty, dx, dy = self.bounds
left = lx - x
right = x - lx - dx + 1
top = ty - y
bottom = y - ty - dy + 1
# If the point is way outside of the section, mark it as a drag and
# drop candidate:
if max( left, right, top, bottom ) > 20:
return DockInfo( kind = DOCK_EXPORT )
left = abs( left )
right = abs( right )
top = abs( top )
bottom = abs( bottom )
choice = min( left, right, top, bottom )
mdx = dx / 3
mdy = dy / 3
if choice == left:
return DockInfo( kind = DOCK_LEFT,
bounds = ( lx, ty, mdx, dy ) )
if choice == right:
return DockInfo( kind = DOCK_RIGHT,
bounds = ( lx + dx - mdx, ty, mdx, dy ) )
if choice == top:
return DockInfo( kind = DOCK_TOP,
bounds = ( lx, ty, dx, mdy ) )
return DockInfo( kind = DOCK_BOTTOM,
bounds = ( lx, ty + dy - mdy, dx, mdy ) )
#---------------------------------------------------------------------------
# Adds a control to the section at the edge of the region specified:
#---------------------------------------------------------------------------
def add ( self, control, region, kind ):
""" Adds a control to the section at the edge of the region specified.
"""
contents = self.contents
new_region = control
if not isinstance( control, DockRegion ):
new_region = DockRegion( contents = [ control ] )
i = contents.index( region )
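# Docking perpendicular to this section's orientation wraps the target
# region and the new region in a nested DockSection of the opposite
# orientation; docking parallel simply inserts the new region next to the
# target: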
if self.is_row:
if (kind == DOCK_TOP) or (kind == DOCK_BOTTOM):
if kind == DOCK_TOP:
new_contents = [ new_region, region ]
else:
new_contents = [ region, new_region ]
contents[ i ] = DockSection( is_row = False ).set(
contents = new_contents )
else:
if new_region.parent is self:
contents.remove( new_region )
i = contents.index( region )
if kind == DOCK_RIGHT:
i += 1
contents.insert( i, new_region )
else:
if (kind == DOCK_LEFT) or (kind == DOCK_RIGHT):
if kind == DOCK_LEFT:
new_contents = [ new_region, region ]
else:
new_contents = [ region, new_region ]
contents[ i ] = DockSection( is_row = True ).set(
contents = new_contents )
else:
if new_region.parent is self:
contents.remove( new_region )
i = contents.index( region )
if kind == DOCK_BOTTOM:
i += 1
contents.insert( i, new_region )
#---------------------------------------------------------------------------
# Removes a specified region or section from the section:
#---------------------------------------------------------------------------
def remove ( self, item ):
""" Removes a specified region or section from the section.
"""
contents = self.contents
if isinstance( item, DockGroup ) and (len( item.contents ) == 1):
contents[ contents.index( item ) ] = item.contents[0]
else:
contents.remove( item )
if self.parent is not None:
if len( contents ) <= 1:
self.parent.remove( self )
elif (len( contents ) == 0) and (self.dock_window is not None):
self.dock_window.dock_window_empty()
#---------------------------------------------------------------------------
# Sets the visibility of the group:
#---------------------------------------------------------------------------
def set_visibility ( self, visible ):
""" Sets the visibility of the group.
"""
self._visible = visible
for item in self.contents:
item.set_visibility( visible )
#---------------------------------------------------------------------------
# Returns a copy of the section 'structure', minus the actual content:
#---------------------------------------------------------------------------
def get_structure ( self ):
""" Returns a copy of the section 'structure', minus the actual content.
"""
return self.clone_traits( [ 'is_row', 'width', 'height' ] ).set(
contents = [ item.get_structure() for item in self.contents ],
splitters = [ item.get_structure() for item in self.splitters ] )
#---------------------------------------------------------------------------
# Gets the maximum bounds that a splitter bar is allowed to be dragged:
#---------------------------------------------------------------------------
def get_splitter_bounds ( self, splitter ):
""" Gets the maximum bounds that a splitter bar is allowed to be dragged.
"""
x, y, dx, dy = splitter.bounds
i = self.splitters.index( splitter )
contents = self.visible_contents
item1 = contents[ i ]
item2 = contents[ i + 1 ]
bx, by, bdx, bdy = item2.bounds
if self.is_row:
x = item1.bounds[0]
dx = bx + bdx - x
else:
y = item1.bounds[1]
dy = by + bdy - y
return ( x, y, dx, dy )
#---------------------------------------------------------------------------
# Updates the affected regions when a splitter bar is released:
#---------------------------------------------------------------------------
def update_splitter ( self, splitter, window ):
""" Updates the affected regions when a splitter bar is released.
"""
x, y, dx, dy = splitter.bounds
i = self.splitters.index( splitter )
contents = self.visible_contents
item1 = contents[ i ]
item2 = contents[ i + 1 ]
ix1, iy1, idx1, idy1 = item1.bounds
ix2, iy2, idx2, idy2 = item2.bounds
window.Freeze()
if self.is_row:
item1.recalc_sizes( ix1, iy1, x - ix1, idy1 )
item2.recalc_sizes( x + dx, iy2, ix2 + idx2 - x - dx, idy2 )
else:
item1.recalc_sizes( ix1, iy1, idx1, y - iy1 )
item2.recalc_sizes( ix2, y + dy, idx2, iy2 + idy2 - y - dy )
window.Thaw()
if splitter.style == 'horizontal':
dx = 0
else:
dy = 0
window.RefreshRect( wx.Rect( ix1 - dx, iy1 - dy,
ix2 + idx2 - ix1 + 2 * dx, iy2 + idy2 - iy1 + 2 * dy ) )
#---------------------------------------------------------------------------
# Prints the contents of the section:
#---------------------------------------------------------------------------
def dump ( self, indent = 0 ):
""" Prints the contents of the section.
"""
print '%sSection( %08X, is_row = %s, width = %d, height = %d )' % (
' ' * indent, id( self ), self.is_row, self.width, self.height )
for item in self.contents:
item.dump( indent + 3 )
#---------------------------------------------------------------------------
# Sets the correct visibility for all contained items:
#---------------------------------------------------------------------------
def _set_visibility ( self ):
""" Sets the correct visiblity for all contained items.
"""
for item in self.contents:
item.set_visibility( item.visible )
#---------------------------------------------------------------------------
# Handles the 'contents' trait being changed:
#---------------------------------------------------------------------------
def _contents_changed ( self ):
""" Handles the 'contents' trait being changed.
"""
for item in self.contents:
item.parent = self
self.calc_min( True )
self.modified = True
def _contents_items_changed ( self, event ):
""" Handles the 'contents' trait being changed.
"""
for item in event.added:
item.parent = self
self.calc_min( True )
self.modified = True
#---------------------------------------------------------------------------
# Handles the 'splitters' trait being changed:
#---------------------------------------------------------------------------
def _splitters_changed ( self ):
""" Handles the 'splitters' trait being changed.
"""
for item in self.splitters:
item.parent = self
def _splitters_items_changed ( self, event ):
""" Handles the 'splitters' trait being changed.
"""
for item in event.added:
item.parent = self
#---------------------------------------------------------------------------
# Implementation of the 'modified' property:
#---------------------------------------------------------------------------
def _set_modified ( self, value ):
self._resizable = None
if self.parent is not None:
self.parent.modified = True
#-------------------------------------------------------------------------------
# 'DockInfo' class:
#-------------------------------------------------------------------------------
class DockInfo ( HasPrivateTraits ):
#---------------------------------------------------------------------------
# Trait definitions:
#---------------------------------------------------------------------------
# Dock kind:
kind = Range( DOCK_TOP, DOCK_EXPORT )
# Dock bounds:
bounds = Bounds
# Tab bounds (if needed):
tab_bounds = Bounds
# Dock Region:
region = Instance( DockRegion )
# Dock Control:
control = Instance( DockItem )
def __init__(self, **kw):
super(DockInfo, self).__init__(**kw)
#---------------------------------------------------------------------------
# Draws the DockInfo on the display:
#---------------------------------------------------------------------------
def draw ( self, window, bitmap = None ):
""" Draws the DockInfo on the display.
"""
if DOCK_TOP <= self.kind <= DOCK_TABADD:
if bitmap is None:
bitmap = self._bitmap
if bitmap is None:
return
else:
self._bitmap = bitmap
sdc, bx, by = get_dc( window )
bdc = wx.MemoryDC()
bdc2 = wx.MemoryDC()
bdx, bdy = bitmap.GetWidth(), bitmap.GetHeight()
bitmap2 = wx.EmptyBitmap( bdx, bdy )
bdc.SelectObject( bitmap )
bdc2.SelectObject( bitmap2 )
bdc2.Blit( 0, 0, bdx, bdy, bdc, 0, 0 )
try:
bdc3 = wx.GCDC( bdc2 )
bdc3.SetPen( wx.TRANSPARENT_PEN )
bdc3.SetBrush( wx.Brush( wx.Colour( *DockColorBrush ) ) )
x, y, dx, dy = self.bounds
if DOCK_TAB <= self.kind <= DOCK_TABADD:
tx, ty, tdx, tdy = self.tab_bounds
bdc3.DrawRoundedRectangle( tx, ty, tdx, tdy, 4 )
else:
bdc3.DrawRoundedRectangle( x, y, dx, dy, 8 )
except Exception:
pass
sdc.Blit( bx, by, bdx, bdy, bdc2, 0, 0 )
#---------------------------------------------------------------------------
# Docks the specified control:
#---------------------------------------------------------------------------
def dock ( self, control, window ):
""" Docks the specified control.
"""
the_control = control
kind = self.kind
if kind < DOCK_NONE:
the_parent = control.parent
region = self.region
if (kind == DOCK_TAB) or (kind == DOCK_BAR):
region.add( control, self.control )
elif kind == DOCK_TABADD:
item = self.control
if isinstance( item, DockControl ):
if isinstance( control, DockControl ):
control = DockRegion( contents = [ control ] )
i = region.contents.index( item )
region.contents[ i ] = item = DockSection(
contents = [ DockRegion( contents = [ item ] ),
control ],
is_row = True )
elif isinstance( item, DockSection ):
if (isinstance( control, DockSection ) and
(item.is_row == control.is_row)):
item.contents.extend( control.contents )
else:
if isinstance( control, DockControl ):
control = DockRegion( contents = [ control ] )
item.contents.append( control )
else:
item.contents.append( control )
region.active = region.contents.index( item )
elif region is not None:
region.parent.add( control, region, kind )
else:
sizer = window.GetSizer()
section = sizer._contents
if ((section.is_row and
((kind == DOCK_TOP) or (kind == DOCK_BOTTOM))) or
((not section.is_row) and
((kind == DOCK_LEFT) or (kind == DOCK_RIGHT)))):
if len( section.contents ) > 0:
sizer._contents = section = DockSection(
is_row = not section.is_row ).set(
contents = [ section ] )
if len( section.contents ) > 0:
i = 0
if (kind == DOCK_RIGHT) or (kind == DOCK_BOTTOM):
i = -1
section.add( control, section.contents[ i ], kind )
else:
section.is_row = not section.is_row
section.contents = [ DockRegion( contents = [ control ] ) ]
section = None
if ((the_parent is not None) and
(the_parent is not the_control.parent)):
the_parent.remove( the_control )
# Force the main window to be laid out and redrawn:
window.Layout()
window.Refresh()
# Create a reusable DockInfo indicating no information available:
no_dock_info = DockInfo( kind = DOCK_NONE )
#-------------------------------------------------------------------------------
# 'SetStructureHandler' class
#-------------------------------------------------------------------------------
class SetStructureHandler ( object ):
#---------------------------------------------------------------------------
# Resolves an unresolved DockControl id:
#---------------------------------------------------------------------------
def resolve_id ( self, id ):
""" Resolves an unresolved DockControl id.
"""
return None
#---------------------------------------------------------------------------
# Resolves extra, unused DockControls not referenced by the structure:
#---------------------------------------------------------------------------
def resolve_extras ( self, structure, extras ):
""" Resolves extra, unused DockControls not referenced by the structure.
"""
for dock_control in extras:
if dock_control.control is not None:
dock_control.control.Show( False )
#-------------------------------------------------------------------------------
# 'DockSizer' class:
#-------------------------------------------------------------------------------
class DockSizer ( wx.PySizer ):
#---------------------------------------------------------------------------
# Initializes the object:
#---------------------------------------------------------------------------
def __init__ ( self, contents = None ):
super( DockSizer, self ).__init__()
# Make sure the DockImages singleton has been initialized:
DockImages.init()
# Finish initializing the sizer itself:
self._contents = self._structure = self._max_structure = None
if contents is not None:
self.SetContents( contents )
#---------------------------------------------------------------------------
# Calculates the minimum size needed by the sizer:
#---------------------------------------------------------------------------
def CalcMin ( self ):
if self._contents is None:
return wx.Size( 20, 20 )
dx, dy = self._contents.calc_min()
return wx.Size( dx, dy )
#---------------------------------------------------------------------------
# Layout the contents of the sizer based on the sizer's current size and
# position:
#---------------------------------------------------------------------------
def RecalcSizes ( self ):
""" Layout the contents of the sizer based on the sizer's current size
and position.
"""
if self._contents is None:
return
x, y = self.GetPositionTuple()
dx, dy = self.GetSizeTuple()
self._contents.recalc_sizes( x, y, dx, dy )
#---------------------------------------------------------------------------
# Returns the current sizer contents:
#---------------------------------------------------------------------------
def GetContents ( self ):
""" Returns the current sizer contents.
"""
return self._contents
#---------------------------------------------------------------------------
# Initializes the layout of a DockWindow from a content list:
#---------------------------------------------------------------------------
def SetContents ( self, contents ):
""" Initializes the layout of a DockWindow from a content list.
"""
if isinstance( contents, DockGroup ):
self._contents = contents
elif isinstance( contents, tuple ):
self._contents = self._set_region( contents )
elif isinstance( contents, list ):
self._contents = self._set_section( contents, True )
elif isinstance( contents, DockControl ):
self._contents = self._set_section( [ contents ], True )
else:
raise TypeError
# Set the owner DockWindow for the top-level group (if possible)
# so that it can notify the owner when the DockWindow becomes empty:
control = self._contents.control
if control is not None:
self._contents.dock_window = control.GetParent().owner
# If no saved structure exists yet, save the current one:
if self._structure is None:
self._structure = self.GetStructure()
def _set_region ( self, contents ):
items = []
for item in contents:
if isinstance( item, tuple ):
items.append( self._set_region( item ) )
elif isinstance( item, list ):
items.append( self._set_section( item, True ) )
elif isinstance( item, DockItem ):
items.append( item )
else:
raise TypeError
return DockRegion( contents = items )
def _set_section ( self, contents, is_row ):
items = []
for item in contents:
if isinstance( item, tuple ):
items.append( self._set_region( item ) )
elif isinstance( item, list ):
items.append( self._set_section( item, not is_row ) )
elif isinstance( item, DockControl ):
items.append( DockRegion( contents = [ item ] ) )
else:
raise TypeError
return DockSection( is_row = is_row ).set( contents = items )
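# A minimal sketch of the nested-list format accepted by SetContents()
# (control names are hypothetical): tuples become tabbed DockRegions,
# nested lists become DockSections whose orientation alternates between
# row and column at each level, and bare DockControls are wrapped in a
# region of their own.
#
#     sizer.SetContents( [            # top level: a row section
#         ( control_a, control_b ),   # a region with two tabbed controls
#         [ control_c, control_d ],   # a nested column section
#     ] )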
#---------------------------------------------------------------------------
# Returns a copy of the layout 'structure', minus the actual content
# (i.e. controls, splitters, bounds). This method is intended for use in
# persisting the current user layout, so that it can be restored in a
# future session:
#---------------------------------------------------------------------------
def GetStructure ( self ):
""" Returns a copy of the layout 'structure', minus the actual content
(i.e. controls, splitters, bounds). This method is intended for use
in persisting the current user layout, so that it can be restored in
a future session.
"""
if self._contents is not None:
return self._contents.get_structure()
return DockSection()
#---------------------------------------------------------------------------
# Takes a previously saved 'GetStructure' result and applies it to the
# contents of the sizer in order to restore a previous layout using a
# new set of controls:
#---------------------------------------------------------------------------
def SetStructure ( self, window, structure, handler = None ):
""" Takes a previously saved 'GetStructure' result and applies it to the
contents of the sizer in order to restore a previous layout using a
new set of controls.
"""
section = self._contents
if (section is None) or (not isinstance( structure, DockGroup )):
return
# Make sure that DockSections, which have a separate layout algorithm
# for the first layout, are set as initialized.
structure.initialized = True
# Save the current structure in case a 'ResetStructure' call is made
# later:
self._structure = self.GetStructure()
extras = []
# Create a mapping for all the DockControls in the new structure:
map = {}
for control in structure.get_controls( False ):
if control.id in map:
control.parent.remove( control )
else:
map[ control.id ] = control
# Try to map each current item into an equivalent item in the saved
# preferences:
for control in section.get_controls( False ):
mapped_control = map.get( control.id )
if mapped_control is not None:
control.set( **mapped_control.get( 'visible', 'locked',
'closeable', 'resizable', 'width', 'height' ) )
if mapped_control.user_name:
control.name = mapped_control.name
if mapped_control.user_style:
control.style = mapped_control.style
structure.replace_control( mapped_control, control )
del map[ control.id ]
else:
extras.append( control )
# Try to resolve all unused saved items:
for id, item in map.items():
# If there is a handler, see if it can resolve it:
if handler is not None:
control = handler.resolve_id( id )
if control is not None:
item.control = control
continue
# If nobody knows what it is, just remove it:
item.parent.remove( item )
# Check if there are any new items that we have never seen before:
if len( extras ) > 0:
if handler is not None:
# Allow the handler to decide their fate:
handler.resolve_extras( structure, extras )
else:
# Otherwise, add them to the top level as a new region (let the
# user re-arrange them):
structure.contents.append( DockRegion( contents = extras ) )
# Finally, replace the original structure with the updated structure:
self.SetContents( structure )
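# A minimal sketch of the save/restore cycle (variable names are
# hypothetical): the structure returned by GetStructure() is persisted
# between sessions and replayed through SetStructure() once the new set
# of controls has been created.
#
#     structure = window.GetSizer().GetStructure()         # on shutdown
#     # ... pickle the structure, restart, rebuild the controls ...
#     window.GetSizer().SetStructure( window, structure )  # on startup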
#---------------------------------------------------------------------------
# Restores the previously saved structure (if any):
#---------------------------------------------------------------------------
def ResetStructure ( self, window ):
""" Restores the previously saved structure (if any).
"""
if self._structure is not None:
self.SetStructure( window, self._structure )
#---------------------------------------------------------------------------
# Toggles the current 'lock' setting of the contents:
#---------------------------------------------------------------------------
def ToggleLock ( self ):
""" Toggles the current 'lock' setting of the contents.
"""
if self._contents is not None:
self._contents.toggle_lock()
#---------------------------------------------------------------------------
# Draws the contents of the sizer:
#---------------------------------------------------------------------------
def Draw ( self, window ):
""" Draws the contents of the sizer.
"""
if self._contents is not None:
self._contents.draw( set_standard_font( wx.PaintDC( window ) ) )
else:
clear_window( window )
#---------------------------------------------------------------------------
# Returns the object at a specified x, y position:
#---------------------------------------------------------------------------
def ObjectAt ( self, x, y, force = False ):
""" Returns the object at a specified window position.
"""
if self._contents is not None:
return self._contents.object_at( x, y, force )
return None
#---------------------------------------------------------------------------
# Gets a DockInfo object at a specified x, y position:
#---------------------------------------------------------------------------
def DockInfoAt ( self, x, y, size, is_control ):
""" Gets a DockInfo object at a specified x, y position.
"""
if self._contents is not None:
return self._contents.dock_info_at( x, y, size, is_control, True )
return no_dock_info
#---------------------------------------------------------------------------
# Minimizes/Maximizes a specified DockControl:
#---------------------------------------------------------------------------
def MinMax ( self, window, dock_control ):
""" Minimizes/Maximizes a specified DockControl.
"""
if self._max_structure is None:
self._max_structure = self.GetStructure()
for control in self.GetContents().get_controls():
control.visible = (control is dock_control)
else:
self.Reset( window )
#---------------------------------------------------------------------------
# Resets the DockSizer to a known state:
#---------------------------------------------------------------------------
def Reset ( self, window ):
""" Resets the DockSizer to a known state.
"""
if self._max_structure is not None:
self.SetStructure( window, self._max_structure )
self._max_structure = None
#---------------------------------------------------------------------------
# Returns whether the sizer can be maximized now:
#---------------------------------------------------------------------------
def IsMaximizable ( self ):
""" Returns whether the sizer can be maximized now.
"""
return (self._max_structure is None)
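# A minimal sketch of the maximize/restore cycle (names are hypothetical):
# the first call saves the current layout and shows only dock_control; the
# second call restores the saved layout via Reset().
#
#     sizer.MinMax( window, dock_control )   # maximize dock_control
#     sizer.MinMax( window, dock_control )   # restore the previous layout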
def top_level_window_for ( control ):
""" Returns the top-level window for a specified control.
"""
parent = control.GetParent()
while parent is not None:
control = parent
parent = control.GetParent()
return control
| enthought/traitsgui | enthought/pyface/dock/dock_sizer.py | Python | bsd-3-clause | 155,245 |
#!/usr/bin/env python
# Copyright 2019 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Unit tests for the contents of cpu_temperature.py
"""
# pylint: disable=unused-argument
import logging
import unittest
from devil import devil_env
from devil.android import cpu_temperature
from devil.android import device_utils
from devil.utils import mock_calls
from devil.android.sdk import adb_wrapper
with devil_env.SysPath(devil_env.PYMOCK_PATH):
import mock # pylint: disable=import-error
class CpuTemperatureTest(mock_calls.TestCase):
@mock.patch('devil.android.perf.perf_control.PerfControl', mock.Mock())
def setUp(self):
# Mock the device
self.mock_device = mock.Mock(spec=device_utils.DeviceUtils)
self.mock_device.build_product = 'blueline'
self.mock_device.adb = mock.Mock(spec=adb_wrapper.AdbWrapper)
self.mock_device.FileExists.return_value = True
self.cpu_temp = cpu_temperature.CpuTemperature(self.mock_device)
self.cpu_temp.InitThermalDeviceInformation()
class CpuTemperatureInitTest(unittest.TestCase):
@mock.patch('devil.android.perf.perf_control.PerfControl', mock.Mock())
def testInitWithDeviceUtil(self):
d = mock.Mock(spec=device_utils.DeviceUtils)
d.build_product = 'blueline'
c = cpu_temperature.CpuTemperature(d)
self.assertEqual(d, c.GetDeviceForTesting())
def testInitWithMissing_fails(self):
with self.assertRaises(TypeError):
cpu_temperature.CpuTemperature(None)
with self.assertRaises(TypeError):
cpu_temperature.CpuTemperature('')
class CpuTemperatureGetThermalDeviceInformationTest(CpuTemperatureTest):
@mock.patch('devil.android.perf.perf_control.PerfControl', mock.Mock())
def testGetThermalDeviceInformation_noneWhenIncorrectLabel(self):
invalid_device = mock.Mock(spec=device_utils.DeviceUtils)
invalid_device.build_product = 'invalid_name'
c = cpu_temperature.CpuTemperature(invalid_device)
c.InitThermalDeviceInformation()
self.assertEqual(c.GetDeviceInfoForTesting(), None)
def testGetThermalDeviceInformation_getsCorrectInformation(self):
correct_information = {
'cpu0': '/sys/class/thermal/thermal_zone11/temp',
'cpu1': '/sys/class/thermal/thermal_zone12/temp',
'cpu2': '/sys/class/thermal/thermal_zone13/temp',
'cpu3': '/sys/class/thermal/thermal_zone14/temp',
'cpu4': '/sys/class/thermal/thermal_zone15/temp',
'cpu5': '/sys/class/thermal/thermal_zone16/temp',
'cpu6': '/sys/class/thermal/thermal_zone17/temp',
'cpu7': '/sys/class/thermal/thermal_zone18/temp'
}
self.assertEqual(
correct_information,
self.cpu_temp.GetDeviceInfoForTesting().get('cpu_temps'))
class CpuTemperatureIsSupportedTest(CpuTemperatureTest):
@mock.patch('devil.android.perf.perf_control.PerfControl', mock.Mock())
def testIsSupported_returnsTrue(self):
d = mock.Mock(spec=device_utils.DeviceUtils)
d.build_product = 'blueline'
d.FileExists.return_value = True
c = cpu_temperature.CpuTemperature(d)
self.assertTrue(c.IsSupported())
@mock.patch('devil.android.perf.perf_control.PerfControl', mock.Mock())
def testIsSupported_returnsFalse(self):
d = mock.Mock(spec=device_utils.DeviceUtils)
d.build_product = 'blueline'
d.FileExists.return_value = False
c = cpu_temperature.CpuTemperature(d)
self.assertFalse(c.IsSupported())
class CpuTemperatureLetCpuCoolToTemperatureTest(CpuTemperatureTest):
# Return values for the mock side effect
cooling_down0 = (
[45000
for _ in range(8)] + [43000
for _ in range(8)] + [41000 for _ in range(8)])
@mock.patch('time.sleep', mock.Mock())
def testLetBatteryCoolToTemperature_coolWithin24Calls(self):
self.mock_device.ReadFile = mock.Mock(side_effect=self.cooling_down0)
self.cpu_temp.LetCpuCoolToTemperature(42)
self.mock_device.ReadFile.assert_called()
self.assertEqual(self.mock_device.ReadFile.call_count, 24)
cooling_down1 = [45000 for _ in range(8)] + [41000 for _ in range(16)]
@mock.patch('time.sleep', mock.Mock())
def testLetBatteryCoolToTemperature_coolWithin16Calls(self):
self.mock_device.ReadFile = mock.Mock(side_effect=self.cooling_down1)
self.cpu_temp.LetCpuCoolToTemperature(42)
self.mock_device.ReadFile.assert_called()
self.assertEqual(self.mock_device.ReadFile.call_count, 16)
constant_temp = [45000 for _ in range(40)]
@mock.patch('time.sleep', mock.Mock())
def testLetBatteryCoolToTemperature_timeoutAfterThree(self):
self.mock_device.ReadFile = mock.Mock(side_effect=self.constant_temp)
self.cpu_temp.LetCpuCoolToTemperature(42)
self.mock_device.ReadFile.assert_called()
self.assertEqual(self.mock_device.ReadFile.call_count, 24)
if __name__ == '__main__':
logging.getLogger().setLevel(logging.DEBUG)
unittest.main(verbosity=2)
| endlessm/chromium-browser | third_party/catapult/devil/devil/android/cpu_temperature_test.py | Python | bsd-3-clause | 4,988 |
#!/usr/bin/env python
# Licensed under a 3-clause BSD style license - see LICENSE.rst
import glob
import os
import sys
import ah_bootstrap
from setuptools import setup
# A dirty hack to get around some early import/configuration ambiguities
if sys.version_info[0] >= 3:
import builtins
else:
import __builtin__ as builtins
builtins._ASTROPY_SETUP_ = True
from astropy_helpers.setup_helpers import (register_commands, adjust_compiler,
get_debug_option, get_package_info,
add_command_option)
from astropy_helpers.git_helpers import get_git_devstr
from astropy_helpers.version_helpers import generate_version_py
# Get some values from the setup.cfg
from distutils import config
conf = config.ConfigParser()
conf.read(['setup.cfg'])
metadata = dict(conf.items('metadata'))
PACKAGENAME = metadata.get('package_name', 'packagename')
DESCRIPTION = metadata.get('description', 'Astropy affiliated package')
AUTHOR = metadata.get('author', '')
AUTHOR_EMAIL = metadata.get('author_email', '')
LICENSE = metadata.get('license', 'unknown')
URL = metadata.get('url', 'http://astropy.org')
# Get the long description from the package's docstring
#__import__(PACKAGENAME)
#package = sys.modules[PACKAGENAME]
LONG_DESCRIPTION = "" #package.__doc__
# Store the package name in a built-in variable so it's easy
# to get from other parts of the setup infrastructure
builtins._ASTROPY_PACKAGE_NAME_ = PACKAGENAME
# VERSION should be PEP386 compatible (http://www.python.org/dev/peps/pep-0386)
VERSION = '1.5.dev'
# Indicates if this version is a release version
RELEASE = 'dev' not in VERSION
if not RELEASE:
VERSION += get_git_devstr(False)
# Populate the dict of setup command overrides; this should be done before
# invoking any other functionality from distutils since it can potentially
# modify distutils' behavior.
cmdclassd = register_commands(PACKAGENAME, VERSION, RELEASE)
add_command_option('install', 'with-openmp', 'compile TARDIS with OpenMP',
is_bool=True)
add_command_option('build', 'with-openmp', 'compile TARDIS with OpenMP',
is_bool=True)
add_command_option('develop', 'with-openmp', 'compile TARDIS with OpenMP',
is_bool=True)
# Adjust the compiler in case the default on this platform is to use a
# broken one.
adjust_compiler(PACKAGENAME)
# Freeze build information in version.py
generate_version_py(PACKAGENAME, VERSION, RELEASE,
get_debug_option(PACKAGENAME))
# Treat everything in scripts except README.rst as a script to be installed
scripts = [fname for fname in glob.glob(os.path.join('scripts', '*'))
if os.path.basename(fname) != 'README.rst']
# Get configuration information from all of the various subpackages.
# See the docstring for setup_helpers.update_package_files for more
# details.
package_info = get_package_info()
# Add the project-global data
package_info['package_data'].setdefault(PACKAGENAME, [])
package_info['package_data'][PACKAGENAME].append('data/*')
# Define entry points for command-line scripts
entry_points = {}
for hook in [('prereleaser', 'middle'), ('releaser', 'middle'),
('postreleaser', 'before'), ('postreleaser', 'middle')]:
hook_ep = 'zest.releaser.' + '.'.join(hook)
hook_name = 'astropy.release.' + '.'.join(hook)
hook_func = 'astropy.utils.release:' + '_'.join(hook)
entry_points[hook_ep] = ['%s = %s' % (hook_name, hook_func)]
# Include all .c files, recursively, including those generated by
# Cython, since we can not do this in MANIFEST.in with a "dynamic"
# directory name.
c_files = []
for root, dirs, files in os.walk(PACKAGENAME):
for filename in files:
if filename.endswith('.c'):
c_files.append(
os.path.join(
os.path.relpath(root, PACKAGENAME), filename))
package_info['package_data'][PACKAGENAME].extend(c_files)
setup(name=PACKAGENAME + '-sn',
version=VERSION,
description=DESCRIPTION,
scripts=scripts,
requires=['astropy'],
install_requires=['astropy'],
provides=[PACKAGENAME],
author=AUTHOR,
author_email=AUTHOR_EMAIL,
license=LICENSE,
url=URL,
long_description=LONG_DESCRIPTION,
cmdclass=cmdclassd,
zip_safe=False,
use_2to3=True,
entry_points=entry_points,
**package_info
)
| utkbansal/tardis | setup.py | Python | bsd-3-clause | 4,442 |
from django.db.models import Aggregate, CharField
class Sql_GroupConcat(Aggregate):
function = 'GROUP_CONCAT'
allow_distinct = True
def __init__(self, expression, separator, distinct=False, ordering=None, **extra):
self.separator = separator
super(Sql_GroupConcat, self).__init__(expression,
distinct='DISTINCT ' if distinct else '',
ordering=' ORDER BY %s' % ordering if ordering is not None else '',
separator=' SEPARATOR "%s"' % separator,
output_field=CharField(),
**extra)
def as_mysql(self, compiler, connection):
return super().as_sql(compiler,
connection,
template='%(function)s(%(distinct)s%(expressions)s%(ordering)s%(separator)s)',
separator=' SEPARATOR \'%s\'' % self.separator)
def as_sql(self, compiler, connection, **extra):
return super().as_sql(compiler,
connection,
template='%(function)s(%(distinct)s%(expressions)s%(ordering)s)',
**extra)
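# A minimal usage sketch (model and field names are hypothetical; the import
# path is inferred from this file's location): collapse the titles of each
# severity group into one comma-separated, de-duplicated string.
#
#     from dojo.components.sql_group_concat import Sql_GroupConcat
#
#     Finding.objects.values('severity').annotate(
#         titles=Sql_GroupConcat('title', separator=', ', distinct=True),
#     )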
| rackerlabs/django-DefectDojo | dojo/components/sql_group_concat.py | Python | bsd-3-clause | 1,326 |
import sys
__author__ = 'weijia'
import django.dispatch
before_server_start = django.dispatch.Signal(providing_args=[])
before_server_stop = django.dispatch.Signal(providing_args=[])
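# A minimal receiver sketch (handler name is hypothetical): connect a
# function to run just before the development server starts.
#
#     from django.dispatch import receiver
#
#     @receiver(before_server_start)
#     def warm_up(sender, **kwargs):
#         pass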
class ServerSignalTrigger(object):
def trigger_server_start_if_needed(self):
if len(sys.argv) > 1 and sys.argv[1] == "runserver":
before_server_start.send(sender=self)
def trigger_server_stop_if_needed(self):
if len(sys.argv) > 1 and sys.argv[1] == "runserver":
before_server_stop.send(sender=self)
print "Process exiting" | weijia/djangoautoconf | djangoautoconf/auto_conf_signals.py | Python | bsd-3-clause | 523 |
def extractKoreanovelsCom(item):
'''
Parser for 'koreanovels.com'
'''
vol, chp, frag, postfix = extractVolChapterFragmentPostfix(item['title'])
if not (chp or vol) or "preview" in item['title'].lower():
return None
if item['title'].startswith("Link ") and item['tags'] == ['RSS']:
return buildReleaseMessageWithType(item, 'Level 1 Skeleton', vol, chp, frag=frag, postfix=postfix, tl_type='translated')
if item['title'].startswith("MoS Link ") and item['tags'] == ['RSS']:
return buildReleaseMessageWithType(item, 'Master of Strength', vol, chp, frag=frag, postfix=postfix, tl_type='translated')
tagmap = [
('PRC', 'PRC', 'translated'),
('Loiterous', 'Loiterous', 'oel'),
]
for tagname, name, tl_type in tagmap:
if tagname in item['tags']:
return buildReleaseMessageWithType(item, name, vol, chp, frag=frag, postfix=postfix, tl_type=tl_type)
return False | fake-name/ReadableWebProxy | WebMirror/management/rss_parser_funcs/feed_parse_extractKoreanovelsCom.py | Python | bsd-3-clause | 933 |
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import gc
import IECore
import Gaffer
import GafferTest
class ArrayPlugTest( GafferTest.TestCase ) :
def test( self ) :
a = GafferTest.AddNode()
n = GafferTest.ArrayPlugNode()
self.assertTrue( "e1" in n["in"] )
self.assertTrue( "e2" not in n["in"] )
self.assertEqual( len( n["in"] ), 1 )
self.assertTrue( n["in"]["e1"].isSame( n["in"][0] ) )
n["in"][0].setInput( a["sum"] )
self.assertEqual( len( n["in"] ), 2 )
self.assertTrue( "e1" in n["in"] )
self.assertTrue( "e2" in n["in"] )
n["in"][0].setInput( None )
self.assertTrue( "e1" in n["in"] )
self.assertTrue( "e2" not in n["in"] )
self.assertEqual( len( n["in"] ), 1 )
def testConnectionGaps( self ) :
a = GafferTest.AddNode()
n = GafferTest.ArrayPlugNode()
n["in"][0].setInput( a["sum"] )
n["in"][1].setInput( a["sum"] )
n["in"][2].setInput( a["sum"] )
self.assertEqual( len( n["in"] ), 4 )
self.assertTrue( n["in"]["e1"].getInput(), a["sum"] )
self.assertTrue( n["in"]["e2"].getInput(), a["sum"] )
self.assertTrue( n["in"]["e3"].getInput(), a["sum"] )
self.assertTrue( n["in"]["e4"].getInput() is None )
n["in"][1].setInput( None )
self.assertEqual( len( n["in"] ), 4 )
self.assertTrue( n["in"]["e1"].getInput(), a["sum"] )
self.assertTrue( n["in"]["e2"].getInput() is None )
self.assertTrue( n["in"]["e3"].getInput(), a["sum"] )
self.assertTrue( n["in"]["e4"].getInput() is None )
def testSerialisation( self ) :
s = Gaffer.ScriptNode()
s["a"] = GafferTest.AddNode()
s["n"] = GafferTest.ArrayPlugNode()
s["n"]["in"][0].setInput( s["a"]["sum"] )
s["n"]["in"][1].setInput( s["a"]["sum"] )
s["n"]["in"][2].setInput( s["a"]["sum"] )
s["n"]["in"][1].setInput( None )
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"]["e1"].isSame( s["n"]["in"][0] ) )
self.assertTrue( s["n"]["in"]["e2"].isSame( s["n"]["in"][1] ) )
self.assertTrue( s["n"]["in"]["e3"].isSame( s["n"]["in"][2] ) )
self.assertTrue( s["n"]["in"]["e4"].isSame( s["n"]["in"][3] ) )
self.assertTrue( s["n"]["in"]["e1"].getInput(), s["a"]["sum"] )
self.assertTrue( s["n"]["in"]["e2"].getInput() is None )
self.assertTrue( s["n"]["in"]["e3"].getInput(), s["a"]["sum"] )
self.assertTrue( s["n"]["in"]["e4"].getInput() is None )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( len( s2["n"]["in"] ), 4 )
self.assertTrue( s2["n"]["in"]["e1"].isSame( s2["n"]["in"][0] ) )
self.assertTrue( s2["n"]["in"]["e2"].isSame( s2["n"]["in"][1] ) )
self.assertTrue( s2["n"]["in"]["e3"].isSame( s2["n"]["in"][2] ) )
self.assertTrue( s2["n"]["in"]["e4"].isSame( s2["n"]["in"][3] ) )
self.assertTrue( s2["n"]["in"]["e1"].getInput(), s2["a"]["sum"] )
self.assertTrue( s2["n"]["in"]["e2"].getInput() is None )
self.assertTrue( s2["n"]["in"]["e3"].getInput(), s2["a"]["sum"] )
self.assertTrue( s2["n"]["in"]["e4"].getInput() is None )
def testMaximumInputs( self ) :
a = GafferTest.AddNode()
n = GafferTest.ArrayPlugNode()
# connect all inputs
for i in range( 0, 6 ) :
n["in"][i].setInput( a["sum"] )
self.assertEqual( len( n["in"] ), 6 )
for i in range( 0, 6 ) :
self.assertTrue( n["in"][i].getInput().isSame( a["sum"] ) )
# check that removing the one before the last
# leaves the last in place.
n["in"][4].setInput( None )
self.assertEqual( len( n["in"] ), 6 )
for i in range( 0, 6 ) :
if i != 4 :
self.assertTrue( n["in"][i].getInput().isSame( a["sum"] ) )
else :
self.assertTrue( n["in"][i].getInput() is None )
def testMakeConnectionAndUndoAndRedo( self ) :
s = Gaffer.ScriptNode()
s["a"] = GafferTest.AddNode()
s["n"] = GafferTest.ArrayPlugNode()
with Gaffer.UndoContext( s ) :
s["n"]["in"][0].setInput( s["a"]["sum"] )
self.assertEqual( len( s["n"]["in"] ), 2 )
self.assertTrue( s["n"]["in"][0].isSame( s["n"]["in"]["e1"] ) )
self.assertTrue( s["n"]["in"][1].isSame( s["n"]["in"]["e2"] ) )
s.undo()
self.assertEqual( len( s["n"]["in"] ), 1 )
self.assertTrue( s["n"]["in"][0].isSame( s["n"]["in"]["e1"] ) )
s.redo()
self.assertEqual( len( s["n"]["in"] ), 2 )
self.assertTrue( s["n"]["in"][0].isSame( s["n"]["in"]["e1"] ) )
self.assertTrue( s["n"]["in"][1].isSame( s["n"]["in"]["e2"] ) )
s.undo()
self.assertEqual( len( s["n"]["in"] ), 1 )
self.assertTrue( s["n"]["in"][0].isSame( s["n"]["in"]["e1"] ) )
self.assertTrue( "in" in s["n"] )
self.assertFalse( "in1" in s["n"] )
def testMinimumInputs( self ) :
a = GafferTest.AddNode()
n = Gaffer.Node()
n["in"] = Gaffer.ArrayPlug( "in", element = Gaffer.IntPlug( "e1" ), minSize=3 )
self.assertEqual( len( n["in"] ), 3 )
# connecting to the middle input shouldn't create
# any new inputs, because there is still one free on the end
n["in"]["e2"].setInput( a["sum"] )
self.assertEqual( len( n["in"] ), 3 )
# connecting to the last input should create a new
# one - there should always be one free input on the
# end (until the maximum is reached).
n["in"]["e3"].setInput( a["sum"] )
self.assertEqual( len( n["in"] ), 4 )
n["in"]["e3"].setInput( None )
self.assertEqual( len( n["in"] ), 3 )
def testDeleteAndUndoAndRedo( self ) :
s = Gaffer.ScriptNode()
s["a"] = GafferTest.AddNode()
s["n"] = GafferTest.ArrayPlugNode()
s["n"]["in"]["e1"].setInput( s["a"]["sum"] )
s["n"]["in"]["e2"].setInput( s["a"]["sum"] )
s["n"]["in"]["e3"].setInput( s["a"]["sum"] )
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"]["e1"].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"]["e2"].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"]["e3"].getInput().isSame( s["a"]["sum"] ) )
with Gaffer.UndoContext( s ) :
s.deleteNodes( s, Gaffer.StandardSet( [ s["n"] ] ) )
self.assertFalse( "n" in s )
s.undo()
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"]["e1"].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"]["e2"].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"]["e3"].getInput().isSame( s["a"]["sum"] ) )
s.redo()
self.assertFalse( "n" in s )
s.undo()
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"]["e1"].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"]["e2"].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"]["e3"].getInput().isSame( s["a"]["sum"] ) )
def testDeleteInputNodeAndUndoAndRedo( self ) :
s = Gaffer.ScriptNode()
s["a"] = GafferTest.AddNode()
s["n"] = GafferTest.ArrayPlugNode()
s["n"]["in"][0].setInput( s["a"]["sum"] )
s["n"]["in"][1].setInput( s["a"]["sum"] )
s["n"]["in"][2].setInput( s["a"]["sum"] )
n = s["n"]
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"][0].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"][1].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"][2].getInput().isSame( s["a"]["sum"] ) )
with Gaffer.UndoContext( s ) :
s.deleteNodes( s, Gaffer.StandardSet( [ s["a"] ] ) )
self.assertFalse( "a" in s )
s.undo()
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"][0].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"][1].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"][2].getInput().isSame( s["a"]["sum"] ) )
s.redo()
self.assertFalse( "a" in s )
s.undo()
self.assertEqual( len( s["n"]["in"] ), 4 )
self.assertTrue( s["n"]["in"][0].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"][1].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["in"][2].getInput().isSame( s["a"]["sum"] ) )
def testFixedLengthDynamic( self ) :
s = Gaffer.ScriptNode()
s["a"] = GafferTest.AddNode()
s["n"] = Gaffer.Node()
s["n"]["a"] = Gaffer.ArrayPlug( "a", element = Gaffer.IntPlug(), minSize = 4, maxSize = 4, flags = Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic )
s["n"]["a"][1].setInput( s["a"]["sum"] )
s["n"]["a"][2].setInput( s["a"]["sum"] )
self.assertEqual( s["n"]["a"].minSize(), 4 )
self.assertEqual( s["n"]["a"].maxSize(), 4 )
self.assertEqual( len( s["n"]["a"] ), 4 )
self.assertTrue( s["n"]["a"][0].getInput() is None )
self.assertTrue( s["n"]["a"][1].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["a"][1].getInput().isSame( s["a"]["sum"] ) )
self.assertTrue( s["n"]["a"][3].getInput() is None )
s2 = Gaffer.ScriptNode()
s2.execute( s.serialise() )
self.assertEqual( s2["n"]["a"].minSize(), 4 )
self.assertEqual( s2["n"]["a"].maxSize(), 4 )
self.assertEqual( len( s2["n"]["a"] ), 4 )
self.assertTrue( s2["n"]["a"][0].getInput() is None )
self.assertTrue( s2["n"]["a"][1].getInput().isSame( s2["a"]["sum"] ) )
self.assertTrue( s2["n"]["a"][1].getInput().isSame( s2["a"]["sum"] ) )
self.assertTrue( s2["n"]["a"][3].getInput() is None )
def testPythonElement( self ) :
class PythonElement( Gaffer.Plug ) :
def __init__( self, name = "PythonElement", direction = Gaffer.Plug.Direction.In, flags = Gaffer.Plug.Flags.Default ) :
Gaffer.Plug.__init__( self, name, direction, flags )
def createCounterpart( self, name, direction ) :
return PythonElement( name, direction, self.getFlags() )
n = Gaffer.Node()
n["a"] = Gaffer.ArrayPlug( element = PythonElement() )
self.assertEqual( len( n["a"] ), 1 )
self.assertTrue( isinstance( n["a"][0], PythonElement ) )
p = PythonElement()
n["a"][0].setInput( p )
self.assertEqual( len( n["a"] ), 2 )
self.assertTrue( isinstance( n["a"][1], PythonElement ) )
def testTopLevelConnection( self ) :
n = Gaffer.Node()
n["a"] = Gaffer.ArrayPlug( element = Gaffer.IntPlug() )
n["b"] = Gaffer.ArrayPlug( element = Gaffer.IntPlug() )
n["b"].setInput( n["a"] )
def assertInput( plug, input ) :
self.assertEqual( len( plug ), len( input ) )
for i in range( 0, len( plug ) ) :
self.assertTrue( plug[i].getInput().isSame( input[i] ) )
assertInput( n["b"], n["a"] )
a = GafferTest.AddNode()
n["a"][0].setInput( a["sum"] )
self.assertEqual( len( n["a"] ), 2 )
assertInput( n["b"], n["a"] )
n["a"][1].setInput( a["sum"] )
self.assertEqual( len( n["a"] ), 3 )
assertInput( n["b"], n["a"] )
n["a"][0].setInput( None )
self.assertEqual( len( n["a"] ), 3 )
assertInput( n["b"], n["a"] )
def testOnlyOneChildType( self ) :
p = Gaffer.ArrayPlug( element = Gaffer.IntPlug() )
self.assertTrue( p.acceptsChild( Gaffer.IntPlug() ) )
self.assertFalse( p.acceptsChild( Gaffer.FloatPlug() ) )
def tearDown( self ) :
# some bugs in the InputGenerator only showed themselves when
# the ScriptNode was deleted during garbage collection, often
# in totally unrelated tests. so we run the garbage collector
# here to localise any problems to this test, making them
# easier to diagnose and fix.
while gc.collect() :
pass
IECore.RefCounted.collectGarbage()
if __name__ == "__main__":
unittest.main()
| chippey/gaffer | python/GafferTest/ArrayPlugTest.py | Python | bsd-3-clause | 12,836 |
from maskgen.jpeg import utils
import unittest
from test_support import TestSupport
class TestJpegUtils(TestSupport):
def test_load(self):
self.assertEqual(91,utils.estimate_qf(self.locateFile('tests/images/test_project1.jpg')))
if __name__ == '__main__':
unittest.main()
| rwgdrummer/maskgen | tests/test_utils.py | Python | bsd-3-clause | 288 |
"""autogenerated by genpy from mapping_dlut/Map.msg. Do not edit."""
import sys
python3 = True if sys.hexversion > 0x03000000 else False
import genpy
import struct
import mapping_dlut.msg
import std_msgs.msg
class Map(genpy.Message):
_md5sum = "e6ab6c8862bf55f4e1b5fd48f03f1a7d"
_type = "mapping_dlut/Map"
_has_header = True #flag to mark the presence of a Header object
_full_text = """Header header
###########################################################
#Elevation Map Configuration
#half size of the map;
int32 nHalfSize
#Elevation Grid Resolution, in meter;
float32 fResolution
#x coordinate of the center of the map in world frame
float32 fCenterX
#y coordinate of the center of the map in world frame
float32 fCenterY
#maximum elevation of the map in world frame
float32 fMapMaxElevation
#minimum elevation of the map in world frame
float32 fMapMinElevation
###########################################################
###########################################################
#Vehicle Status
#vehicle x in world frame, in meters
float32 fVehicleX
#vehicle y in world frame, in meters
float32 fVehicleY
#vehicle z in world frame, in meters
float32 fVehicleZ
#vehicle heading angle, in rad
float32 fVehicleHeading
###########################################################
Grid[] map
================================================================================
MSG: std_msgs/Header
# Standard metadata for higher-level stamped data types.
# This is generally used to communicate timestamped data
# in a particular coordinate frame.
#
# sequence ID: consecutively increasing ID
uint32 seq
#Two-integer timestamp that is expressed as:
# * stamp.sec: seconds (stamp_secs) since epoch (in Python the variable is called 'secs')
# * stamp.nsec: nanoseconds since stamp_secs (in Python the variable is called 'nsecs')
# time-handling sugar is provided by the client library
time stamp
#Frame this data is associated with
# 0: no frame
# 1: global frame
string frame_id
================================================================================
MSG: mapping_dlut/Grid
#Header header
#maximum elevation in this grid;
#float32 fMaxElevation
#minimum elevation in this grid;
#float32 fMinElevation
#average elevation in this grid;
#float32 fAvgElevation
#points falling in this grid;
#int32 nPointCount
#up point falling in this grid;
#int32 nUpCount
#down point falling in this grid;
#int32 nDownCount
#average elevation in this grid;
float32 fAvgElevation
#proability
int8 proability
#texture
int8 texture
"""
__slots__ = ['header','nHalfSize','fResolution','fCenterX','fCenterY','fMapMaxElevation','fMapMinElevation','fVehicleX','fVehicleY','fVehicleZ','fVehicleHeading','map']
_slot_types = ['std_msgs/Header','int32','float32','float32','float32','float32','float32','float32','float32','float32','float32','mapping_dlut/Grid[]']
def __init__(self, *args, **kwds):
"""
Constructor. Any message fields that are implicitly/explicitly
set to None will be assigned a default value. The recommend
use is keyword arguments as this is more robust to future message
changes. You cannot mix in-order arguments and keyword arguments.
The available fields are:
header,nHalfSize,fResolution,fCenterX,fCenterY,fMapMaxElevation,fMapMinElevation,fVehicleX,fVehicleY,fVehicleZ,fVehicleHeading,map
:param args: complete set of field values, in .msg order
:param kwds: use keyword arguments corresponding to message field names
to set specific fields.
"""
if args or kwds:
super(Map, self).__init__(*args, **kwds)
#message fields cannot be None, assign default values for those that are
if self.header is None:
self.header = std_msgs.msg.Header()
if self.nHalfSize is None:
self.nHalfSize = 0
if self.fResolution is None:
self.fResolution = 0.
if self.fCenterX is None:
self.fCenterX = 0.
if self.fCenterY is None:
self.fCenterY = 0.
if self.fMapMaxElevation is None:
self.fMapMaxElevation = 0.
if self.fMapMinElevation is None:
self.fMapMinElevation = 0.
if self.fVehicleX is None:
self.fVehicleX = 0.
if self.fVehicleY is None:
self.fVehicleY = 0.
if self.fVehicleZ is None:
self.fVehicleZ = 0.
if self.fVehicleHeading is None:
self.fVehicleHeading = 0.
if self.map is None:
self.map = []
else:
self.header = std_msgs.msg.Header()
self.nHalfSize = 0
self.fResolution = 0.
self.fCenterX = 0.
self.fCenterY = 0.
self.fMapMaxElevation = 0.
self.fMapMinElevation = 0.
self.fVehicleX = 0.
self.fVehicleY = 0.
self.fVehicleZ = 0.
self.fVehicleHeading = 0.
self.map = []
def _get_types(self):
"""
internal API method
"""
return self._slot_types
def serialize(self, buff):
"""
serialize message into buffer
:param buff: buffer, ``StringIO``
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_i9f.pack(_x.nHalfSize, _x.fResolution, _x.fCenterX, _x.fCenterY, _x.fMapMaxElevation, _x.fMapMinElevation, _x.fVehicleX, _x.fVehicleY, _x.fVehicleZ, _x.fVehicleHeading))
length = len(self.map)
buff.write(_struct_I.pack(length))
for val1 in self.map:
_x = val1
buff.write(_struct_f2b.pack(_x.fAvgElevation, _x.proability, _x.texture))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize(self, str):
"""
unpack serialized message in str into this message instance
:param str: byte array of serialized message, ``str``
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.map is None:
self.map = None
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 40
(_x.nHalfSize, _x.fResolution, _x.fCenterX, _x.fCenterY, _x.fMapMaxElevation, _x.fMapMinElevation, _x.fVehicleX, _x.fVehicleY, _x.fVehicleZ, _x.fVehicleHeading,) = _struct_i9f.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.map = []
for i in range(0, length):
val1 = mapping_dlut.msg.Grid()
_x = val1
start = end
end += 6
(_x.fAvgElevation, _x.proability, _x.texture,) = _struct_f2b.unpack(str[start:end])
self.map.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
def serialize_numpy(self, buff, numpy):
"""
serialize message with numpy array types into buffer
:param buff: buffer, ``StringIO``
:param numpy: numpy python module
"""
try:
_x = self
buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))
_x = self.header.frame_id
length = len(_x)
if python3 or type(_x) == unicode:
_x = _x.encode('utf-8')
length = len(_x)
if python3:
buff.write(struct.pack('<I%sB'%length, length, *_x))
else:
buff.write(struct.pack('<I%ss'%length, length, _x))
_x = self
buff.write(_struct_i9f.pack(_x.nHalfSize, _x.fResolution, _x.fCenterX, _x.fCenterY, _x.fMapMaxElevation, _x.fMapMinElevation, _x.fVehicleX, _x.fVehicleY, _x.fVehicleZ, _x.fVehicleHeading))
length = len(self.map)
buff.write(_struct_I.pack(length))
for val1 in self.map:
_x = val1
buff.write(_struct_f2b.pack(_x.fAvgElevation, _x.proability, _x.texture))
except struct.error as se: self._check_types(struct.error("%s: '%s' when writing '%s'" % (type(se), str(se), str(_x))))
except TypeError as te: self._check_types(ValueError("%s: '%s' when writing '%s'" % (type(te), str(te), str(_x))))
def deserialize_numpy(self, str, numpy):
"""
unpack serialized message in str into this message instance using numpy for array types
:param str: byte array of serialized message, ``str``
:param numpy: numpy python module
"""
try:
if self.header is None:
self.header = std_msgs.msg.Header()
if self.map is None:
self.map = None
end = 0
_x = self
start = end
end += 12
(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs,) = _struct_3I.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
start = end
end += length
if python3:
self.header.frame_id = str[start:end].decode('utf-8')
else:
self.header.frame_id = str[start:end]
_x = self
start = end
end += 40
(_x.nHalfSize, _x.fResolution, _x.fCenterX, _x.fCenterY, _x.fMapMaxElevation, _x.fMapMinElevation, _x.fVehicleX, _x.fVehicleY, _x.fVehicleZ, _x.fVehicleHeading,) = _struct_i9f.unpack(str[start:end])
start = end
end += 4
(length,) = _struct_I.unpack(str[start:end])
self.map = []
for i in range(0, length):
val1 = mapping_dlut.msg.Grid()
_x = val1
start = end
end += 6
(_x.fAvgElevation, _x.proability, _x.texture,) = _struct_f2b.unpack(str[start:end])
self.map.append(val1)
return self
except struct.error as e:
raise genpy.DeserializationError(e) #most likely buffer underfill
_struct_I = genpy.struct_I
_struct_f2b = struct.Struct("<f2b")
_struct_3I = struct.Struct("<3I")
_struct_i9f = struct.Struct("<i9f")
| WuNL/mylaptop | install/lib/python2.7/dist-packages/mapping_dlut/msg/_Map.py | Python | bsd-3-clause | 10,485 |
"""Utilities for working with data structures.
Version Added:
2.1
"""
from __future__ import unicode_literals
from collections import OrderedDict
from django_evolution.compat import six
def filter_dup_list_items(items):
"""Return list items with duplicates filtered out.
The order of items will be preserved, but only the first occurrence of
any given item will remain in the list.
Version Added:
2.1
Args:
items (list):
The list of items.
Returns:
list:
The resulting de-duplicated list of items.
"""
return list(six.iterkeys(OrderedDict(
(item, True)
for item in items
)))
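# A minimal sketch of the behaviour (values are hypothetical): order is
# preserved and only the first occurrence of each item survives.
#
#     >>> filter_dup_list_items(['a', 'b', 'a', 'c', 'b'])
#     ['a', 'b', 'c']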
def merge_dicts(dest, source):
"""Merge two dictionaries together.
This will recursively merge a source dictionary into a destination
dictionary with the following rules:
* Any keys in the source that aren't in the destination will be placed
directly to the destination (using the same instance of the value, not
a copy).
* Any lists that are in both the source and destination will be combined
by appending the source list to the destinataion list (and this will not
recurse into lists).
* Any dictionaries that are in both the source and destinataion will be
merged using this function.
* Any keys that are not a list or dictionary that exist in both
dictionaries will result in a :py:exc:`TypeError`.
Version Added:
2.1
Args:
dest (dict):
The destination dictionary to merge into.
source (dict):
The source dictionary to merge into the destination.
Raises:
TypeError:
A key was present in both dictionaries with a type that could not
be merged.
"""
for key, value in six.iteritems(source):
if key in dest:
if isinstance(value, list):
if not isinstance(dest[key], list):
raise TypeError(
'Cannot merge a list into a %r for key "%s".'
% (type(dest[key]), key))
dest[key] += value
elif isinstance(value, dict):
if not isinstance(dest[key], dict):
raise TypeError(
'Cannot merge a dictionary into a %r for key "%s".'
% (type(dest[key]), key))
merge_dicts(dest[key], value)
else:
raise TypeError(
'Key "%s" was not an expected type (found %r) '
'when merging dictionaries.'
% (key, type(value)))
else:
dest[key] = value
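# A minimal sketch of the merge rules (values are hypothetical): lists are
# concatenated, nested dicts are merged recursively, and keys only present
# in the source are assigned directly. Note that ``dest`` is mutated in
# place and the function returns ``None``.
#
#     >>> dest = {'apps': ['a'], 'opts': {'debug': True}}
#     >>> merge_dicts(dest, {'apps': ['b'], 'opts': {'tz': 'UTC'}})
#     >>> dest
#     {'apps': ['a', 'b'], 'opts': {'debug': True, 'tz': 'UTC'}}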
| beanbaginc/django-evolution | django_evolution/utils/datastructures.py | Python | bsd-3-clause | 2,717 |
#!/usr/bin/env python
from build import ninja_common
build = ninja_common.Build('serial/seriald')
build.build_cmd(
'auv-seriald',
[
'main.cpp',
'config.cpp',
'device.cpp',
'device_list.cpp',
'sub_status.cpp',
],
deps=['nanomsg'],
auv_deps=['shm', 'auvlog', 'auv-serial', 'fmt'],)
#build.test_gtest(
# 'seriald-new',
# [
# 'config.cpp',
# 'device.cpp',
# 'device_list.cpp',
# 'sub_status.cpp',
# # 'test/config/config.cpp',
# # 'test/device/device.cpp',
# ],
# deps=['nanomsg', 'cppformat'],
# auv_deps=['shm', 'auvlog', 'auv-serial'],)
| cuauv/software | serial/seriald/configure.py | Python | bsd-3-clause | 740 |
# Copyright (c) 2017 pandas-gbq Authors All rights reserved.
# Use of this source code is governed by a BSD-style
# license that can be found in the LICENSE file.
"""Helper methods for BigQuery schemas"""
import copy
# API may return data types as legacy SQL, so maintain a mapping of aliases
# from standard SQL to legacy data types.
_TYPE_ALIASES = {
"BOOL": "BOOLEAN",
"FLOAT64": "FLOAT",
"INT64": "INTEGER",
"STRUCT": "RECORD",
}
def to_pandas_gbq(client_schema):
"""Given a sequence of :class:`google.cloud.bigquery.schema.SchemaField`,
return a schema in pandas-gbq API format.
"""
remote_fields = [
field_remote.to_api_repr() for field_remote in client_schema
]
for field in remote_fields:
field["type"] = field["type"].upper()
field["mode"] = field["mode"].upper()
return {"fields": remote_fields}
def to_google_cloud_bigquery(pandas_gbq_schema):
"""Given a schema in pandas-gbq API format,
return a sequence of :class:`google.cloud.bigquery.schema.SchemaField`.
"""
from google.cloud import bigquery
# Need to convert from JSON representation to format used by client library.
schema = add_default_nullable_mode(pandas_gbq_schema)
return [
bigquery.SchemaField.from_api_repr(field) for field in schema["fields"]
]
def _clean_schema_fields(fields):
"""Return a sanitized version of the schema for comparisons.
    The ``mode`` and ``description`` properties are ignored because they
    are not generated by :func:`pandas_gbq.schema.generate_bq_schema`.
"""
fields_sorted = sorted(fields, key=lambda field: field["name"])
clean_schema = []
for field in fields_sorted:
field_type = field["type"].upper()
field_type = _TYPE_ALIASES.get(field_type, field_type)
clean_schema.append({"name": field["name"], "type": field_type})
return clean_schema
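# Hedged example (illustrative input): standard-SQL aliases normalize to the
# legacy names and extra keys such as "mode" are dropped.
#
#     >>> _clean_schema_fields([{'name': 'b', 'type': 'bool',
#     ...                        'mode': 'NULLABLE'}])
#     [{'name': 'b', 'type': 'BOOLEAN'}]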
def schema_is_subset(schema_remote, schema_local):
"""Indicate whether the schema to be uploaded is a subset
Compare the BigQuery table identified in the parameters with
the schema passed in and indicate whether a subset of the fields in
the former are present in the latter. Order is not considered.
Parameters
----------
schema_remote : dict
Schema for comparison. Each item of ``fields`` should have a 'name'
and a 'type'
schema_local : dict
Schema for comparison. Each item of ``fields`` should have a 'name'
and a 'type'
Returns
-------
bool
Whether the passed schema is a subset
"""
fields_remote = _clean_schema_fields(schema_remote.get("fields", []))
fields_local = _clean_schema_fields(schema_local.get("fields", []))
return all(field in fields_remote for field in fields_local)
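# Hedged example (illustrative schemas): every local field must appear in the
# remote schema; extra remote fields do not matter.
#
#     >>> remote = {'fields': [{'name': 'a', 'type': 'INT64'},
#     ...                      {'name': 'b', 'type': 'STRING'}]}
#     >>> schema_is_subset(remote, {'fields': [{'name': 'a', 'type': 'INTEGER'}]})
#     True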
def generate_bq_schema(dataframe, default_type="STRING"):
"""Given a passed dataframe, generate the associated Google BigQuery schema.
Arguments:
        dataframe (pandas.DataFrame): DataFrame to generate the schema for.
default_type : string
The default big query type in case the type of the column
does not exist in the schema.
"""
# If you update this mapping, also update the table at
# `docs/source/writing.rst`.
type_mapping = {
"i": "INTEGER",
"b": "BOOLEAN",
"f": "FLOAT",
"O": "STRING",
"S": "STRING",
"U": "STRING",
"M": "TIMESTAMP",
}
fields = []
for column_name, dtype in dataframe.dtypes.iteritems():
fields.append(
{
"name": column_name,
"type": type_mapping.get(dtype.kind, default_type),
}
)
return {"fields": fields}
def update_schema(schema_old, schema_new):
"""
Given an old BigQuery schema, update it with a new one.
Where a field name is the same, the new will replace the old. Any
new fields not present in the old schema will be added.
Arguments:
schema_old: the old schema to update
schema_new: the new schema which will overwrite/extend the old
"""
old_fields = schema_old["fields"]
new_fields = schema_new["fields"]
output_fields = list(old_fields)
field_indices = {field["name"]: i for i, field in enumerate(output_fields)}
for field in new_fields:
name = field["name"]
        if name in field_indices:
            # replace the old field with the new field of the same name
            output_fields[field_indices[name]] = field
        else:
            # append fields that are new, as the docstring above promises
            output_fields.append(field)
    return {"fields": output_fields}
def add_default_nullable_mode(schema):
"""Manually create the schema objects, adding NULLABLE mode.
Workaround for error in SchemaField.from_api_repr, which required
"mode" to be set:
https://github.com/GoogleCloudPlatform/google-cloud-python/issues/4456
"""
# Returns a copy rather than modifying the mutable arg,
# per Issue #277
result = copy.deepcopy(schema)
for field in result["fields"]:
field.setdefault("mode", "NULLABLE")
return result
def remove_policy_tags(schema):
"""Manually create the schema objects, removing policyTags.
Workaround for 403 error with policy tags, which are not required in a load
job: https://github.com/googleapis/python-bigquery/pull/557
"""
# Returns a copy rather than modifying the mutable arg,
# per Issue #277
result = copy.deepcopy(schema)
for field in result["fields"]:
if "policyTags" in field:
del field["policyTags"]
return result
| pydata/pandas-gbq | pandas_gbq/schema.py | Python | bsd-3-clause | 5,518 |
# Copyright 2014, Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain
# rights in this software.
from __future__ import absolute_import
from __future__ import division
import toyplot.cairo.png
def render(canvas, fobj=None, width=None, height=None, scale=None):
"""Render the PNG bitmap representation of a canvas.
By default, canvas dimensions in CSS pixels are mapped directly to pixels in
the output PNG image. Use one of `width`, `height`, or `scale` to override
this behavior.
Parameters
----------
canvas: :class:`toyplot.canvas.Canvas`
Canvas to be rendered.
fobj: file-like object or string, optional
The file to write. Use a string filepath to write data directly to disk.
If `None` (the default), the PNG data will be returned to the caller
instead.
width: number, optional
Specify the width of the output image in pixels.
height: number, optional
Specify the height of the output image in pixels.
scale: number, optional
Ratio of output image pixels to `canvas` pixels.
Returns
-------
png: PNG image data, or `None`
PNG representation of `canvas`, or `None` if the caller specifies the
`fobj` parameter.
Notes
-----
The output PNG is currently rendered using
:func:`toyplot.cairo.png.render()`. This may change in the future.
"""
return toyplot.cairo.png.render(canvas, fobj, width, height, scale)
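# Hedged usage sketch (assumes an existing canvas object): pass a filepath to
# write straight to disk, or omit fobj to get the PNG bytes back.
#
#     >>> toyplot.png.render(canvas, "figure.png")
#     >>> data = toyplot.png.render(canvas)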
def render_frames(canvas, width=None, height=None, scale=None):
"""Render a canvas as a sequence of PNG images.
By default, canvas dimensions in CSS pixels are mapped directly to pixels in
the output PNG images. Use one of `width`, `height`, or `scale` to override
this behavior.
Parameters
----------
canvas: :class:`toyplot.canvas.Canvas`
Canvas to be rendered.
width: number, optional
Specify the width of the output image in pixels.
height: number, optional
Specify the height of the output image in pixels.
scale: number, optional
Ratio of output image pixels to `canvas` pixels.
Returns
-------
frames: Python generator expression that returns each PNG image in the sequence.
The caller must iterate over the returned frames and is responsible for all
subsequent processing, including disk I/O, video compression, etc.
Notes
-----
The output PNG images are currently rendered using
:func:`toyplot.cairo.png.render_frames()`. This may change in the future.
Examples
--------
    >>> for frame, png in enumerate(toyplot.png.render_frames(canvas)):
    ...   open("frame-%s.png" % frame, "wb").write(png)
"""
return toyplot.cairo.png.render_frames(canvas, width, height, scale)
| cmorgan/toyplot | toyplot/png.py | Python | bsd-3-clause | 2,849 |
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.core.exceptions import ObjectDoesNotExist
from django.views.decorators.cache import never_cache
from django.http import HttpResponse, HttpResponseRedirect
from session_csrf import anonymous_csrf
from ..models import ZLB, ZLBVirtualServer, ZLBVirtualServerRule, ZLBVirtualServerProtection
from ..models import ZLBRule, ZLBProtection, Offender, ZLBVirtualServerPref
from ..forms import ZLBForm, VirtualServerConfirm
from BanHammer.blacklist.management import zeus
import BanHammer.blacklist.tasks as tasks
from BanHammer import settings
@anonymous_csrf
@never_cache
def index(request, zlb=None, action=None):
request.session['order_by'] = request.GET.get('order_by', 'hostname')
request.session['order'] = request.GET.get('order', 'asc')
order_by = request.session.get('order_by', 'address')
order = request.session.get('order', 'asc')
zlbs = ZLB.objects.all()
if order_by == 'created_date':
zlbs = sorted(list(zlbs), key=lambda zlb: zlb.created_date)
elif order_by == 'updated_date':
zlbs = sorted(list(zlbs), key=lambda zlb: zlb.updated_date)
elif order_by == 'name':
zlbs = sorted(list(zlbs), key=lambda zlb: zlb.name)
elif order_by == 'hostname':
zlbs = sorted(list(zlbs), key=lambda zlb: zlb.hostname)
elif order_by == 'datacenter':
zlbs = sorted(list(zlbs), key=lambda zlb: zlb.datacenter)
if order == 'desc':
zlbs.reverse()
data = {'zlbs': zlbs}
if action == 'update':
data['zlb'] = zlb
data['action'] = 'update'
data['testing_env'] = settings.TESTING_ENV
return render_to_response(
'zlb/index.html',
data,
context_instance = RequestContext(request)
)
@anonymous_csrf
def new(request):
if request.method == 'POST':
form = ZLBForm(request.POST)
if form.is_valid():
name = form.cleaned_data['name']
hostname = form.cleaned_data['hostname']
datacenter = form.cleaned_data['datacenter']
doc_url = form.cleaned_data['doc_url']
login = form.cleaned_data['login']
password = form.cleaned_data['password']
comment = form.cleaned_data['comment']
zlb = ZLB(
name=name,
hostname=hostname,
datacenter=datacenter,
doc_url=doc_url,
login=login,
password=password,
comment=comment,
)
zlb.save()
return HttpResponseRedirect('/zlbs')
else:
form = ZLBForm()
return render_to_response(
'zlb/new.html',
{'form': form},
context_instance = RequestContext(request)
)
@anonymous_csrf
def edit(request, id):
if request.method == 'POST':
form = ZLBForm(request.POST)
if form.is_valid():
zlb = ZLB.objects.get(id=id)
zlb.name = form.cleaned_data['name']
zlb.hostname = form.cleaned_data['hostname']
zlb.datacenter = form.cleaned_data['datacenter']
zlb.doc_url = form.cleaned_data['doc_url']
zlb.comment = form.cleaned_data['comment']
zlb.login = form.cleaned_data['login']
if form.cleaned_data['password']:
zlb.password = form.cleaned_data['password']
zlb.save()
return HttpResponseRedirect('/zlbs')
else:
initial = ZLB.objects.get(id=id)
initial = initial.__dict__
id = initial['id']
initial['password'] = ''
form = ZLBForm(initial)
return render_to_response(
'zlb/edit.html',
{'form': form, 'id': id},
context_instance = RequestContext(request)
)
@anonymous_csrf
def delete(request, id):
zlb = ZLB.objects.get(id=id)
zlb.delete()
return HttpResponseRedirect('/zlbs')
@anonymous_csrf
@never_cache
def show(request, id):
zlb = ZLB.objects.get(id=id)
if zlb.updating:
return render_to_response(
'zlb/updating.html',
{'zlb': zlb,},
context_instance = RequestContext(request)
)
vs = ZLBVirtualServer.objects.filter(zlb_id=zlb.id)
prefs_o = ZLBVirtualServerPref.objects.filter(zlb=zlb)
prefs = {}
for p in prefs_o:
prefs[p.vs_name] = p
return render_to_response(
'zlb/show.html',
{'zlb': zlb,
'prefs': prefs,
'vs': vs,
'testing_env': settings.TESTING_ENV,},
context_instance = RequestContext(request)
)
@anonymous_csrf
@never_cache
def update(request, id):
tasks.update_zlb.delay(id)
zlb = ZLB.objects.get(id=id)
return HttpResponseRedirect('/zlbs')
def _parse_addr(addresses):
    """Split a comma-separated address list and, where a matching Offender
    record exists (keyed on address and optional CIDR suffix), substitute
    the Offender object for the raw address string."""
addr_list = addresses.split(', ')
addresses = []
for addr in addr_list:
network = addr.split('/')
addr = network[0]
if len(network) == 2:
cidr = network[1]
else:
cidr = None
if cidr:
offender = Offender.objects.filter(address=addr, cidr=cidr)
else:
offender = Offender.objects.filter(address=addr)
if offender.count() != 0:
addresses.append(offender[0])
else:
addresses.append(addr)
return addresses
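# Hedged example (hypothetical data): _parse_addr("10.0.0.1, 10.0.0.0/24")
# returns a two-element list where each entry is either the matching Offender
# object or, when no record exists, the bare address portion (any CIDR suffix
# is dropped from the fallback string).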
@anonymous_csrf
def index_protection(request, zlb_id):
zlb = ZLB.objects.get(id=zlb_id)
protections = ZLBProtection.objects.filter(zlb_id=zlb_id)
for p in protections:
p.allowed_addresses = _parse_addr(p.allowed_addresses)
p.banned_addresses = _parse_addr(p.banned_addresses)
p.virtual_servers = ZLBVirtualServerProtection.objects.filter(zlb_id=zlb_id, protection_id=p.id)
return render_to_response(
'zlb/protections.html',
{'zlb': zlb,
'protections': protections,},
context_instance = RequestContext(request)
)
@anonymous_csrf
def index_rules(request, zlb_id):
zlb = ZLB.objects.get(id=zlb_id)
rules = ZLBRule.objects.filter(zlb_id=zlb_id)
for rule in rules:
rule.virtual_servers = ZLBVirtualServerRule.objects.filter(zlb_id=zlb_id, rule_id=rule.id)
return render_to_response(
'zlb/rules.html',
{'zlb': zlb,
'rules': rules,},
context_instance = RequestContext(request)
)
@never_cache
@anonymous_csrf
def virtual_server(request, zlb_id, vs_id):
if request.method == 'POST':
form = VirtualServerConfirm(request.POST)
if form.is_valid():
confirm = form.cleaned_data['confirm']
vs = ZLBVirtualServer.objects.get(id=vs_id)
pref = ZLBVirtualServerPref.objects.filter(zlb_id=zlb_id,vs_name=vs.name)
if pref.count() == 0:
p = ZLBVirtualServerPref(
zlb_id=zlb_id,
vs_name=vs.name,
favorite=False,
confirm=confirm,
)
p.save()
else:
pref = pref[0]
pref.confirm = confirm
pref.save()
return HttpResponseRedirect('/zlb/%s/virtual_server/%s' % (zlb_id, vs_id))
else:
form = VirtualServerConfirm()
zlb = ZLB.objects.get(id=zlb_id)
virtual_server = ZLBVirtualServer.objects.get(id=vs_id)
prefs = ZLBVirtualServerPref.objects.filter(zlb=zlb,vs_name=virtual_server.name)
rules = ZLBVirtualServerRule.objects.filter(virtualserver=virtual_server)
protections = ZLBVirtualServerProtection.objects.filter(virtualserver=virtual_server)
for p in protections:
p.protection.allowed_addresses = _parse_addr(p.protection.allowed_addresses)
p.protection.banned_addresses = _parse_addr(p.protection.banned_addresses)
return render_to_response(
'zlb/virtual_server.html',
{'zlb': zlb,
'virtual_server': virtual_server,
'prefs': prefs,
'rules': rules,
'protections': protections,
'form': form,},
context_instance = RequestContext(request)
)
@never_cache
@anonymous_csrf
def virtual_server_name(request, zlb_id, vs_name):
virtual_server_o = ZLBVirtualServer.objects.get(zlb_id=zlb_id, name=vs_name)
return virtual_server(request, zlb_id, virtual_server_o.id)
@anonymous_csrf
def virtual_server_favorite(request, zlb_id, vs_id):
vs = ZLBVirtualServer.objects.get(id=vs_id)
pref = ZLBVirtualServerPref.objects.filter(zlb_id=zlb_id,vs_name=vs.name)
if pref.count() == 0:
p = ZLBVirtualServerPref(
zlb_id=zlb_id,
vs_name=vs.name,
favorite=True,
)
p.save()
else:
pref = pref[0]
pref.favorite = True
pref.save()
return HttpResponseRedirect('/zlb/%s/virtual_server/%s' % (zlb_id, vs_id))
@anonymous_csrf
def virtual_server_unfavorite(request, zlb_id, vs_id):
vs = ZLBVirtualServer.objects.get(id=vs_id)
pref = ZLBVirtualServerPref.objects.get(zlb_id=zlb_id,vs_name=vs.name)
pref.favorite = False
pref.save()
return HttpResponseRedirect('/zlb/%s/virtual_server/%s' % (zlb_id, vs_id))
@anonymous_csrf
def virtual_server_unconfirm(request, zlb_id, vs_id):
vs = ZLBVirtualServer.objects.get(id=vs_id)
pref = ZLBVirtualServerPref.objects.get(zlb_id=zlb_id,vs_name=vs.name)
pref.confirm = ''
pref.save()
return HttpResponseRedirect('/zlb/%s/virtual_server/%s' % (zlb_id, vs_id))
| mozilla/BanHammer | BanHammer/blacklist/views/zlb.py | Python | bsd-3-clause | 9,627 |
from __future__ import division, print_function, absolute_import
import selfsup
import tensorflow as tf
import os
from .base import Method
from collections import OrderedDict
import deepdish as dd
import numpy as np
import itertools
import selfsup.jigsaw
PERMUTATIONS = selfsup.jigsaw.load_permutations(selfsup.res('jigsaw/permutations_100_max.bin'))
def _make_random_patches(x, y, patch_size, permutations, size=3):
    """Crop one random patch from each cell of a ``size`` x ``size`` grid
    over every image in ``x``, then reorder each image's patches according
    to the (1-based) permutation selected by ``y``."""
batch_size = x.get_shape().as_list()[0]
crop_size = x.get_shape().as_list()[1]
perm_idx = tf.expand_dims(y, 1)
perm = tf.gather_nd(permutations, perm_idx)
WINDOW_SIZE = crop_size // size
N = x.get_shape().as_list()[0]
C = x.get_shape().as_list()[3]
patches = []
for i, j in dd.multi_range(size, size):
M = WINDOW_SIZE - patch_size + 1
assert M > 0, f'Jigsaw: Window size ({WINDOW_SIZE}) and patch size ({patch_size}) not compatible'
limit = np.array([1, M, M, 1])
offset = np.array([0, i * WINDOW_SIZE, j * WINDOW_SIZE, 0]) + tf.random_uniform(
[4], dtype=tf.int32,
maxval=M,
) % limit
patch = tf.slice(x, offset, [N, patch_size, patch_size, C])
patches.append(patch)
patches1 = tf.stack(patches, axis=1)
xyz = np.arange(batch_size)[:, np.newaxis] * size**2 + (perm - 1)
perm0 = tf.reshape(xyz, [-1])
patches_flat = tf.reshape(patches1, [-1] + patches1.get_shape().as_list()[2:])
patches2 = tf.gather(patches_flat, perm0)
return patches2
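# Hedged shape note: for x of shape [N, H, W, C], the result stacks size**2
# patches per image into shape [N * size**2, patch_size, patch_size, C], with
# each image's patches ordered by the (1-based) permutation row chosen by y.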
class Jigsaw(Method):
def __init__(self, name, basenet, loader, patch_size=75, size=3,
reduce_channels=128, use_scalers=False):
self.name = name
self.basenet = basenet
self._size = size
self._patch_size = patch_size
self._loader = loader
self._reduce_channels = reduce_channels
if size == 3:
self._permutations = PERMUTATIONS
elif size == 2:
# They are 1-based due to the permutations file
self._permutations = 1 + np.array(list(itertools.permutations(range(size**2))))
self._use_scalers = use_scalers
@property
def basenet_settings(self):
return {'convolutional': False}
def batch(self):
x, _ = self._loader.batch()
y = tf.random_uniform([self._loader.batch_size], dtype=tf.int32, maxval=len(self._permutations))
patches = _make_random_patches(x, y, self._patch_size, self._permutations, size=self._size)
pad_both = self.basenet.canonical_input_size - self._patch_size
pad_lo = pad_both // 2
pad_up = pad_both - pad_lo
#paddings = [[0, 0], [pad_lo, pad_up], [pad_lo, pad_up], [0, 0]]
#pad_patches = tf.pad(patches, paddings=paddings, mode='REFLECT')
pad_patches = patches
self._y = y
extra = {'permutation': y}
return pad_patches, extra
def build_network(self, network, extra, phase_test, global_step):
info = selfsup.info.create(scale_summary=True)
if self._size == 3:
z = network['activations']['pool5']
else:
z = network['activations']['top']
#z = tf.squeeze(z, [1, 2])
z = tf.reshape(z, (z.get_shape().as_list()[0], -1))
if self._use_scalers:
z = selfsup.ops.scale(z, name='scale')
#W_init = tf.contrib.layers.variance_scaling_initializer()
W_init = tf.random_normal_initializer(0.0, 0.0001)
b_init = tf.constant_initializer(0.0)
reduce_ch = self._reduce_channels
with tf.variable_scope('reduction'):
c_o = reduce_ch
reduce_W = tf.get_variable('weights', [z.get_shape().as_list()[1], c_o], dtype=tf.float32,
initializer=W_init)
reduce_b = tf.get_variable('biases', [c_o], dtype=tf.float32,
initializer=b_init)
z = tf.nn.xw_plus_b(z, reduce_W, reduce_b)
z = tf.nn.relu(z)
z = tf.reshape(z, [self._loader.batch_size, -1, z.get_shape().as_list()[-1]])
z = tf.concat(tf.unstack(z, axis=1), 1)
with tf.variable_scope('jigsaw'):
c_o = len(self._permutations)
jigsaw_W = tf.get_variable('weights', [z.get_shape().as_list()[1], c_o], dtype=tf.float32,
initializer=W_init)
jigsaw_b = tf.get_variable('biases', [c_o], dtype=tf.float32,
initializer=b_init)
z = tf.nn.xw_plus_b(z, jigsaw_W, jigsaw_b)
with tf.variable_scope('primary_loss'):
loss_each = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self._y, logits=z)
primary_loss = tf.reduce_mean(loss_each)
with tf.name_scope('weight_decay'):
wd = 1e-6
l2_loss = tf.nn.l2_loss(reduce_W) + tf.nn.l2_loss(jigsaw_W)
weight_decay = wd * l2_loss
with tf.name_scope('loss'):
loss = weight_decay + primary_loss
variables = info['vars']
self.losses = OrderedDict([
('main', primary_loss),
('+weight_decay', weight_decay),
])
self.primary_loss = primary_loss
self.loss = loss
self.feedback_variables = []
info['activations']['primary_loss'] = primary_loss
info['activations']['loss'] = loss
info['activations']['weight_decay'] = weight_decay
return info
def feedback(self, variables, iteration):
pass
| gustavla/self-supervision | selfsup/multi/methods/jigsaw.py | Python | bsd-3-clause | 5,694 |
#!/usr/bin/env python
#
# Copyright (c) 2016, The OpenThread Authors.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of the copyright holder nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
from autothreadharness.harness_case import HarnessCase
import unittest
class Router_5_5_4(HarnessCase):
role = HarnessCase.ROLE_ROUTER
case = '5 5 4'
golden_devices_required = 5
def on_dialog(self, dialog, title):
pass
if __name__ == '__main__':
unittest.main()
| turon/openthread | tools/harness-automation/cases/router_5_5_4.py | Python | bsd-3-clause | 1,877 |
from django.http import HttpResponse
from django.utils import unittest
from django.test.client import Client
from postleware import PostResponseCachebusterMiddleware
class PostResponseMiddleware(unittest.TestCase):
def setUp(self):
self.client = Client()
def test_header_added_when_necessary(self):
# 'Cache-Control: no-cache' is added to POSTs
response = self.client.post('/test1', {'foo':'bar'})
self.assertEqual(response['Cache-Control'], 'no-cache')
# 'Cache-Control' is NOT added to GETs
response = self.client.get('/test1')
self.assertFalse(response.has_header('Cache-Control'))
def test_header_not_added_when_present(self):
middleware = PostResponseCachebusterMiddleware()
test_header_setting = 'test-setting'
raw_response = HttpResponse()
# 'Cache-Control' header isn't modified when present on POSTs
request = MockRequest('POST')
raw_response['Cache-Control'] = test_header_setting
response = middleware.process_response(request, raw_response)
self.assertEqual(response['Cache-Control'], test_header_setting)
# 'Cache-Control' header isn't modified when present on GETs
request = MockRequest('GET')
raw_response['Cache-Control'] = test_header_setting
response = middleware.process_response(request, raw_response)
self.assertEqual(response['Cache-Control'], test_header_setting)
class MockRequest(object):
def __init__(self, method=None):
self.method = method
| thurloat/django-postleware | postleware/tests.py | Python | bsd-3-clause | 1,561 |
# Copyright (c) 2006-2009 Mitch Garnaat http://garnaat.org/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
class DBSnapshot(object):
"""
Represents a RDS DB Snapshot
"""
def __init__(self, connection=None, id=None):
self.connection = connection
self.id = id
self.engine = None
self.snapshot_create_time = None
self.instance_create_time = None
self.port = None
self.status = None
self.availability_zone = None
self.main_username = None
self.allocated_storage = None
self.instance_id = None
def __repr__(self):
return 'DBSnapshot:%s' % self.id
def startElement(self, name, attrs, connection):
pass
def endElement(self, name, value, connection):
if name == 'Engine':
self.engine = value
elif name == 'InstanceCreateTime':
self.instance_create_time = value
elif name == 'SnapshotCreateTime':
self.snapshot_create_time = value
elif name == 'DBInstanceIdentifier':
self.instance_id = value
elif name == 'DBSnapshotIdentifier':
self.id = value
elif name == 'Port':
self.port = int(value)
elif name == 'Status':
self.status = value
elif name == 'AvailabilityZone':
self.availability_zone = value
elif name == 'MainUsername':
self.main_username = value
elif name == 'AllocatedStorage':
self.allocated_storage = int(value)
elif name == 'SnapshotTime':
self.time = value
else:
setattr(self, name, value)
| kumar303/rockit | vendor-local/boto/rds/dbsnapshot.py | Python | bsd-3-clause | 2,724 |
#!/usr/bin/env python3
# -*- mode: python -*-
# -*- coding: utf-8 -*-
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Command-line tool
NOTE: The API for the command-line tool is experimental.
"""
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
from __future__ import absolute_import
from builtins import open
from future import standard_library
standard_library.install_aliases()
import sys
import urllib.parse
#from BaseHTTPServer import HTTPServer, BaseHTTPRequestHandler
from http.server import HTTPServer, BaseHTTPRequestHandler
from avro import io
from avro import datafile
from avro import protocol
from avro import ipc
class GenericResponder(ipc.Responder):
def __init__(self, proto, msg, datum):
        proto_json = open(proto, 'r').read()
ipc.Responder.__init__(self, protocol.Parse(proto_json))
self.msg = msg
self.datum = datum
def invoke(self, message, request):
if message.name == self.msg:
print >> sys.stderr, "Message: %s Datum: %s" % (message.name, self.datum)
# server will shut down after processing a single Avro request
global server_should_shutdown
server_should_shutdown = True
return self.datum
class GenericHandler(BaseHTTPRequestHandler):
def do_POST(self):
self.responder = responder
call_request_reader = ipc.FramedReader(self.rfile)
call_request = call_request_reader.read_framed_message()
resp_body = self.responder.respond(call_request)
self.send_response(200)
self.send_header('Content-Type', 'avro/binary')
self.end_headers()
resp_writer = ipc.FramedWriter(self.wfile)
resp_writer.write_framed_message(resp_body)
if server_should_shutdown:
print >> sys.stderr, "Shutting down server."
self.server.force_stop()
class StoppableHTTPServer(HTTPServer):
"""HTTPServer.shutdown added in Python 2.6. FML."""
stopped = False
allow_reuse_address = True
def __init__(self, *args, **kw):
HTTPServer.__init__(self, *args, **kw)
self.allow_reuse_address = True
def serve_forever(self):
while not self.stopped:
self.handle_request()
def force_stop(self):
self.server_close()
self.stopped = True
self.serve_forever()
def run_server(uri, proto, msg, datum):
url_obj = urllib.parse.urlparse(uri)
server_addr = (url_obj.hostname, url_obj.port)
global responder
global server_should_shutdown
server_should_shutdown = False
responder = GenericResponder(proto, msg, datum)
server = StoppableHTTPServer(server_addr, GenericHandler)
print("Port: %s" % server.server_port)
sys.stdout.flush()
server.allow_reuse_address = True
print >> sys.stderr, "Starting server."
server.serve_forever()
def send_message(uri, proto, msg, datum):
url_obj = urllib.parse.urlparse(uri)
client = ipc.HTTPTransceiver(url_obj.hostname, url_obj.port)
    proto_json = open(proto, 'r').read()
requestor = ipc.Requestor(protocol.Parse(proto_json), client)
print(requestor.request(msg, datum))
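# Hedged CLI sketch (hypothetical protocol file and message name): start a
# one-shot responder in one shell, then send it a request from another.
#
#     python tool.py rpcreceive http://0.0.0.0:9090 mail.avpr send -file msg.avro
#     python tool.py rpcsend http://127.0.0.1:9090 mail.avpr send -file msg.avro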
def file_or_stdin(f):
if f == "-":
return sys.stdin
else:
        return open(f)
def main(args=sys.argv):
if len(args) == 1:
print("Usage: %s [dump|rpcreceive|rpcsend]" % args[0])
return 1
if args[1] == "dump":
if len(args) != 3:
print("Usage: %s dump input_file" % args[0])
return 1
for d in datafile.DataFileReader(file_or_stdin(args[2]), io.DatumReader()):
print(repr(d))
elif args[1] == "rpcreceive":
usage_str = "Usage: %s rpcreceive uri protocol_file " % args[0]
usage_str += "message_name (-data d | -file f)"
if len(args) not in [5, 7]:
print(usage_str)
return 1
uri, proto, msg = args[2:5]
datum = None
if len(args) > 5:
if args[5] == "-file":
reader = open(args[6], 'rb')
datum_reader = io.DatumReader()
dfr = datafile.DataFileReader(reader, datum_reader)
                datum = next(dfr)
elif args[5] == "-data":
print("JSON Decoder not yet implemented.")
return 1
else:
print(usage_str)
return 1
run_server(uri, proto, msg, datum)
elif args[1] == "rpcsend":
usage_str = "Usage: %s rpcsend uri protocol_file " % args[0]
usage_str += "message_name (-data d | -file f)"
if len(args) not in [5, 7]:
print(usage_str)
return 1
uri, proto, msg = args[2:5]
datum = None
if len(args) > 5:
if args[5] == "-file":
reader = open(args[6], 'rb')
datum_reader = io.DatumReader()
dfr = datafile.DataFileReader(reader, datum_reader)
                datum = next(dfr)
elif args[5] == "-data":
print("JSON Decoder not yet implemented.")
return 1
else:
print(usage_str)
return 1
send_message(uri, proto, msg, datum)
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
| kineticadb/kinetica-api-python | gpudb/packages/avro/avro_py3/tool.py | Python | mit | 5,599 |
# orm/persistence.py
# Copyright (C) 2005-2013 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""private module containing functions used to emit INSERT, UPDATE
and DELETE statements on behalf of a :class:`.Mapper` and its descending
mappers.
The functions here are called only by the unit of work functions
in unitofwork.py.
"""
import operator
from itertools import groupby
from .. import sql, util, exc as sa_exc, schema
from . import attributes, sync, exc as orm_exc, evaluator
from .base import _state_mapper, state_str, _attr_as_key
from ..sql import expression
from . import loading
def save_obj(base_mapper, states, uowtransaction, single=False):
"""Issue ``INSERT`` and/or ``UPDATE`` statements for a list
of objects.
This is called within the context of a UOWTransaction during a
flush operation, given a list of states to be flushed. The
base mapper in an inheritance hierarchy handles the inserts/
updates for all descendant mappers.
"""
# if batch=false, call _save_obj separately for each object
if not single and not base_mapper.batch:
for state in _sort_states(states):
save_obj(base_mapper, [state], uowtransaction, single=True)
return
states_to_insert, states_to_update = _organize_states_for_save(
base_mapper,
states,
uowtransaction)
cached_connections = _cached_connection_dict(base_mapper)
for table, mapper in base_mapper._sorted_tables.items():
insert = _collect_insert_commands(base_mapper, uowtransaction,
table, states_to_insert)
update = _collect_update_commands(base_mapper, uowtransaction,
table, states_to_update)
if update:
_emit_update_statements(base_mapper, uowtransaction,
cached_connections,
mapper, table, update)
if insert:
_emit_insert_statements(base_mapper, uowtransaction,
cached_connections,
mapper, table, insert)
_finalize_insert_update_commands(base_mapper, uowtransaction,
states_to_insert, states_to_update)
def post_update(base_mapper, states, uowtransaction, post_update_cols):
"""Issue UPDATE statements on behalf of a relationship() which
specifies post_update.
"""
cached_connections = _cached_connection_dict(base_mapper)
states_to_update = _organize_states_for_post_update(
base_mapper,
states, uowtransaction)
for table, mapper in base_mapper._sorted_tables.items():
update = _collect_post_update_commands(base_mapper, uowtransaction,
table, states_to_update,
post_update_cols)
if update:
_emit_post_update_statements(base_mapper, uowtransaction,
cached_connections,
mapper, table, update)
def delete_obj(base_mapper, states, uowtransaction):
"""Issue ``DELETE`` statements for a list of objects.
This is called within the context of a UOWTransaction during a
flush operation.
"""
cached_connections = _cached_connection_dict(base_mapper)
states_to_delete = _organize_states_for_delete(
base_mapper,
states,
uowtransaction)
table_to_mapper = base_mapper._sorted_tables
for table in reversed(list(table_to_mapper.keys())):
delete = _collect_delete_commands(base_mapper, uowtransaction,
table, states_to_delete)
mapper = table_to_mapper[table]
_emit_delete_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, delete)
for state, state_dict, mapper, has_identity, connection \
in states_to_delete:
mapper.dispatch.after_delete(mapper, connection, state)
def _organize_states_for_save(base_mapper, states, uowtransaction):
"""Make an initial pass across a set of states for INSERT or
UPDATE.
This includes splitting out into distinct lists for
each, calling before_insert/before_update, obtaining
key information for each state including its dictionary,
mapper, the connection to use for the execution per state,
and the identity flag.
"""
states_to_insert = []
states_to_update = []
for state, dict_, mapper, connection in _connections_for_states(
base_mapper, uowtransaction,
states):
has_identity = bool(state.key)
instance_key = state.key or mapper._identity_key_from_state(state)
row_switch = None
# call before_XXX extensions
if not has_identity:
mapper.dispatch.before_insert(mapper, connection, state)
else:
mapper.dispatch.before_update(mapper, connection, state)
if mapper._validate_polymorphic_identity:
mapper._validate_polymorphic_identity(mapper, state, dict_)
# detect if we have a "pending" instance (i.e. has
# no instance_key attached to it), and another instance
# with the same identity key already exists as persistent.
# convert to an UPDATE if so.
if not has_identity and \
instance_key in uowtransaction.session.identity_map:
instance = \
uowtransaction.session.identity_map[instance_key]
existing = attributes.instance_state(instance)
if not uowtransaction.is_deleted(existing):
raise orm_exc.FlushError(
"New instance %s with identity key %s conflicts "
"with persistent instance %s" %
(state_str(state), instance_key,
state_str(existing)))
base_mapper._log_debug(
"detected row switch for identity %s. "
"will update %s, remove %s from "
"transaction", instance_key,
state_str(state), state_str(existing))
# remove the "delete" flag from the existing element
uowtransaction.remove_state_actions(existing)
row_switch = existing
if not has_identity and not row_switch:
states_to_insert.append(
(state, dict_, mapper, connection,
has_identity, instance_key, row_switch)
)
else:
states_to_update.append(
(state, dict_, mapper, connection,
has_identity, instance_key, row_switch)
)
return states_to_insert, states_to_update
def _organize_states_for_post_update(base_mapper, states,
uowtransaction):
"""Make an initial pass across a set of states for UPDATE
corresponding to post_update.
This includes obtaining key information for each state
including its dictionary, mapper, the connection to use for
the execution per state.
"""
return list(_connections_for_states(base_mapper, uowtransaction,
states))
def _organize_states_for_delete(base_mapper, states, uowtransaction):
"""Make an initial pass across a set of states for DELETE.
This includes calling out before_delete and obtaining
key information for each state including its dictionary,
mapper, the connection to use for the execution per state.
"""
states_to_delete = []
for state, dict_, mapper, connection in _connections_for_states(
base_mapper, uowtransaction,
states):
mapper.dispatch.before_delete(mapper, connection, state)
states_to_delete.append((state, dict_, mapper,
bool(state.key), connection))
return states_to_delete
def _collect_insert_commands(base_mapper, uowtransaction, table,
states_to_insert):
"""Identify sets of values to use in INSERT statements for a
list of states.
"""
insert = []
for state, state_dict, mapper, connection, has_identity, \
instance_key, row_switch in states_to_insert:
if table not in mapper._pks_by_table:
continue
pks = mapper._pks_by_table[table]
params = {}
value_params = {}
has_all_pks = True
has_all_defaults = True
for col in mapper._cols_by_table[table]:
if col is mapper.version_id_col and \
mapper.version_id_generator is not False:
val = mapper.version_id_generator(None)
params[col.key] = val
else:
# pull straight from the dict for
# pending objects
prop = mapper._columntoproperty[col]
value = state_dict.get(prop.key, None)
if value is None:
if col in pks:
has_all_pks = False
elif col.default is None and \
col.server_default is None:
params[col.key] = value
elif col.server_default is not None and \
mapper.base_mapper.eager_defaults:
has_all_defaults = False
elif isinstance(value, sql.ClauseElement):
value_params[col] = value
else:
params[col.key] = value
insert.append((state, state_dict, params, mapper,
connection, value_params, has_all_pks,
has_all_defaults))
return insert
def _collect_update_commands(base_mapper, uowtransaction,
table, states_to_update):
"""Identify sets of values to use in UPDATE statements for a
list of states.
This function works intricately with the history system
to determine exactly what values should be updated
as well as how the row should be matched within an UPDATE
statement. Includes some tricky scenarios where the primary
key of an object might have been changed.
"""
update = []
for state, state_dict, mapper, connection, has_identity, \
instance_key, row_switch in states_to_update:
if table not in mapper._pks_by_table:
continue
pks = mapper._pks_by_table[table]
params = {}
value_params = {}
hasdata = hasnull = False
for col in mapper._cols_by_table[table]:
if col is mapper.version_id_col:
params[col._label] = \
mapper._get_committed_state_attr_by_column(
row_switch or state,
row_switch and row_switch.dict
or state_dict,
col)
prop = mapper._columntoproperty[col]
history = attributes.get_state_history(
state, prop.key,
attributes.PASSIVE_NO_INITIALIZE
)
if history.added:
params[col.key] = history.added[0]
hasdata = True
else:
if mapper.version_id_generator is not False:
val = mapper.version_id_generator(params[col._label])
params[col.key] = val
# HACK: check for history, in case the
# history is only
# in a different table than the one
# where the version_id_col is.
for prop in mapper._columntoproperty.values():
history = attributes.get_state_history(
state, prop.key,
attributes.PASSIVE_NO_INITIALIZE)
if history.added:
hasdata = True
else:
prop = mapper._columntoproperty[col]
history = attributes.get_state_history(
state, prop.key,
attributes.PASSIVE_NO_INITIALIZE)
if history.added:
if isinstance(history.added[0],
sql.ClauseElement):
value_params[col] = history.added[0]
else:
value = history.added[0]
params[col.key] = value
if col in pks:
if history.deleted and \
not row_switch:
# if passive_updates and sync detected
# this was a pk->pk sync, use the new
# value to locate the row, since the
# DB would already have set this
if ("pk_cascaded", state, col) in \
uowtransaction.attributes:
value = history.added[0]
params[col._label] = value
else:
# use the old value to
# locate the row
value = history.deleted[0]
params[col._label] = value
hasdata = True
else:
# row switch logic can reach us here
# remove the pk from the update params
# so the update doesn't
# attempt to include the pk in the
# update statement
del params[col.key]
value = history.added[0]
params[col._label] = value
if value is None:
hasnull = True
else:
hasdata = True
elif col in pks:
value = state.manager[prop.key].impl.get(
state, state_dict)
if value is None:
hasnull = True
params[col._label] = value
if hasdata:
if hasnull:
raise orm_exc.FlushError(
"Can't update table "
"using NULL for primary "
"key value")
update.append((state, state_dict, params, mapper,
connection, value_params))
return update
def _collect_post_update_commands(base_mapper, uowtransaction, table,
states_to_update, post_update_cols):
"""Identify sets of values to use in UPDATE statements for a
list of states within a post_update operation.
"""
update = []
for state, state_dict, mapper, connection in states_to_update:
if table not in mapper._pks_by_table:
continue
pks = mapper._pks_by_table[table]
params = {}
hasdata = False
for col in mapper._cols_by_table[table]:
if col in pks:
params[col._label] = \
mapper._get_state_attr_by_column(
state,
state_dict, col)
elif col in post_update_cols:
prop = mapper._columntoproperty[col]
history = attributes.get_state_history(
state, prop.key,
attributes.PASSIVE_NO_INITIALIZE)
if history.added:
value = history.added[0]
params[col.key] = value
hasdata = True
if hasdata:
update.append((state, state_dict, params, mapper,
connection))
return update
def _collect_delete_commands(base_mapper, uowtransaction, table,
states_to_delete):
"""Identify values to use in DELETE statements for a list of
states to be deleted."""
delete = util.defaultdict(list)
for state, state_dict, mapper, has_identity, connection \
in states_to_delete:
if not has_identity or table not in mapper._pks_by_table:
continue
params = {}
delete[connection].append(params)
for col in mapper._pks_by_table[table]:
params[col.key] = \
value = \
mapper._get_state_attr_by_column(
state, state_dict, col)
if value is None:
raise orm_exc.FlushError(
"Can't delete from table "
"using NULL for primary "
"key value")
if mapper.version_id_col is not None and \
table.c.contains_column(mapper.version_id_col):
params[mapper.version_id_col.key] = \
mapper._get_committed_state_attr_by_column(
state, state_dict,
mapper.version_id_col)
return delete
def _emit_update_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, update):
"""Emit UPDATE statements corresponding to value lists collected
by _collect_update_commands()."""
needs_version_id = mapper.version_id_col is not None and \
table.c.contains_column(mapper.version_id_col)
def update_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(col == sql.bindparam(col._label,
type_=col.type))
if needs_version_id:
clause.clauses.append(mapper.version_id_col ==\
sql.bindparam(mapper.version_id_col._label,
type_=mapper.version_id_col.type))
stmt = table.update(clause)
if mapper.base_mapper.eager_defaults:
stmt = stmt.return_defaults()
elif mapper.version_id_col is not None:
stmt = stmt.return_defaults(mapper.version_id_col)
return stmt
statement = base_mapper._memo(('update', table), update_stmt)
rows = 0
for state, state_dict, params, mapper, \
connection, value_params in update:
if value_params:
c = connection.execute(
statement.values(value_params),
params)
else:
c = cached_connections[connection].\
execute(statement, params)
_postfetch(
mapper,
uowtransaction,
table,
state,
state_dict,
c,
c.context.compiled_parameters[0],
value_params)
rows += c.rowcount
if connection.dialect.supports_sane_rowcount:
if rows != len(update):
raise orm_exc.StaleDataError(
"UPDATE statement on table '%s' expected to "
"update %d row(s); %d were matched." %
(table.description, len(update), rows))
elif needs_version_id:
util.warn("Dialect %s does not support updated rowcount "
"- versioning cannot be verified." %
c.dialect.dialect_description,
stacklevel=12)
def _emit_insert_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, insert):
"""Emit INSERT statements corresponding to value lists collected
by _collect_insert_commands()."""
statement = base_mapper._memo(('insert', table), table.insert)
for (connection, pkeys, hasvalue, has_all_pks, has_all_defaults), \
records in groupby(insert,
lambda rec: (rec[4],
list(rec[2].keys()),
bool(rec[5]),
rec[6], rec[7])
):
if \
(
has_all_defaults
or not base_mapper.eager_defaults
or not connection.dialect.implicit_returning
) and has_all_pks and not hasvalue:
records = list(records)
multiparams = [rec[2] for rec in records]
c = cached_connections[connection].\
execute(statement, multiparams)
for (state, state_dict, params, mapper_rec,
conn, value_params, has_all_pks, has_all_defaults), \
last_inserted_params in \
zip(records, c.context.compiled_parameters):
_postfetch(
mapper_rec,
uowtransaction,
table,
state,
state_dict,
c,
last_inserted_params,
value_params)
else:
if not has_all_defaults and base_mapper.eager_defaults:
statement = statement.return_defaults()
elif mapper.version_id_col is not None:
statement = statement.return_defaults(mapper.version_id_col)
for state, state_dict, params, mapper_rec, \
connection, value_params, \
has_all_pks, has_all_defaults in records:
if value_params:
result = connection.execute(
statement.values(value_params),
params)
else:
result = cached_connections[connection].\
execute(statement, params)
primary_key = result.context.inserted_primary_key
if primary_key is not None:
# set primary key attributes
for pk, col in zip(primary_key,
mapper._pks_by_table[table]):
prop = mapper_rec._columntoproperty[col]
if state_dict.get(prop.key) is None:
# TODO: would rather say:
#state_dict[prop.key] = pk
mapper_rec._set_state_attr_by_column(
state,
state_dict,
col, pk)
_postfetch(
mapper_rec,
uowtransaction,
table,
state,
state_dict,
result,
result.context.compiled_parameters[0],
value_params)
def _emit_post_update_statements(base_mapper, uowtransaction,
cached_connections, mapper, table, update):
"""Emit UPDATE statements corresponding to value lists collected
by _collect_post_update_commands()."""
def update_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(col == sql.bindparam(col._label,
type_=col.type))
return table.update(clause)
statement = base_mapper._memo(('post_update', table), update_stmt)
# execute each UPDATE in the order according to the original
# list of states to guarantee row access order, but
# also group them into common (connection, cols) sets
# to support executemany().
for key, grouper in groupby(
update, lambda rec: (rec[4], list(rec[2].keys()))
):
connection = key[0]
multiparams = [params for state, state_dict,
params, mapper, conn in grouper]
cached_connections[connection].\
execute(statement, multiparams)
def _emit_delete_statements(base_mapper, uowtransaction, cached_connections,
mapper, table, delete):
"""Emit DELETE statements corresponding to value lists collected
by _collect_delete_commands()."""
need_version_id = mapper.version_id_col is not None and \
table.c.contains_column(mapper.version_id_col)
def delete_stmt():
clause = sql.and_()
for col in mapper._pks_by_table[table]:
clause.clauses.append(
col == sql.bindparam(col.key, type_=col.type))
if need_version_id:
clause.clauses.append(
mapper.version_id_col ==
sql.bindparam(
mapper.version_id_col.key,
type_=mapper.version_id_col.type
)
)
return table.delete(clause)
for connection, del_objects in delete.items():
statement = base_mapper._memo(('delete', table), delete_stmt)
connection = cached_connections[connection]
if need_version_id:
# TODO: need test coverage for this [ticket:1761]
if connection.dialect.supports_sane_rowcount:
rows = 0
# execute deletes individually so that versioned
# rows can be verified
for params in del_objects:
c = connection.execute(statement, params)
rows += c.rowcount
if rows != len(del_objects):
raise orm_exc.StaleDataError(
"DELETE statement on table '%s' expected to "
"delete %d row(s); %d were matched." %
(table.description, len(del_objects), c.rowcount)
)
else:
util.warn(
"Dialect %s does not support deleted rowcount "
"- versioning cannot be verified." %
connection.dialect.dialect_description,
stacklevel=12)
connection.execute(statement, del_objects)
else:
connection.execute(statement, del_objects)
def _finalize_insert_update_commands(base_mapper, uowtransaction,
states_to_insert, states_to_update):
"""finalize state on states that have been inserted or updated,
including calling after_insert/after_update events.
"""
for state, state_dict, mapper, connection, has_identity, \
instance_key, row_switch in states_to_insert + \
states_to_update:
if mapper._readonly_props:
readonly = state.unmodified_intersection(
[p.key for p in mapper._readonly_props
if p.expire_on_flush or p.key not in state.dict]
)
if readonly:
state._expire_attributes(state.dict, readonly)
# if eager_defaults option is enabled, load
# all expired cols. Else if we have a version_id_col, make sure
# it isn't expired.
toload_now = []
if base_mapper.eager_defaults and state.unloaded:
toload_now.extend(state.unloaded)
elif mapper.version_id_col is not None and \
mapper.version_id_generator is False:
prop = mapper._columntoproperty[mapper.version_id_col]
if prop.key in state.unloaded:
toload_now.extend([prop.key])
if toload_now:
state.key = base_mapper._identity_key_from_state(state)
loading.load_on_ident(
uowtransaction.session.query(base_mapper),
state.key, refresh_state=state,
only_load_props=toload_now)
# call after_XXX extensions
if not has_identity:
mapper.dispatch.after_insert(mapper, connection, state)
else:
mapper.dispatch.after_update(mapper, connection, state)
def _postfetch(mapper, uowtransaction, table,
state, dict_, result, params, value_params):
"""Expire attributes in need of newly persisted database state,
after an INSERT or UPDATE statement has proceeded for that
state."""
prefetch_cols = result.context.prefetch_cols
postfetch_cols = result.context.postfetch_cols
returning_cols = result.context.returning_cols
if mapper.version_id_col is not None:
prefetch_cols = list(prefetch_cols) + [mapper.version_id_col]
if returning_cols:
row = result.context.returned_defaults
if row is not None:
for col in returning_cols:
if col.primary_key:
continue
mapper._set_state_attr_by_column(state, dict_, col, row[col])
for c in prefetch_cols:
if c.key in params and c in mapper._columntoproperty:
mapper._set_state_attr_by_column(state, dict_, c, params[c.key])
if postfetch_cols:
state._expire_attributes(state.dict,
[mapper._columntoproperty[c].key
for c in postfetch_cols if c in
mapper._columntoproperty]
)
# synchronize newly inserted ids from one table to the next
# TODO: this still goes a little too often. would be nice to
# have definitive list of "columns that changed" here
for m, equated_pairs in mapper._table_to_equated[table]:
sync.populate(state, m, state, m,
equated_pairs,
uowtransaction,
mapper.passive_updates)
def _connections_for_states(base_mapper, uowtransaction, states):
"""Return an iterator of (state, state.dict, mapper, connection).
The states are sorted according to _sort_states, then paired
with the connection they should be using for the given
unit of work transaction.
"""
# if session has a connection callable,
# organize individual states with the connection
# to use for update
if uowtransaction.session.connection_callable:
connection_callable = \
uowtransaction.session.connection_callable
else:
connection = None
connection_callable = None
for state in _sort_states(states):
if connection_callable:
connection = connection_callable(base_mapper, state.obj())
elif not connection:
connection = uowtransaction.transaction.connection(
base_mapper)
mapper = _state_mapper(state)
yield state, state.dict, mapper, connection
def _cached_connection_dict(base_mapper):
# dictionary of connection->connection_with_cache_options.
return util.PopulateDict(
lambda conn: conn.execution_options(
compiled_cache=base_mapper._compiled_cache
))
def _sort_states(states):
pending = set(states)
persistent = set(s for s in pending if s.key is not None)
pending.difference_update(persistent)
return sorted(pending, key=operator.attrgetter("insert_order")) + \
sorted(persistent, key=lambda q: q.key[1])
class BulkUD(object):
"""Handle bulk update and deletes via a :class:`.Query`."""
def __init__(self, query):
self.query = query.enable_eagerloads(False)
@property
def session(self):
return self.query.session
@classmethod
def _factory(cls, lookup, synchronize_session, *arg):
try:
klass = lookup[synchronize_session]
except KeyError:
raise sa_exc.ArgumentError(
"Valid strategies for session synchronization "
"are %s" % (", ".join(sorted(repr(x)
for x in lookup))))
else:
return klass(*arg)
def exec_(self):
self._do_pre()
self._do_pre_synchronize()
self._do_exec()
self._do_post_synchronize()
self._do_post()
def _do_pre(self):
query = self.query
self.context = context = query._compile_context()
if len(context.statement.froms) != 1 or \
not isinstance(context.statement.froms[0], schema.Table):
self.primary_table = query._only_entity_zero(
"This operation requires only one Table or "
"entity be specified as the target."
).mapper.local_table
else:
self.primary_table = context.statement.froms[0]
session = query.session
if query._autoflush:
session._autoflush()
def _do_pre_synchronize(self):
pass
def _do_post_synchronize(self):
pass
class BulkEvaluate(BulkUD):
"""BulkUD which does the 'evaluate' method of session state resolution."""
def _additional_evaluators(self, evaluator_compiler):
pass
def _do_pre_synchronize(self):
query = self.query
try:
evaluator_compiler = evaluator.EvaluatorCompiler()
if query.whereclause is not None:
eval_condition = evaluator_compiler.process(
query.whereclause)
else:
def eval_condition(obj):
return True
self._additional_evaluators(evaluator_compiler)
except evaluator.UnevaluatableError:
raise sa_exc.InvalidRequestError(
"Could not evaluate current criteria in Python. "
"Specify 'fetch' or False for the "
"synchronize_session parameter.")
target_cls = query._mapper_zero().class_
#TODO: detect when the where clause is a trivial primary key match
self.matched_objects = [
obj for (cls, pk), obj in
query.session.identity_map.items()
if issubclass(cls, target_cls) and
eval_condition(obj)]
class BulkFetch(BulkUD):
"""BulkUD which does the 'fetch' method of session state resolution."""
def _do_pre_synchronize(self):
query = self.query
session = query.session
select_stmt = self.context.statement.with_only_columns(
self.primary_table.primary_key)
self.matched_rows = session.execute(
select_stmt,
params=query._params).fetchall()
class BulkUpdate(BulkUD):
"""BulkUD which handles UPDATEs."""
def __init__(self, query, values):
super(BulkUpdate, self).__init__(query)
self.query._no_select_modifiers("update")
self.values = values
@classmethod
def factory(cls, query, synchronize_session, values):
return BulkUD._factory({
"evaluate": BulkUpdateEvaluate,
"fetch": BulkUpdateFetch,
False: BulkUpdate
}, synchronize_session, query, values)
def _do_exec(self):
update_stmt = sql.update(self.primary_table,
self.context.whereclause, self.values)
self.result = self.query.session.execute(
update_stmt, params=self.query._params)
self.rowcount = self.result.rowcount
def _do_post(self):
session = self.query.session
session.dispatch.after_bulk_update(self)
class BulkDelete(BulkUD):
"""BulkUD which handles DELETEs."""
def __init__(self, query):
super(BulkDelete, self).__init__(query)
self.query._no_select_modifiers("delete")
@classmethod
def factory(cls, query, synchronize_session):
return BulkUD._factory({
"evaluate": BulkDeleteEvaluate,
"fetch": BulkDeleteFetch,
False: BulkDelete
}, synchronize_session, query)
def _do_exec(self):
delete_stmt = sql.delete(self.primary_table,
self.context.whereclause)
self.result = self.query.session.execute(delete_stmt,
params=self.query._params)
self.rowcount = self.result.rowcount
def _do_post(self):
session = self.query.session
session.dispatch.after_bulk_delete(self)
class BulkUpdateEvaluate(BulkEvaluate, BulkUpdate):
"""BulkUD which handles UPDATEs using the "evaluate"
method of session resolution."""
def _additional_evaluators(self, evaluator_compiler):
self.value_evaluators = {}
for key, value in self.values.items():
key = _attr_as_key(key)
self.value_evaluators[key] = evaluator_compiler.process(
expression._literal_as_binds(value))
def _do_post_synchronize(self):
session = self.query.session
states = set()
evaluated_keys = list(self.value_evaluators.keys())
for obj in self.matched_objects:
state, dict_ = attributes.instance_state(obj),\
attributes.instance_dict(obj)
# only evaluate unmodified attributes
to_evaluate = state.unmodified.intersection(
evaluated_keys)
for key in to_evaluate:
dict_[key] = self.value_evaluators[key](obj)
state._commit(dict_, list(to_evaluate))
# expire attributes with pending changes
# (there was no autoflush, so they are overwritten)
state._expire_attributes(dict_,
set(evaluated_keys).
difference(to_evaluate))
states.add(state)
session._register_altered(states)
class BulkDeleteEvaluate(BulkEvaluate, BulkDelete):
"""BulkUD which handles DELETEs using the "evaluate"
method of session resolution."""
def _do_post_synchronize(self):
self.query.session._remove_newly_deleted(
[attributes.instance_state(obj)
for obj in self.matched_objects])
class BulkUpdateFetch(BulkFetch, BulkUpdate):
"""BulkUD which handles UPDATEs using the "fetch"
method of session resolution."""
def _do_post_synchronize(self):
session = self.query.session
target_mapper = self.query._mapper_zero()
states = set([
attributes.instance_state(session.identity_map[identity_key])
for identity_key in [
target_mapper.identity_key_from_primary_key(
list(primary_key))
for primary_key in self.matched_rows
]
if identity_key in session.identity_map
])
attrib = [_attr_as_key(k) for k in self.values]
for state in states:
session._expire_state(state, attrib)
session._register_altered(states)
class BulkDeleteFetch(BulkFetch, BulkDelete):
"""BulkUD which handles DELETEs using the "fetch"
method of session resolution."""
def _do_post_synchronize(self):
session = self.query.session
target_mapper = self.query._mapper_zero()
for primary_key in self.matched_rows:
# TODO: inline this and call remove_newly_deleted
# once
identity_key = target_mapper.identity_key_from_primary_key(
list(primary_key))
if identity_key in session.identity_map:
session._remove_newly_deleted(
[attributes.instance_state(
session.identity_map[identity_key]
)]
)
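# Illustrative usage sketch (not part of the original module): these BulkUD
# subclasses back Query.update() / Query.delete(), whose synchronize_session
# argument selects a strategy via the factory mappings above. `session` and
# `User` below are assumed fixtures, not defined here.
#
#   session.query(User).filter(User.name == "ed").update(
#       {"name": "edward"}, synchronize_session="evaluate")  # BulkUpdateEvaluate
#   session.query(User).filter(User.id > 10).delete(
#       synchronize_session="fetch")                          # BulkDeleteFetch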
| alex/sqlalchemy | lib/sqlalchemy/orm/persistence.py | Python | mit | 41,002 |
# -*- coding: utf-8 -*-#
__author__ = 'dolacmeo'
| dolaCmeo/quick_flask | flask_site/user/__init__.py | Python | mit | 49 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Team',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=255)),
('slug', models.SlugField(max_length=255, blank=True)),
('created', models.DateTimeField(auto_now_add=True)),
('modified', models.DateTimeField(auto_now=True)),
('members', models.ManyToManyField(related_name='member', to=settings.AUTH_USER_MODEL)),
('owner', models.ForeignKey(related_name='owner', to=settings.AUTH_USER_MODEL)),
('user', models.OneToOneField(to=settings.AUTH_USER_MODEL)),
],
),
]
| reneetrei/agile-bayou-76491 | teams/migrations/0001_initial.py | Python | mit | 1,068 |
# -*- coding: utf-8 -*-
"""Unit tests for functions calling bowtie externally."""
from pathlib2 import Path
import pytest
from imfusion.external import bowtie
class TestBowtieIndex(object):
"""Unit tests for the bowtie_index function."""
def test_call(self, mocker):
"""Tests example call with Path paths."""
mock_run = mocker.patch.object(bowtie, 'run_command')
bowtie.bowtie_index(Path('reference.fa'), Path('genome'))
mock_run.assert_called_once_with(
args=['bowtie-build', 'reference.fa', 'genome'], log_path=None)
| NKI-CCB/imfusion | tests/imfusion/external/test_ext_bowtie.py | Python | mit | 579 |
import util
from util.include import *
grid_margin_w = util.input.cfg_w / 6.0
grid_margin_h = util.input.cfg_h / 6.0
cell_w = util.input.cfg_w * 2.0 / 9.0
cell_h = util.input.cfg_h * 2.0 / 9.0
mark_none = []
mark_x = []
mark_o = []
| Max-E/max-opencv-demos | screens/preferences/include.py | Python | mit | 234 |
import caffe
import surgery, score
import numpy as np
import os
import sys
import setproctitle
setproctitle.setproctitle(os.path.basename(os.getcwd()))
weights = '../ilsvrc-nets/vgg16-fcn.caffemodel'
# init
caffe.set_device(int(sys.argv[1]))
caffe.set_mode_gpu()
solver = caffe.SGDSolver('solver.prototxt')
solver.net.copy_from(weights)
# surgeries
interp_layers = [k for k in solver.net.params.keys() if 'up' in k]
surgery.interp(solver.net, interp_layers)
# scoring
test = np.loadtxt('../data/nyud/test.txt', dtype=str)
for _ in range(50):
solver.step(2000)
    score.seg_tests(solver, False, test, layer='score')
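# Assumed invocation (the GPU id is read from sys.argv[1] by caffe.set_device):
#   python solve.py 0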
| wkentaro/fcn | fcn/external/fcn.berkeleyvision.org/nyud-fcn32s-hha/solve.py | Python | mit | 616 |
import pytest
from SMIME_Messaging import Client, sign_email, encrypt_email_body, verify, decrypt_email_body, sign_and_encrypt,\
decode_str
import demistomock as demisto
with open('./test_data/signer_key.pem') as f:
private_key = f.read()
with open('./test_data/signer.pem') as file_:
public_key = file_.read()
client = Client(private_key, public_key)
note_msg = 'Note: encoding detection ended with warning: Trying to detect encoding from a tiny portion'
test_data = [
(
b'Za\xbf\xf3\xb3\xe6 g\xea\xb6l\xb1 ja\xbc\xf1',
'Zaæó³ę gź¶l± ja¼ń',
note_msg,
''
),
(
b'Za\xbf\xf3\xb3\xe6 g\xea\xb6l\xb1 ja\xbc\xf1',
'Zażółć gęślą jaźń',
'',
'iso-8859-2'
),
(b'\xe3\x81\x8c\xe3\x81\x84\xe3\x83\xa2',
'がいモ',
note_msg,
''
),
(b'\xd7\xa9\xd7\x9c\xd7\x95\xd7\x9d',
'שלום',
'',
'')
]
def test_sign():
message_body = 'text to check'
sign, _ = sign_email(client, {'message_body': message_body})
assert 'MIME-Version: 1.0\nContent-Type: multipart/signed; protocol="application/x-pkcs7-signature"; ' \
'micalg="sha1";' in sign
def test_verify(mocker):
mocker.patch.object(demisto, 'getFilePath', return_value={'path': './test_data/signed.p7'})
v, _ = verify(client, {})
assert 'a sign of our times' in v
def test_encrypt(mocker):
mocker.patch.object(demisto, 'args', return_value={'message': 'testing message'})
encrypt, _ = encrypt_email_body(client, {})
assert 'MIME-Version: 1.0\nContent-Disposition: attachment; filename="smime.p7m"\n' \
'Content-Type: application/x-pkcs7-mime; smime-type=enveloped-data; name="smime.p7m"\n' \
'Content-Transfer-Encoding: base64' in encrypt
def test_decrypt(mocker):
mocker.patch.object(demisto, 'getFilePath', return_value={'path': './test_data/encrypt.p7'})
decrypted, _ = decrypt_email_body(client, {})
assert 'Hello world' in decrypted
def test_sign_and_encrypt(mocker):
mocker.patch.object(demisto, 'args', return_value={'message': 'testing message'})
sign_encrypt, _ = sign_and_encrypt(client, {})
assert 'MIME-Version: 1.0\nContent-Disposition: attachment; filename="smime.p7m"\n' \
'Content-Type: application/x-pkcs7-mime; smime-type=enveloped-data; name="smime.p7m"\n' \
'Content-Transfer-Encoding: base64' in sign_encrypt
@pytest.mark.parametrize('decrypted_text_bytes, expected_output, error_msg, encoding', test_data)
def test_decode_using_chardet(decrypted_text_bytes, expected_output, error_msg, encoding):
"""
Given:
- Text in bytes to decode
When:
- searching for the right encoding code
Then:
- Using chardet to find the correct encoding. If confidence of the detected code is under 0.9
message to note returned
"""
out, msg = decode_str(decrypted_text_bytes, encoding)
assert error_msg in msg
assert out == expected_output
def test_test_module(mocker):
from SMIME_Messaging import test_module
mocker.patch.object(demisto, 'results')
test_module(client)
| demisto/content | Packs/SMIME_Messaging/Integrations/SMIME_Messaging/SMIME_Messaging_test.py | Python | mit | 3,174 |
from __future__ import print_function, division
import matplotlib
import logging
from sys import stdout
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import (Net, RealApplianceSource,
BLSTMLayer, DimshuffleLayer,
BidirectionalRecurrentLayer)
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment, init_experiment
from neuralnilm.net import TrainingError
from neuralnilm.layers import MixtureDensityLayer
from neuralnilm.objectives import (scaled_cost, mdn_nll,
scaled_cost_ignore_inactive, ignore_inactive,
scaled_cost3)
from neuralnilm.plot import MDNPlotter, CentralOutputPlotter, Plotter
from neuralnilm.updates import clipped_nesterov_momentum
from lasagne.nonlinearities import sigmoid, rectify, tanh, identity
from lasagne.objectives import mse, binary_crossentropy
from lasagne.init import Uniform, Normal, Identity
from lasagne.layers import (LSTMLayer, DenseLayer, Conv1DLayer,
ReshapeLayer, FeaturePoolLayer, RecurrentLayer)
from lasagne.layers.batch_norm import BatchNormLayer
from lasagne.updates import nesterov_momentum, momentum
from functools import partial
import os
import __main__
from copy import deepcopy
from math import sqrt
import numpy as np
import theano.tensor as T
import gc
"""
425: FF auto encoder with single appliance (Fridge)
"""
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
#PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
PATH = "/data/dk3810/figures"
SAVE_PLOT_INTERVAL = 2000
N_SEQ_PER_BATCH = 64
SEQ_LENGTH = 1024
source_dict = dict(
filename='/data/dk3810/ukdale.h5',
appliances=[
['fridge freezer', 'fridge', 'freezer'],
'hair straighteners',
'television',
'dish washer',
['washer dryer', 'washing machine']
],
max_appliance_powers=[200, 500, 200, 2500, 2400],
# max_input_power=200,
max_diff=200,
on_power_thresholds=[5] * 5,
min_on_durations=[60, 60, 60, 1800, 1800],
min_off_durations=[12, 12, 12, 1800, 600],
window=("2013-04-01", "2015-01-01"),
seq_length=SEQ_LENGTH,
# random_window=64,
output_one_appliance=True,
boolean_targets=False,
train_buildings=[1, 2, 3],
validation_buildings=[4, 5],
skip_probability=0.75,
skip_probability_for_first_appliance=0.5,
one_target_per_seq=False,
n_seq_per_batch=N_SEQ_PER_BATCH,
# subsample_target=4,
include_diff=False,
include_power=True,
clip_appliance_power=False,
target_is_prediction=False,
# independently_center_inputs=True,
standardise_input=True,
standardise_targets=True,
# unit_variance_targets=False,
# input_padding=2,
lag=0,
clip_input=False,
# two_pass=True,
# clock_type='ramp',
# clock_period=SEQ_LENGTH
# classification=True
# reshape_target_to_2D=True
# input_stats={'mean': np.array([ 0.05526326], dtype=np.float32),
# 'std': np.array([ 0.12636775], dtype=np.float32)},
# target_stats={
# 'mean': np.array([ 0.04066789, 0.01881946,
# 0.24639061, 0.17608672, 0.10273963],
# dtype=np.float32),
# 'std': np.array([ 0.11449792, 0.07338708,
# 0.26608968, 0.33463112, 0.21250485],
# dtype=np.float32)}
)
net_dict = dict(
save_plot_interval=SAVE_PLOT_INTERVAL,
# loss_function=partial(ignore_inactive, loss_func=mdn_nll, seq_length=SEQ_LENGTH),
# loss_function=lambda x, t: mdn_nll(x, t).mean(),
# loss_function=lambda x, t: (mse(x, t) * MASK).mean(),
loss_function=lambda x, t: mse(x, t).mean(),
# loss_function=lambda x, t: binary_crossentropy(x, t).mean(),
# loss_function=partial(scaled_cost, loss_func=mse),
# loss_function=ignore_inactive,
# loss_function=partial(scaled_cost3, ignore_inactive=False),
# updates_func=momentum,
updates_func=clipped_nesterov_momentum,
updates_kwargs={'clip_range': (0, 10)},
learning_rate=1e-2,
learning_rate_changes_by_iteration={
1000: 1e-3,
5000: 1e-4
},
do_save_activations=True,
auto_reshape=False,
# plotter=CentralOutputPlotter
plotter=Plotter(n_seq_to_plot=32)
)
def exp_a(name):
global source
    # create the data source; nothing else defines the global `source`
    source_dict_copy = deepcopy(source_dict)
    source = RealApplianceSource(**source_dict_copy)
net_dict_copy = deepcopy(net_dict)
net_dict_copy.update(dict(
experiment_name=name,
source=source
))
net_dict_copy['layers_config'] = [
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # (batch, features, time)
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': identity,
'b': None,
'border_mode': 'same'
},
{
'type': BatchNormLayer,
'axes': (0, 2),
'nonlinearity': rectify
},
{
'type': Conv1DLayer, # convolve over the time axis
'num_filters': 16,
'filter_length': 4,
'stride': 1,
'nonlinearity': identity,
'b': None,
'border_mode': 'same'
},
{
'type': BatchNormLayer,
'axes': (0, 2),
'nonlinearity': rectify
},
{
'type': DimshuffleLayer,
'pattern': (0, 2, 1) # back to (batch, time, features)
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': identity, 'b': None
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 4,
'nonlinearity': identity, 'b': None
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 8,
'nonlinearity': identity, 'b': None
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': 4,
'nonlinearity': identity, 'b': None
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 8,
'nonlinearity': identity, 'b': None
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH // 4,
'nonlinearity': identity, 'b': None
},
{
'type': BatchNormLayer,
'axes': (0, 1),
'nonlinearity': rectify
},
{
'type': DenseLayer,
'num_units': SEQ_LENGTH,
'nonlinearity': None
}
]
net = Net(**net_dict_copy)
return net
def main():
EXPERIMENTS = list('a')
for experiment in EXPERIMENTS:
full_exp_name = NAME + experiment
func_call = init_experiment(PATH, experiment, full_exp_name)
logger = logging.getLogger(full_exp_name)
try:
net = eval(func_call)
run_experiment(net, epochs=None)
except KeyboardInterrupt:
logger.info("KeyboardInterrupt")
break
except Exception as exception:
logger.exception("Exception")
# raise
else:
del net.source.train_activations
gc.collect()
finally:
logging.shutdown()
if __name__ == "__main__":
main()
"""
Emacs variables
Local Variables:
compile-command: "cp /home/jack/workspace/python/neuralnilm/scripts/e438.py /mnt/sshfs/imperial/workspace/python/neuralnilm/scripts/"
End:
"""
| mmottahedi/neuralnilm_prototype | scripts/e438.py | Python | mit | 8,414 |
"""Edit the RWhois data on the account."""
# :license: MIT, see LICENSE for more details.
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
import click
@click.command()
@click.option('--abuse', help='Set the abuse email address')
@click.option('--address1', help='Update the address 1 field')
@click.option('--address2', help='Update the address 2 field')
@click.option('--city', help='Set the city name')
@click.option('--company', help='Set the company name')
@click.option('--country', help='Set the two-letter country code')
@click.option('--firstname', help='Update the first name field')
@click.option('--lastname', help='Update the last name field')
@click.option('--postal', help='Set the postal code field')
@click.option('--public/--private',
default=None,
help='Flags the address as a public or private residence.')
@click.option('--state', help='Set the two-letter state code')
@environment.pass_env
def cli(env, abuse, address1, address2, city, company, country, firstname,
lastname, postal, public, state):
"""Edit the RWhois data on the account."""
mgr = SoftLayer.NetworkManager(env.client)
update = {
'abuse_email': abuse,
'address1': address1,
'address2': address2,
'company_name': company,
'city': city,
'country': country,
'first_name': firstname,
'last_name': lastname,
'postal_code': postal,
'state': state,
'private_residence': public,
}
if public is True:
update['private_residence'] = False
elif public is False:
update['private_residence'] = True
check = [x for x in update.values() if x is not None]
if not check:
raise exceptions.CLIAbort(
"You must specify at least one field to update.")
mgr.edit_rwhois(**update)
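# Hypothetical invocation of this subcommand (command name inferred from the
# module path rwhois/edit.py; flag values are illustrative only):
#   slcli rwhois edit --abuse abuse@example.com --company "Example Corp" --public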
| briancline/softlayer-python | SoftLayer/CLI/rwhois/edit.py | Python | mit | 1,891 |
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
import sys
import json
from twit.models import User, Tweet, Mention, UserMention
from javanlp.models import Sentence, Sentiment
from javanlp.util import AnnotationException, annotate_document_with_sentiment
class Command(BaseCommand):
"""Annotate tweets and load into database."""
help = __doc__
#def add_arguments(self, parser):
# import argparse
# parser.add_argument('--input', type=argparse.FileType('r'), help="Input file containing a json tweet on each line.")
def handle(self, *args, **options):
for tweet in Tweet.objects.all():
if Sentence.objects.filter(doc_id = tweet.id).exists(): continue
try:
with transaction.atomic():
for sentence, sentiment in annotate_document_with_sentiment(tweet.id, tweet.text):
sentence.save()
sentiment.sentence = sentence
sentiment.save()
except AnnotationException:
pass # Couldn't annotate this sentence...
| arunchaganty/aeschines | django/twit/management/commands/annotate_features.py | Python | mit | 1,154 |
from django import forms
from selectable.forms import AutoCompleteSelectField
from selectable.forms import AutoCompleteSelectWidget
from opendata.catalog.lookups import CityLookup, CountyLookup
from .models import Request
class SearchForm(forms.Form):
text = forms.CharField(required=False)
class RequestForm(forms.ModelForm):
county = AutoCompleteSelectField(
lookup_class=CountyLookup,
required=False,
widget=AutoCompleteSelectWidget(
lookup_class=CountyLookup,
attrs={"class": "suggestions-hidden suggestions-county"},
)
)
city = AutoCompleteSelectField(
lookup_class=CityLookup,
required=False,
widget=AutoCompleteSelectWidget(
lookup_class=CityLookup,
attrs={"class": "suggestions-hidden suggestions-city"},
)
)
class Meta:
model = Request
exclude = ('suggested_by', 'resources', 'rating', 'status', )
class Media:
js = (
"suggestions/js/form.js",
)
| openrural/open-data-nc | opendata/requests/forms.py | Python | mit | 1,046 |
# -*- coding: utf-8 -*-
# See:
# https://github.com/codeforamerica/pittsburgh-purchasing-suite/blob/master/purchasing_test/unit/util.py
import datetime
from feedback.user.models import User
def create_a_user(email='foo@foo.com'):
return User(email=email, first_name='foo', last_name='foo')
def insert_a_user(email='foo@foo.com'):
user = create_a_user(email)
user.save()
return user
| codeforamerica/mdc-feedback | feedback_test/unit/util.py | Python | mit | 402 |
#!/usr/bin/env python
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, ServerError, InvalidRequestError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import biokbase.nexus
import requests as _requests
import urlparse as _urlparse
import random as _random
import os
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file() or not get_service_name():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name()):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from GenomeFeatureComparatorImpl import GenomeFeatureComparator
impl_GenomeFeatureComparator = GenomeFeatureComparator(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
async_run_methods = {}
async_check_methods = {}
async_run_methods['GenomeFeatureComparator.compare_genome_features_async'] = ['GenomeFeatureComparator', 'compare_genome_features']
async_check_methods['GenomeFeatureComparator.compare_genome_features_check'] = ['GenomeFeatureComparator', 'compare_genome_features']
class AsyncJobServiceClient(object):
def __init__(self, timeout=30 * 60, token=None,
ignore_authrc=True, trust_all_ssl_certificates=False):
url = environ.get('KB_JOB_SERVICE_URL', None)
if url is None and config is not None:
url = config.get('job-service-url')
if url is None:
            raise ValueError('Neither the \'job-service-url\' configuration '
                'parameter nor the \'KB_JOB_SERVICE_URL\' environment '
                'variable is defined')
scheme, _, _, _, _, _ = _urlparse.urlparse(url)
if scheme not in ['http', 'https']:
raise ValueError(url + " isn't a valid http url")
self.url = url
self.timeout = int(timeout)
self._headers = dict()
self.trust_all_ssl_certificates = trust_all_ssl_certificates
if token is None:
raise ValueError('Authentication is required for async methods')
self._headers['AUTHORIZATION'] = token
if self.timeout < 1:
raise ValueError('Timeout value must be at least 1 second')
def _call(self, method, params, json_rpc_call_context = None):
arg_hash = {'method': method,
'params': params,
'version': '1.1',
'id': str(_random.random())[2:]
}
if json_rpc_call_context:
arg_hash['context'] = json_rpc_call_context
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
ret = _requests.post(self.url, data=body, headers=self._headers,
timeout=self.timeout,
verify=not self.trust_all_ssl_certificates)
if ret.status_code == _requests.codes.server_error:
if 'content-type' in ret.headers and ret.headers['content-type'] == 'application/json':
err = json.loads(ret.text)
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, ret.text)
else:
raise ServerError('Unknown', 0, ret.text)
if ret.status_code != _requests.codes.OK:
ret.raise_for_status()
resp = json.loads(ret.text)
if 'result' not in resp:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
return resp['result']
def run_job(self, run_job_params, json_rpc_call_context = None):
return self._call('KBaseJobService.run_job', [run_job_params], json_rpc_call_context)[0]
def check_job(self, job_id, json_rpc_call_context = None):
return self._call('KBaseJobService.check_job', [job_id], json_rpc_call_context)[0]
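# For reference, _call() above posts a JSON-RPC 1.1 body shaped like the
# following (method/params/id values are illustrative):
#   {"method": "KBaseJobService.run_job",
#    "params": [{"method": "GenomeFeatureComparator.compare_genome_features",
#                "params": [...], "rpc_context": {...}}],
#    "version": "1.1",
#    "id": "844218515250979"}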
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = ServerError()
newerr.trace = traceback.format_exc()
newerr.data = e.message
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if self.method_data[request['method']].has_key('types'): # @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'GenomeFeatureComparator'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_GenomeFeatureComparator.compare_genome_features,
name='GenomeFeatureComparator.compare_genome_features',
types=[object])
self.method_authentication['GenomeFeatureComparator.compare_genome_features'] = 'required'
self.auth_client = biokbase.nexus.Client(
config={'server': 'nexus.api.globusonline.org',
'verify_ssl': True,
'client': None,
'client_secret': None})
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {'call_stack': [{'time':self.now_in_utc(), 'method': req['method']}]}
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
if method_name in async_run_methods:
method_name = async_run_methods[method_name][0] + "." + async_run_methods[method_name][1]
if method_name in async_check_methods:
method_name = async_check_methods[method_name][0] + "." + async_check_methods[method_name][1]
auth_req = self.method_authentication.get(method_name,
"none")
if auth_req != "none":
if token is None and auth_req == 'required':
err = ServerError()
err.data = "Authentication required for " + \
"GenomeFeatureComparator but no authentication header was passed"
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user, _, _ = \
self.auth_client.validate_token(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception, e:
if auth_req == 'required':
err = ServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
method_name = req['method']
if method_name in async_run_methods or method_name in async_check_methods:
if method_name in async_run_methods:
orig_method_pair = async_run_methods[method_name]
else:
orig_method_pair = async_check_methods[method_name]
orig_method_name = orig_method_pair[0] + '.' + orig_method_pair[1]
if 'required' != self.method_authentication.get(orig_method_name, 'none'):
err = ServerError()
err.data = 'Async method ' + orig_method_name + ' should require ' + \
'authentication, but it has authentication level: ' + \
self.method_authentication.get(orig_method_name, 'none')
raise err
job_service_client = AsyncJobServiceClient(token = ctx['token'])
if method_name in async_run_methods:
run_job_params = {
'method': orig_method_name,
'params': req['params']}
if 'rpc_context' in ctx:
run_job_params['rpc_context'] = ctx['rpc_context']
job_id = job_service_client.run_job(run_job_params)
respond = {'version': '1.1', 'result': [job_id], 'id': req['id']}
rpc_result = json.dumps(respond, cls=JSONObjectEncoder)
status = '200 OK'
else:
job_id = req['params'][0]
job_state = job_service_client.check_job(job_id)
finished = job_state['finished']
if finished != 0 and 'error' in job_state and job_state['error'] is not None:
err = {'error': job_state['error']}
rpc_result = self.process_error(err, ctx, req, None)
else:
respond = {'version': '1.1', 'result': [job_state], 'id': req['id']}
rpc_result = json.dumps(respond, cls=JSONObjectEncoder)
status = '200 OK'
else:
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception, e:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'The request method was %s\n' % environ['REQUEST_METHOD']
# print 'The environment dictionary is:\n%s\n' % pprint.pformat(environ) @IgnorePep8
# print 'The request body was: %s' % request_body
# print 'The result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
if 'error' not in error['error'] or error['error']['error'] is None:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh,mm = divmod((delta.days * 24*60*60 + delta.seconds + 30) // 60, 60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run a using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {
'': application
}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
in the main thread. Excecution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
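# Illustrative use of the helpers above (host/port values are examples only):
#   port = start_server(host='localhost', port=9999, newprocess=True)
#   # ... issue JSON-RPC POSTs against http://localhost:9999 ...
#   stop_server()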
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
user, _, _ = application.auth_client.validate_token(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
ctx['rpc_context'] = req['context']
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception, e:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if len(sys.argv) == 4 and os.path.isfile(sys.argv[1]):
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], sys.argv[3]))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
| kbase/narrative_method_store | test/data/test_repo_1/service/GenomeFeatureComparatorServer.py | Python | mit | 25,877 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import copy
import warnings
from itertools import chain
from django.core.exceptions import ImproperlyConfigured
from django.utils import six
from haystack import fields as haystack_fields
from haystack.query import EmptySearchQuerySet
from haystack.utils import Highlighter
from rest_framework import serializers
from rest_framework.compat import OrderedDict
from rest_framework.fields import empty
from rest_framework.utils.field_mapping import ClassLookupDict, get_field_kwargs
from .fields import (
HaystackBooleanField, HaystackCharField, HaystackDateField, HaystackDateTimeField,
HaystackDecimalField, HaystackFloatField, HaystackIntegerField
)
class HaystackSerializer(serializers.Serializer):
"""
A `HaystackSerializer` which populates fields based on
which models that are available in the SearchQueryset.
"""
_field_mapping = ClassLookupDict({
haystack_fields.BooleanField: HaystackBooleanField,
haystack_fields.CharField: HaystackCharField,
haystack_fields.DateField: HaystackDateField,
haystack_fields.DateTimeField: HaystackDateTimeField,
haystack_fields.DecimalField: HaystackDecimalField,
haystack_fields.EdgeNgramField: HaystackCharField,
haystack_fields.FacetBooleanField: HaystackBooleanField,
haystack_fields.FacetCharField: HaystackCharField,
haystack_fields.FacetDateField: HaystackDateField,
haystack_fields.FacetDateTimeField: HaystackDateTimeField,
haystack_fields.FacetDecimalField: HaystackDecimalField,
haystack_fields.FacetFloatField: HaystackFloatField,
haystack_fields.FacetIntegerField: HaystackIntegerField,
haystack_fields.FacetMultiValueField: HaystackCharField,
haystack_fields.FloatField: HaystackFloatField,
haystack_fields.IntegerField: HaystackIntegerField,
haystack_fields.LocationField: HaystackCharField,
haystack_fields.MultiValueField: HaystackCharField,
haystack_fields.NgramField: HaystackCharField,
})
def __init__(self, instance=None, data=empty, **kwargs):
super(HaystackSerializer, self).__init__(instance, data, **kwargs)
try:
if not hasattr(self.Meta, "index_classes") and not hasattr(self.Meta, "serializers"):
raise ImproperlyConfigured("You must set either the 'index_classes' or 'serializers' "
"attribute on the serializer Meta class.")
except AttributeError:
raise ImproperlyConfigured("%s must implement a Meta class." % self.__class__.__name__)
if not self.instance:
self.instance = EmptySearchQuerySet()
@staticmethod
def _get_default_field_kwargs(model, field):
"""
Get the required attributes from the model field in order
to instantiate a REST Framework serializer field.
"""
kwargs = {}
if field.model_attr in model._meta.get_all_field_names():
model_field = model._meta.get_field_by_name(field.model_attr)[0]
kwargs = get_field_kwargs(field.model_attr, model_field)
# Remove stuff we don't care about!
delete_attrs = [
"allow_blank",
"choices",
"model_field",
]
for attr in delete_attrs:
if attr in kwargs:
del kwargs[attr]
return kwargs
def get_fields(self):
"""
Get the required fields for serializing the result.
"""
fields = getattr(self.Meta, "fields", [])
exclude = getattr(self.Meta, "exclude", [])
if fields and exclude:
raise ImproperlyConfigured("Cannot set both `fields` and `exclude`.")
ignore_fields = getattr(self.Meta, "ignore_fields", [])
indices = getattr(self.Meta, "index_classes")
declared_fields = copy.deepcopy(self._declared_fields)
prefix_field_names = len(indices) > 1
field_mapping = OrderedDict()
# overlapping fields on multiple indices is supported by internally prefixing the field
# names with the index class to which they belong or, optionally, a user-provided alias
# for the index.
for index_cls in self.Meta.index_classes:
prefix = ""
if prefix_field_names:
prefix = "_%s__" % self._get_index_class_name(index_cls)
for field_name, field_type in six.iteritems(index_cls.fields):
orig_name = field_name
field_name = "%s%s" % (prefix, field_name)
# This has become a little more complex, but provides convenient flexibility for users
if not exclude:
if orig_name not in fields and field_name not in fields:
continue
elif orig_name in exclude or field_name in exclude or orig_name in ignore_fields or field_name in ignore_fields:
continue
# Look up the field attributes on the current index model,
# in order to correctly instantiate the serializer field.
model = index_cls().get_model()
kwargs = self._get_default_field_kwargs(model, field_type)
kwargs['prefix_field_names'] = prefix_field_names
field_mapping[field_name] = self._field_mapping[field_type](**kwargs)
# Add any explicitly declared fields. They *will* override any index fields
# in case of naming collision!.
if declared_fields:
for field_name in declared_fields:
if field_name in field_mapping:
warnings.warn("Field '{field}' already exists in the field list. This *will* "
"overwrite existing field '{field}'".format(field=field_name))
field_mapping[field_name] = declared_fields[field_name]
return field_mapping
def to_representation(self, instance):
"""
If we have a serializer mapping, use that. Otherwise, use standard serializer behavior
Since we might be dealing with multiple indexes, some fields might
not be valid for all results. Do not render the fields which don't belong
to the search result.
"""
if getattr(self.Meta, "serializers", None):
ret = self.multi_serializer_representation(instance)
else:
ret = super(HaystackSerializer, self).to_representation(instance)
prefix_field_names = len(getattr(self.Meta, "index_classes")) > 1
current_index = self._get_index_class_name(type(instance.searchindex))
for field in self.fields.keys():
orig_field = field
if prefix_field_names:
parts = field.split("__")
if len(parts) > 1:
index = parts[0][1:] # trim the preceding '_'
field = parts[1]
if index == current_index:
ret[field] = ret[orig_field]
del ret[orig_field]
elif field not in chain(instance.searchindex.fields.keys(), self._declared_fields.keys()):
del ret[orig_field]
# include the highlighted field in either case
if getattr(instance, "highlighted", None):
ret["highlighted"] = instance.highlighted[0]
return ret
def multi_serializer_representation(self, instance):
serializers = self.Meta.serializers
index = instance.searchindex
serializer_class = serializers.get(type(index), None)
if not serializer_class:
raise ImproperlyConfigured("Could not find serializer for %s in mapping" % index)
return serializer_class(context=self._context).to_representation(instance)
def _get_index_class_name(self, index_cls):
"""
Converts in index model class to a name suitable for use as a field name prefix. A user
may optionally specify custom aliases via an 'index_aliases' attribute on the Meta class
"""
cls_name = index_cls.__name__
aliases = getattr(self.Meta, "index_aliases", {})
return aliases.get(cls_name, cls_name.split('.')[-1])
class HaystackSerializerMixin(object):
"""
    This mixin can be added to a serializer to use the actual object as the data source for serialization rather
    than the data stored in the search index fields. This makes it easy to return data from search results in
    the same format as elsewhere in your API and reuse your existing serializers.
"""
def to_representation(self, instance):
obj = instance.object
return super(HaystackSerializerMixin, self).to_representation(obj)
class HighlighterMixin(object):
"""
    This mixin adds support for ``highlighting`` (the pure Python, portable
    version, not SearchQuerySet().highlight()). See the Haystack docs
    for more info.
"""
highlighter_class = Highlighter
highlighter_css_class = "highlighted"
highlighter_html_tag = "span"
highlighter_max_length = 200
highlighter_field = None
def get_highlighter(self):
if not self.highlighter_class:
raise ImproperlyConfigured(
"%(cls)s is missing a highlighter_class. Define %(cls)s.highlighter_class, "
"or override %(cls)s.get_highlighter()." %
{"cls": self.__class__.__name__}
)
return self.highlighter_class
@staticmethod
def get_document_field(instance):
"""
Returns which field the search index has marked as it's
`document=True` field.
"""
for name, field in instance.searchindex.fields.items():
if field.document is True:
return name
def to_representation(self, instance):
ret = super(HighlighterMixin, self).to_representation(instance)
terms = " ".join(six.itervalues(self.context["request"].GET))
if terms:
highlighter = self.get_highlighter()(terms, **{
"html_tag": self.highlighter_html_tag,
"css_class": self.highlighter_css_class,
"max_length": self.highlighter_max_length
})
document_field = self.get_document_field(instance)
if highlighter and document_field:
ret["highlighted"] = highlighter.highlight(getattr(instance, self.highlighter_field or document_field))
return ret
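# Minimal usage sketch (PersonIndex is a hypothetical search index class; the
# Meta attributes shown are the ones consumed by HaystackSerializer above):
#
#   class PersonSearchSerializer(HaystackSerializer):
#       class Meta:
#           index_classes = [PersonIndex]
#           fields = ["firstname", "lastname", "full_name"]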
| fladi/drf-haystack | drf_haystack/serializers.py | Python | mit | 10,695 |
import logging
import time
from scrapy.dupefilters import BaseDupeFilter
from scrapy.utils.request import request_fingerprint
from . import defaults
from .connection import get_redis_from_settings
logger = logging.getLogger(__name__)
# TODO: Rename class to RedisDupeFilter.
class RFPDupeFilter(BaseDupeFilter):
"""Redis-based request duplicates filter.
    This class can also be used with Scrapy's default scheduler.
"""
logger = logger
def __init__(self, server, key, debug=False):
"""Initialize the duplicates filter.
Parameters
----------
server : redis.StrictRedis
The redis server instance.
key : str
            Redis key where to store fingerprints.
debug : bool, optional
Whether to log filtered requests.
"""
self.server = server
self.key = key
self.debug = debug
self.logdupes = True
@classmethod
def from_settings(cls, settings):
"""Returns an instance from given settings.
This uses by default the key ``dupefilter:<timestamp>``. When using the
``scrapy_redis.scheduler.Scheduler`` class, this method is not used as
it needs to pass the spider name in the key.
Parameters
----------
settings : scrapy.settings.Settings
Returns
-------
RFPDupeFilter
A RFPDupeFilter instance.
"""
server = get_redis_from_settings(settings)
        # XXX: This creates a one-time key, needed to support using this
        # class as a standalone dupefilter with Scrapy's default scheduler.
        # If Scrapy passed the spider to the open() method, this wouldn't be needed.
# TODO: Use SCRAPY_JOB env as default and fallback to timestamp.
key = defaults.DUPEFILTER_KEY % {'timestamp': int(time.time())}
debug = settings.getbool('DUPEFILTER_DEBUG')
return cls(server, key=key, debug=debug)
@classmethod
def from_crawler(cls, crawler):
"""Returns instance from crawler.
Parameters
----------
crawler : scrapy.crawler.Crawler
Returns
-------
RFPDupeFilter
Instance of RFPDupeFilter.
"""
return cls.from_settings(crawler.settings)
def request_seen(self, request):
"""Returns True if request was already seen.
Parameters
----------
request : scrapy.http.Request
Returns
-------
bool
"""
fp = self.request_fingerprint(request)
# This returns the number of values added, zero if already exists.
added = self.server.sadd(self.key, fp)
return added == 0
def request_fingerprint(self, request):
"""Returns a fingerprint for a given request.
Parameters
----------
request : scrapy.http.Request
Returns
-------
str
"""
return request_fingerprint(request)
def close(self, reason=''):
"""Delete data on close. Called by Scrapy's scheduler.
Parameters
----------
reason : str, optional
"""
self.clear()
def clear(self):
"""Clears fingerprints data."""
self.server.delete(self.key)
def log(self, request, spider):
"""Logs given request.
Parameters
----------
request : scrapy.http.Request
spider : scrapy.spiders.Spider
"""
if self.debug:
msg = "Filtered duplicate request: %(request)s"
self.logger.debug(msg, {'request': request}, extra={'spider': spider})
elif self.logdupes:
msg = ("Filtered duplicate request %(request)s"
" - no more duplicates will be shown"
" (see DUPEFILTER_DEBUG to show all duplicates)")
self.logger.debug(msg, {'request': request}, extra={'spider': spider})
self.logdupes = False
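# Standalone usage sketch: point Scrapy's DUPEFILTER_CLASS at this class and
# fingerprints will be keyed under dupefilter:<timestamp> via from_settings()
# (the setting names below are standard Scrapy settings):
#
#   # settings.py
#   DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
#   DUPEFILTER_DEBUG = True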
| hanbox/spider_template | spider_template/scrapy_redis/dupefilter.py | Python | mit | 3,992 |
# -*- coding: utf-8 -*-
# Generated by Django 1.10.3 on 2016-11-11 03:13
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('administration', '0014_auto_20161111_0255'),
]
operations = [
migrations.AlterField(
model_name='flag',
name='resolved_by',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='resolved_flags', to=settings.AUTH_USER_MODEL),
),
]
| makyo/honeycomb | administration/migrations/0015_auto_20161111_0313.py | Python | mit | 629 |
"""Animation.
An animation is a set of keyframes.
The value of a selected attribute changes over time.
Keyframe:
(time, value)
Objects have an animation manager which manages the animation graph and switching."""
from operator import itemgetter
from eaf import Timer
from xoinvader.utils import Point
class AnimationBoundariesExceeded(Exception):
"""Exception to show that interpolated value will be incorrect."""
def __init__(self, first, current_time, second):
super(AnimationBoundariesExceeded, self).__init__(
f"Animation frame boundaries exceeded: {first} <= {current_time} <= {second}",
)
class InterpolationUnknownTypes(Exception):
"""Such type combination is unsupported."""
def __init__(self, first, second):
super(InterpolationUnknownTypes, self).__init__(
self, f"Unknown types of interpolating values: {first} and {second}"
)
# TODO: Implement animation graph and etc
class AnimationManager(object):
"""Manage list of object animation."""
def __init__(self):
self._animations = {}
self._animation = None
@property
def animation(self):
"""AnimationManager's current animation name.
To set animation - assign it's name.
:getter: yes
:setter: yes
:type: str
"""
if self._animation:
return self._animation.name
else:
raise AttributeError("There is no available animation.")
@animation.setter
def animation(self, name):
if name in self._animations:
self._animation = self._animations[name]
else:
raise ValueError(f"No such animation: '{name}'.")
def add(self, name, *args, **kwargs):
"""Add new animation, pass args to Animation class.
See interface of `class::xoinvader.animation.Animation`.
:param str name: animation name
"""
animation = Animation(name, *args, **kwargs)
self._animations[name] = animation
if not self._animation:
self._animation = animation
def update(self, dt):
"""Update manager's state."""
if not self._animation:
return
try:
self._animation.update(dt)
except StopIteration:
return # TODO: think about method to change animation
# pylint: disable=too-many-instance-attributes,too-many-arguments
# pylint: disable=too-few-public-methods
class Animation(object):
"""Animation unit.
Animation object holds sorted list of (time, value) items and changes
selected attribute of bound object according to local animation time.
Time measured by timer. When current time is greater or equal then time
of next keyframe - animation object changes it to appropriate value.
When animation is done and if not looped - raise StopIteration.
In case of interpolated animation value calculation occurs within two
bounding frames and on frame switch.
:param str name: animation name
:param object bind: object to bind animation
:param str attr: attribute to change in frames
:param list keyframes: (float, object) tuples
:param bool interp: interpolate values between frames or not
:param bool loop: loop animation or not
"""
def __init__(self, name, bind, attr, keyframes, interp=False, loop=False):
self._name = name
self._obj = bind
self._attr = attr
if not keyframes:
raise ValueError("Animation keyframes must not be empty.")
self._keyframes = sorted(keyframes, key=itemgetter(0))
self._interp = interp
self._loop = loop
# Timer for tracking local time
self._timer = Timer(self._keyframes[-1][0], lambda: True)
self._timer.start()
# Current keyframe index
self._current = 0
if self._interp:
self.update = self._update_interpolated
else:
self.update = self._update_discrete
@property
def name(self):
"""Animation's name.
:getter: yes
:setter: no
:type: str
"""
return self._name
def _apply_value(self, value):
"""Apply new value to linked object.
:param obj value: value to apply
"""
setattr(self._obj, self._attr, value)
def _update_interpolated(self, dt):
"""Advance animation and interpolate value.
NOTE: animation frame switching depends on interp mode
animation with interpolation switches frame only when current local
time exceeds NEXT frames' time border.
"""
self._check_animation_state()
self._timer.update(dt)
current_time = self._timer.elapsed
keyframe = self._keyframes[self._current]
next_keyframe = self._keyframes[self._current + 1]
# it's time to switch keyframe
if current_time >= next_keyframe[0]:
self._current += 1
keyframe = self._keyframes[self._current]
if self._current == len(self._keyframes) - 1:
self._apply_value(keyframe[1])
self._current += 1
self._check_animation_state()
return
next_keyframe = self._keyframes[self._current + 1]
value = interpolate(keyframe, next_keyframe, current_time)
self._apply_value(value)
def _update_discrete(self, dt):
"""Advance animation without interpolating value.
NOTE: animation frame switching depends on interp mode
discrete animation swiches frame and updates value only if
current local time is >= time of current keyframe.
No need to worry about calculating value between frames - thus
no need to complicate behaviour.
"""
self._check_animation_state()
self._timer.update(dt)
keyframe = self._keyframes[self._current]
# Check if animation need to switch keyframe
if self._timer.elapsed >= keyframe[0]:
self._apply_value(keyframe[1])
self._current += 1
def _check_animation_state(self):
"""Check animation state and restart if needed.
:raise StopIteration: when animation exceeded frames.
"""
if len(self._keyframes) == self._current:
if self._loop:
self._current = 0
self._timer.restart()
else:
self._timer.stop()
raise StopIteration
def linear_equation(val1, val2, time1, time2, current_time):
"""Linear equation to get interpolated value.
:param float val1: first keyframe value
:param float val2: second keyframe value
:param float time1: first keyframe local time
:param float time2: second keyframe local time
:param float current_time: current animation local time
"""
return val1 + (val2 - val1) / (time2 - time1) * (current_time - time1)
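# Worked example: halfway between keyframes (0.0, 0.0) and (2.0, 10.0)
# the interpolated value is 5.0:
#   linear_equation(0.0, 10.0, 0.0, 2.0, 1.0) == 5.0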
def same_type(values, types):
"""Check if values are belongs to same type or type tuple.
:param collections.Iterable values: values to check type similarity
:param tuple|type types: type or tuple of types
"""
return all(map(lambda it: isinstance(it, types), values))
def interpolate(first, second, current_time):
"""Interpolate value by two bounding keyframes.
:param collections.Iterable first: first bounding keyframe
:param collections.Iterable second: second bounding keyframe
:param float current_time: current animation local time
:raises AnimationBoundariesExceeded: when time interval is invalid
:raises InterpolationUnknownTypes: when interpolating invalid types
"""
if not first[0] <= current_time <= second[0]:
raise AnimationBoundariesExceeded(first[0], current_time, second[0])
def frames_of(*args):
"""If frames both of specified type."""
return same_type((first[1], second[1]), args)
if frames_of(int, float):
value = linear_equation(
float(first[1]),
float(second[1]),
float(first[0]),
float(second[0]),
float(current_time),
)
elif frames_of(Point):
value = linear_equation(
first[1],
second[1],
float(first[0]),
float(second[0]),
float(current_time),
)
else:
raise InterpolationUnknownTypes(type(first[1]), type(second[1]))
return value
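# Usage sketch (assumed `sprite` object and per-frame `dt`; animates `pos`
# from (0, 0) to (10, 0) over one second using the keyframe format above):
#
#   manager = AnimationManager()
#   manager.add(
#       "move-right", bind=sprite, attr="pos",
#       keyframes=[(0.0, Point(0, 0)), (1.0, Point(10, 0))],
#       interp=True, loop=True)
#   manager.update(dt)  # call once per frame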
| pankshok/xoinvader | xoinvader/animation.py | Python | mit | 8,490 |
from __future__ import print_function
import sys
import os
import subprocess
path = os.path.abspath(__file__)
modpath = os.path.dirname(path)
base_dir = os.getcwd()
install_mode = 'normal'
plus = False
mon = False
venv = "y"
if len(sys.argv) > 1:
if '-django' in sys.argv:
install_mode = 'django'
elif '-dev' in sys.argv:
install_mode = 'dev'
elif '-modules' in sys.argv:
install_mode = 'modules'
elif "-q" is sys.argv:
install_mode = "default"
if '-plus' in sys.argv:
plus = True
if '-mon' in sys.argv:
mon = True
if '-noenv' in sys.argv:
venv = "n"
msg = 'What is the name of the project? > '
if sys.version_info[:2] <= (2, 7):
get_input = raw_input
else:
get_input = input
user_input = get_input(msg)
if user_input == "":
print("You must provide a project name")
sys.exit()
project_name = user_input
bscript = modpath + '/install/init/install.sh'
print("Starting install ...")
if not install_mode == 'modules':
subprocess.call([bscript, project_name, base_dir,
install_mode, modpath, venv])
pages_installed = "n"
if install_mode != 'django':
bscript = modpath + '/install/pages/install.sh'
subprocess.call([bscript, project_name, base_dir, install_mode, modpath])
# contact
bscript = modpath + '/install/contact/install.sh'
subprocess.call([bscript, project_name, base_dir, install_mode, modpath])
if install_mode != "default":
# real time
msg = 'Install the realtime modules? [y/N] > '
rt = "n"
user_input = get_input(msg)
if user_input == "y":
rt = "y"
bscript = modpath + '/install/real_time/install.sh'
subprocess.call([bscript, project_name, base_dir, modpath])
if plus is True:
# users
bscript = modpath + '/install/users/install.sh'
subprocess.call(
[bscript, project_name, base_dir, install_mode, modpath])
if mon is True:
bscript = modpath + '/install/mon/install.sh'
subprocess.call(
[bscript, project_name, base_dir, install_mode, modpath])
# end
bscript = modpath + '/install/end/install.sh'
subprocess.call([bscript, project_name, base_dir, install_mode, modpath, rt])
| synw/django-mogo | djangomogo/__main__.py | Python | mit | 2,304 |
# The MIT License (MIT)
# Copyright (c) 2016 HeyItsShuga
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import logging
import re
import html
from urllib.parse import urlsplit
import traceback
import requests
import mimeparse
import praw
class E621Plugin:
"""
    Mirrors e621 images using either their API or their CDN links.
Created by /u/HeyItsShuga
"""
def __init__(self, useragent: str, **options):
"""Initialize the e621 importer.
:param useragent: The useragent to use for querying e621.
:param options: Other options in the configuration. Ignored.
"""
self.log = logging.getLogger('lapis.e621')
self.headers = {'User-Agent': useragent}
self.regex = re.compile(
r'^https?://(((?:www\.)?(?:static1\.)?'
r'(?P<service>(e621)|(e926))\.net/(data/.+/(?P<md5>\w+))?'
r'(post/show/(?P<post_id>\d+)/?)?.*))$')
def import_submission(self, submission: praw.objects.Submission) -> dict:
"""Import a submission from e621.
This function will define the following values in its return data:
- author: simply "an anonymous user on e621"
- source: The url of the submission
- importer_display/header
- import_urls
After we define that, we need to get the image. Since e621 has an
API, we use that to try to get the image if the image is a non-CDN URL.
        If it is a CDN URL, we take the image directly and upload *that* to
Imgur.
image_url is the variable of the image to upload.
:param submission: A reddit submission to parse.
"""
try:
url = html.unescape(submission.url)
match = self.regex.match(submission.url)
if not match:
return None
r = requests.head(url, headers=self.headers)
mime_text = r.headers.get('Content-Type')
mime = mimeparse.parse_mime_type(mime_text)
if mime[0] == 'image':
md5 = match.group('md5')
service = match.group('service')
endpoint = 'http://e926.net/post/check_md5.json?md5=' + md5
self.log.debug('Will use MD5 checker endpoint at %s', endpoint)
callapi = requests.get(endpoint)
json = callapi.json()
post_id = json['post_id']
post_id = str(post_id)
else:
                self.log.debug('No CDN used, md5 retrieval not necessary.')
# For non-CDN links, the plugin attempts to get the post_id
# out of the URL using regex.
post_id = match.group('post_id')
endpoint = 'http://e926.net/post/show.json?id=' + post_id
service = match.group('service')
self.log.debug('Will use API endpoint at %s', endpoint)
# We will use the e621 API to get the image URL.
callapi = requests.get(endpoint)
json = callapi.json()
img = json['file_url']
author = json['artist']
author = ''.join(author) # Converts the list into a string to be used later.
data = {'author': author,
'source': url,
'importer_display':
{'header': 'Mirrored [image](https://' + service + '.net/post/show/' + post_id + ') \
by ' + service + ' artist [' + author + '](https://' + service + '.net/post/index/1/' + author + '\
):\n\n'}}
image_url = img
data['import_urls'] = [image_url]
return data
except Exception:
self.log.error('Could not import e621 URL %s (%s)',
submission.url, traceback.format_exc())
return None
__plugin__ = E621Plugin
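# A minimal regex sketch (illustrative, not part of the plugin): the URL
# pattern captures an `md5` group for CDN image links and a `post_id`
# group for post pages, which is what import_submission branches on.
# The sample URLs below are made up for demonstration.
if __name__ == '__main__':
    plugin = E621Plugin(useragent='lapis-demo/0.1')
    cdn = plugin.regex.match(
        'https://static1.e621.net/data/ab/cd/abcdef0123456789.png')
    print(cdn.group('service'), cdn.group('md5'))         # e621 abcdef0123456789
    post = plugin.regex.match('https://e621.net/post/show/123456/')
    print(post.group('service'), post.group('post_id'))   # e621 123456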
# END OF LINE.
| kupiakos/LapisMirror | plugins/e621.py | Python | mit | 4,898 |
import os
from fabric.decorators import task
from fabric.api import local, run, cd, env, prefix, hide
from fabric.colors import cyan, red, green, yellow
import app
import git
import virtualenv
@task
def init():
"""Execute init tasks for all components (virtualenv, pip)."""
print(yellow("# Setting up development environment...\n", True))
virtualenv.init()
virtualenv.update()
print(green("\n# DONE.", True))
print(green("Type ") + green("activate", True) + green(" to enable your dev virtual environment."))
@task
def update():
"""Update virtual env with requirements packages."""
virtualenv.update()
@task
def dev():
"""Setting up Development mode."""
print(yellow("# Setting up development environment...\n", True))
virtualenv.init()
virtualenv.update()
print(green("\n# DONE.", True))
print(green("Type ") + green("activate", True) + green(" to enable your dev virtual environment."))
@task
def clean():
"""Clean .pyc files"""
app.clean()
| ronhanson/python-tbx | fabfile/__init__.py | Python | mit | 1,018 |
#!/usr/bin/python3
from messaging.messaging_manager import MessagingManager, MessagingManagerType
from common.monitoring import monitor_event_types as event_types
from common.monitoring.monitor_message import MonitorMessage
from inotify.adapters import Inotify
from inotify import constants as inotify_constants
from threading import Event, Timer
from pprint import pprint
__event_type_to_flag_mapping = {
event_types.FILE_CREATED: inotify_constants.IN_CREATE,
event_types.FILE_NAME_CHANGED: inotify_constants.IN_MOVE,
event_types.FILE_CONTENT_CHANGED: inotify_constants.IN_MODIFY,
event_types.FILE_INCLUDED: inotify_constants.IN_MOVED_TO,
event_types.FILE_EXCLUDED: inotify_constants.IN_MOVED_FROM,
event_types.FILE_DELETED: inotify_constants.IN_DELETE,
event_types.FILE_METADATA_CHANGED: inotify_constants.IN_ATTRIB,
event_types.DIRECTORY_REPLACED: inotify_constants.IN_MOVE_SELF,
event_types.DIRECTORY_DELETED: inotify_constants.IN_DELETE_SELF
}
def get_event_type_by_mask(event_mask):
for key in __event_type_to_flag_mapping.keys():
value = __event_type_to_flag_mapping.get(key)
if event_mask & value == value:
return key
def get_message_by_event(event):
(header, type_names, watch_path, file_name) = event
monitor_message = MonitorMessage()
monitor_message.target_directory = watch_path.decode("utf-8")
monitor_message.target_file = file_name.decode("utf-8")
monitor_message.event_type = get_event_type_by_mask(header.mask)
return monitor_message
def get_message_by_event_pair(first_event, second_event):
(first_event_header, _, watch_path, first_file_name) = first_event
(second_event_header, _, _, second_file_name) = second_event
monitor_message = MonitorMessage()
if first_event_header.mask == inotify_constants.IN_MOVED_FROM and second_event_header.mask == inotify_constants.IN_MOVED_TO:
monitor_message.target_directory = watch_path.decode("utf-8")
monitor_message.target_file = first_file_name.decode("utf-8")
monitor_message.additional_information = second_file_name.decode("utf-8")
monitor_message.event_type = event_types.FILE_NAME_CHANGED
return monitor_message
def is_first_pair_event(event_header):
return event_header.mask == inotify_constants.IN_MOVED_FROM
def is_second_pair_event(event_header):
return event_header.mask == inotify_constants.IN_MOVED_TO
def process_possible_pair_event(current_event, pair_event_list):
    """Collapse an IN_MOVED_FROM/IN_MOVED_TO pair into one rename message.
    Returns None when nothing was buffered and the event starts no pair
    (the caller builds a single message itself), an empty list when the
    event was buffered to wait for its partner, or a list of messages
    ready to be sent.
    """
    (header, _, watch_path, _) = current_event
if len(pair_event_list) == 0 and is_first_pair_event(header):
pair_event_list.append(current_event)
return []
elif len(pair_event_list) != 0:
previous_event = pair_event_list.pop()
(_, _, previous_watch_path, _) = previous_event
if previous_watch_path == watch_path and is_second_pair_event(header):
pair_events_message = get_message_by_event_pair(previous_event, current_event)
return [ pair_events_message ]
elif is_first_pair_event(header):
pair_event_list.append(current_event)
previous_event_message = get_message_by_event(previous_event)
return [ previous_event_message ]
else:
previous_event_message = get_message_by_event(previous_event)
current_event_message = get_message_by_event(current_event)
return [ previous_event_message, current_event_message ]
return None
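# A minimal illustrative sketch (an assumption, not part of the module):
# two synthetic inotify-style events showing how an IN_MOVED_FROM /
# IN_MOVED_TO pair collapses into a single FILE_NAME_CHANGED message.
# The fake header only needs a `.mask` attribute, mirroring inotify's
# event header.
def _demo_rename_pair():
    from collections import namedtuple
    Header = namedtuple("Header", "mask")
    moved_from = (Header(inotify_constants.IN_MOVED_FROM), [], b"/tmp", b"old.txt")
    moved_to = (Header(inotify_constants.IN_MOVED_TO), [], b"/tmp", b"new.txt")
    pending = []
    assert process_possible_pair_event(moved_from, pending) == []  # buffered
    return process_possible_pair_event(moved_to, pending)  # [rename message]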
def process_received_messages(monitor, messages_list):
print("message processing")
pass
def process_received_events(monitor, monitor_messaging_manager, pair_events_list, stop_events_processing_flag):
for event in monitor.event_gen():
if event is not None:
messages = process_possible_pair_event(event, pair_events_list)
if messages is None:
message = get_message_by_event(event)
monitor_messaging_manager.send_message(str(message))
else:
for message in messages:
monitor_messaging_manager.send_message(str(message))
else:
if stop_events_processing_flag.is_set():
stop_events_processing_flag.clear()
break
def main():
monitor = Inotify()
monitor_messaging_manager = MessagingManager(MessagingManagerType.SERVER, "tcp://127.0.0.1:5555")
watch_paths_list = [ b'/home/anton/test/' ]
pair_events_list = []
stop_events_processing_flag = Event()
for path in watch_paths_list:
monitor.add_watch(path, inotify_constants.IN_MOVE | inotify_constants.IN_MOVED_TO | inotify_constants.IN_MOVED_FROM | inotify_constants.IN_CREATE)
try:
while True:
messages = monitor_messaging_manager.get_all_received_messages()
process_received_messages(monitor, messages)
stop_events_processing_timer = Timer(10, stop_events_processing_flag.set)
stop_events_processing_timer.start()
process_received_events(monitor, monitor_messaging_manager, pair_events_list, stop_events_processing_flag)
except KeyboardInterrupt:
for path in watch_paths_list:
monitor.remove_watch(path)
raise SystemExit()
if __name__ == "__main__":
main()
| AntonPashkowskiy/Tars | Program/monitoring/monitor.py | Python | mit | 5,353 |
import pytest
pytestmark = [pytest.mark.setone, pytest.mark.working]
@pytest.fixture
def data_release_update_1(award, lab):
    return {
"schema_version": '1',
"award": award['@id'],
"lab": lab['@id'],
"summary": "Upgrader test.",
"update_tag": "UPGRADERTEST",
"submitted_by": "4dndcic@gmail.com",
"severity": 1,
"is_internal": False,
"parameters": [
"tags=4DN Joint Analysis 2018"
],
"comments": "Test upgrader",
"foursight_uuid": "2018-02-12T16:54:38.526810+00:00",
"end_date": "2018-02-14",
"start_date": "2018-02-13",
"update_items": [
{
"primary_id": "431106bc-8535-4448-903e-854af460b112",
"secondary_id": "431106bc-8535-4448-903e-854af460b112"
}
]
}
@pytest.fixture
def data_release_update_2(award, lab):
    return {
"schema_version": '2',
"award": award['@id'],
"lab": lab['@id'],
"summary": "Upgrader test.",
"update_tag": "UPGRADERTEST",
"submitted_by": "4dndcic@gmail.com",
"severity": 1,
"is_internal": False,
"parameters": [
"tags=4DN Joint Analysis 2018"
],
"comments": "Test upgrader 2 to 3",
"foursight_uuid": "2018-02-12T16:54:38.526810+00:00",
"end_date": "2018-02-14",
"start_date": "2018-02-13",
"update_items": [
{
"primary_id": "431106bc-8535-4448-903e-854af460b112",
"secondary_ids": ["431106bc-8535-4448-903e-854af460b112"]
}
]
}
def test_data_release_updates_secondary_id_to_secondary_ids(
app, data_release_update_1):
migrator = app.registry['upgrader']
value = migrator.upgrade('data_release_update', data_release_update_1, current_version='1', target_version='2')
assert value['schema_version'] == '2'
update_items = value['update_items']
assert len(update_items) == 1
assert 'primary_id' in update_items[0]
assert 'secondary_ids' in update_items[0]
assert 'secondary_id' not in update_items[0]
assert isinstance(update_items[0]['secondary_ids'], list)
assert len(update_items[0]['secondary_ids']) == 1
def test_data_release_updates_secondary_ids_to_objects(
app, data_release_update_2):
"""
Needed because secondary IDs got the 'additional_info' field and are now
an array of objects
"""
migrator = app.registry['upgrader']
value = migrator.upgrade('data_release_update', data_release_update_2, current_version='2', target_version='3')
assert value['schema_version'] == '3'
update_items = value['update_items']
assert len(update_items) == 1
assert 'primary_id' in update_items[0]
assert 'secondary_ids' in update_items[0]
assert isinstance(update_items[0]['secondary_ids'], list)
assert len(update_items[0]['secondary_ids']) == 1
assert isinstance(update_items[0]['secondary_ids'][0], dict)
assert 'secondary_id' in update_items[0]['secondary_ids'][0]
assert 'additional_info' in update_items[0]['secondary_ids'][0]
| 4dn-dcic/fourfront | src/encoded/tests/test_upgrade_data_release_update.py | Python | mit | 3,172 |
# For Capstone Engine. AUTO-GENERATED FILE, DO NOT EDIT [mos65xx_const.py]
MOS65XX_REG_INVALID = 0
MOS65XX_REG_ACC = 1
MOS65XX_REG_X = 2
MOS65XX_REG_Y = 3
MOS65XX_REG_P = 4
MOS65XX_REG_SP = 5
MOS65XX_REG_DP = 6
MOS65XX_REG_B = 7
MOS65XX_REG_K = 8
MOS65XX_REG_ENDING = 9
MOS65XX_AM_NONE = 0
MOS65XX_AM_IMP = 1
MOS65XX_AM_ACC = 2
MOS65XX_AM_IMM = 3
MOS65XX_AM_REL = 4
MOS65XX_AM_INT = 5
MOS65XX_AM_BLOCK = 6
MOS65XX_AM_ZP = 7
MOS65XX_AM_ZP_X = 8
MOS65XX_AM_ZP_Y = 9
MOS65XX_AM_ZP_REL = 10
MOS65XX_AM_ZP_IND = 11
MOS65XX_AM_ZP_X_IND = 12
MOS65XX_AM_ZP_IND_Y = 13
MOS65XX_AM_ZP_IND_LONG = 14
MOS65XX_AM_ZP_IND_LONG_Y = 15
MOS65XX_AM_ABS = 16
MOS65XX_AM_ABS_X = 17
MOS65XX_AM_ABS_Y = 18
MOS65XX_AM_ABS_IND = 19
MOS65XX_AM_ABS_X_IND = 20
MOS65XX_AM_ABS_IND_LONG = 21
MOS65XX_AM_ABS_LONG = 22
MOS65XX_AM_ABS_LONG_X = 23
MOS65XX_AM_SR = 24
MOS65XX_AM_SR_IND_Y = 25
MOS65XX_INS_INVALID = 0
MOS65XX_INS_ADC = 1
MOS65XX_INS_AND = 2
MOS65XX_INS_ASL = 3
MOS65XX_INS_BBR = 4
MOS65XX_INS_BBS = 5
MOS65XX_INS_BCC = 6
MOS65XX_INS_BCS = 7
MOS65XX_INS_BEQ = 8
MOS65XX_INS_BIT = 9
MOS65XX_INS_BMI = 10
MOS65XX_INS_BNE = 11
MOS65XX_INS_BPL = 12
MOS65XX_INS_BRA = 13
MOS65XX_INS_BRK = 14
MOS65XX_INS_BRL = 15
MOS65XX_INS_BVC = 16
MOS65XX_INS_BVS = 17
MOS65XX_INS_CLC = 18
MOS65XX_INS_CLD = 19
MOS65XX_INS_CLI = 20
MOS65XX_INS_CLV = 21
MOS65XX_INS_CMP = 22
MOS65XX_INS_COP = 23
MOS65XX_INS_CPX = 24
MOS65XX_INS_CPY = 25
MOS65XX_INS_DEC = 26
MOS65XX_INS_DEX = 27
MOS65XX_INS_DEY = 28
MOS65XX_INS_EOR = 29
MOS65XX_INS_INC = 30
MOS65XX_INS_INX = 31
MOS65XX_INS_INY = 32
MOS65XX_INS_JML = 33
MOS65XX_INS_JMP = 34
MOS65XX_INS_JSL = 35
MOS65XX_INS_JSR = 36
MOS65XX_INS_LDA = 37
MOS65XX_INS_LDX = 38
MOS65XX_INS_LDY = 39
MOS65XX_INS_LSR = 40
MOS65XX_INS_MVN = 41
MOS65XX_INS_MVP = 42
MOS65XX_INS_NOP = 43
MOS65XX_INS_ORA = 44
MOS65XX_INS_PEA = 45
MOS65XX_INS_PEI = 46
MOS65XX_INS_PER = 47
MOS65XX_INS_PHA = 48
MOS65XX_INS_PHB = 49
MOS65XX_INS_PHD = 50
MOS65XX_INS_PHK = 51
MOS65XX_INS_PHP = 52
MOS65XX_INS_PHX = 53
MOS65XX_INS_PHY = 54
MOS65XX_INS_PLA = 55
MOS65XX_INS_PLB = 56
MOS65XX_INS_PLD = 57
MOS65XX_INS_PLP = 58
MOS65XX_INS_PLX = 59
MOS65XX_INS_PLY = 60
MOS65XX_INS_REP = 61
MOS65XX_INS_RMB = 62
MOS65XX_INS_ROL = 63
MOS65XX_INS_ROR = 64
MOS65XX_INS_RTI = 65
MOS65XX_INS_RTL = 66
MOS65XX_INS_RTS = 67
MOS65XX_INS_SBC = 68
MOS65XX_INS_SEC = 69
MOS65XX_INS_SED = 70
MOS65XX_INS_SEI = 71
MOS65XX_INS_SEP = 72
MOS65XX_INS_SMB = 73
MOS65XX_INS_STA = 74
MOS65XX_INS_STP = 75
MOS65XX_INS_STX = 76
MOS65XX_INS_STY = 77
MOS65XX_INS_STZ = 78
MOS65XX_INS_TAX = 79
MOS65XX_INS_TAY = 80
MOS65XX_INS_TCD = 81
MOS65XX_INS_TCS = 82
MOS65XX_INS_TDC = 83
MOS65XX_INS_TRB = 84
MOS65XX_INS_TSB = 85
MOS65XX_INS_TSC = 86
MOS65XX_INS_TSX = 87
MOS65XX_INS_TXA = 88
MOS65XX_INS_TXS = 89
MOS65XX_INS_TXY = 90
MOS65XX_INS_TYA = 91
MOS65XX_INS_TYX = 92
MOS65XX_INS_WAI = 93
MOS65XX_INS_WDM = 94
MOS65XX_INS_XBA = 95
MOS65XX_INS_XCE = 96
MOS65XX_INS_ENDING = 97
MOS65XX_GRP_INVALID = 0
MOS65XX_GRP_JUMP = 1
MOS65XX_GRP_CALL = 2
MOS65XX_GRP_RET = 3
MOS65XX_GRP_INT = 4
MOS65XX_GRP_IRET = 5
MOS65XX_GRP_BRANCH_RELATIVE = 6
MOS65XX_GRP_ENDING = 7
MOS65XX_OP_INVALID = 0
MOS65XX_OP_REG = 1
MOS65XX_OP_IMM = 2
MOS65XX_OP_MEM = 3
| capstone-rust/capstone-rs | capstone-sys/capstone/bindings/python/capstone/mos65xx_const.py | Python | mit | 3,179 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Tangible()
result.template = "object/tangible/component/weapon/shared_reinforcement_core.iff"
result.attribute_template_id = -1
result.stfName("craft_weapon_ingredients_n","reinforcement_core")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | anhstudios/swganh | data/scripts/templates/object/tangible/component/weapon/shared_reinforcement_core.py | Python | mit | 484 |
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.administrator_page),
url(r'^new_jd/$', views.new_jd_page),
url(r'^test_data/refresh/$', views.test_data_refresh),
url(r'^jd/(?P<jd_id>\d+)/delete/', views.delete_jd),
url(r'^jd/(?P<jd_id>\d+)/create/$', views.create_jd),
url(r'^new_ssh_key/$', views.new_ssh_key_page),
url(r'^ssh_key/(?P<ssh_key_id>\d+)/delete/', views.delete_ssh_key),
url(r'^ssh_key/(?P<ssh_key_id>\d+)/create/$', views.create_ssh_key),
url(r'^new_parameter/$', views.new_parameter_page),
url(r'^parameters/(?P<param_id>\d+)/delete/', views.delete_parameter),
url(r'^parameters/(?P<param_id>\d+)/create/$', views.create_parameter),
]
| v0devil/jltom | ltc/administrator/urls.py | Python | mit | 734 |
"""
Logger classes for the ZAP CLI.
.. moduleauthor:: Daniel Grunwell (grunny)
"""
import logging
import sys
from termcolor import colored
class ColorStreamHandler(logging.StreamHandler):
"""
StreamHandler that prints color. This is used by the console client.
"""
level_map = {
logging.DEBUG: ('magenta', ['bold']),
logging.INFO: ('cyan', ['bold']),
logging.WARNING: ('yellow', ['bold']),
logging.ERROR: ('red', ['bold']),
logging.CRITICAL: ('red', ['bold', 'reverse'])
}
@property
def is_tty(self):
"""is the stream a tty?"""
isatty = getattr(self.stream, 'isatty', None)
return isatty and isatty()
def emit(self, record):
colorize = 'console' in globals() and getattr(console, 'colorize', False)
if self.is_tty and colorize:
color, attr = self.level_map[record.levelno]
prefix = colored(str('[' + record.levelname + ']').ljust(18), color, attrs=attr)
if hasattr(record, 'highlight') and record.highlight:
record.msg = colored(record.msg, color, attrs=['bold', 'reverse'])
else:
prefix = str('[' + record.levelname + ']').ljust(18)
record.msg = prefix + record.msg
logging.StreamHandler.emit(self, record)
class ConsoleLogger(logging.Logger):
"""Log to the console with some color decorations."""
def __init__(self, name):
super(ConsoleLogger, self).__init__(name)
self.setLevel(logging.DEBUG)
self.addHandler(ColorStreamHandler(sys.stdout))
# Save the current logger
default_logger_class = logging.getLoggerClass()
# Console logging for CLI
logging.setLoggerClass(ConsoleLogger)
console = logging.getLogger('zap')
# Restore the previous logger
logging.setLoggerClass(default_logger_class)
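# A minimal usage sketch (illustrative, not part of the module): the CLI
# entry point is expected to set `console.colorize`; with a tty attached,
# messages are then prefixed with the colors from ColorStreamHandler, and
# a record logged with extra={'highlight': True} is rendered reverse-video.
if __name__ == '__main__':
    console.colorize = True
    console.info('ZAP is running')
    console.warning('spider returned no URLs')
    console.error('unable to connect to ZAP', extra={'highlight': True})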
| Grunny/zap-cli | zapcli/log.py | Python | mit | 1,840 |
from django.conf.urls import url
from waldur_mastermind.booking import views
def register_in(router):
router.register(
r'booking-resources', views.ResourceViewSet, basename='booking-resource'
)
router.register(
r'booking-offerings', views.OfferingViewSet, basename='booking-offering'
)
urlpatterns = [
url(
r'^api/marketplace-bookings/(?P<uuid>[a-f0-9]+)/$',
views.OfferingBookingsViewSet.as_view(),
),
]
| opennode/nodeconductor-assembly-waldur | src/waldur_mastermind/booking/urls.py | Python | mit | 466 |
from flask import Flask
app = Flask(__name__)
app.config.from_object('config')
| Akagi201/learning-python | flask/Flask-Script/test1/app/__init__.py | Python | mit | 80 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Examples for Python-nvd3 is a Python wrapper for NVD3 graph library.
NVD3 is an attempt to build re-usable charts and chart components
for d3.js without taking away the power that d3.js gives you.
Project location : https://github.com/areski/python-nvd3
"""
from nvd3 import discreteBarChart
#Open File for test
output_file = open('test_discreteBarChart.html', 'w')
type = "discreteBarChart"
chart = discreteBarChart(name='mygraphname', height=400, width=600)
chart.set_containerheader("\n\n<h2>" + type + "</h2>\n\n")
xdata = ["A", "B", "C", "D", "E", "F", "G"]
ydata = [3, 12, -10, 5, 25, -7, 2]
extra_serie = {"tooltip": {"y_start": "", "y_end": " cal"}}
chart.add_serie(y=ydata, x=xdata, extra=extra_serie)
chart.buildhtml()
output_file.write(chart.htmlcontent)
#---------------------------------------
#close Html file
output_file.close()
| mgx2/python-nvd3 | examples/discreteBarChart.py | Python | mit | 899 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Creature()
result.template = "object/mobile/shared_dressed_commoner_old_human_male_02.iff"
result.attribute_template_id = 9
result.stfName("npc_name","human_base_male")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | obi-two/Rebelion | data/scripts/templates/object/mobile/shared_dressed_commoner_old_human_male_02.py | Python | mit | 459 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
from config.template_middleware import TemplateResponse
from gaecookie.decorator import no_csrf
from gaepermission.decorator import login_not_required
from tekton import router
from tekton.gae.middleware.redirect import RedirectResponse
from models import Game, Quest
from forms import GameForm, QuestForm
@login_not_required
@no_csrf
def index():
contexto = {'criar_modelo': router.to_path(salvar)}
return TemplateResponse(contexto)
titulo = ''
def salvar(**propriedades):
global titulo
game_form = GameForm(**propriedades)
titulo = propriedades['tit']
erros = game_form.validate()
if erros:
contexto = {'criar_modelo': router.to_path(salvar),
'game': game_form,
'erros': erros}
return TemplateResponse(contexto, 'criar/form.html')
else:
jogo = game_form.fill_model()
jogo.put()
return RedirectResponse(router.to_path(continuar))
@login_not_required
@no_csrf
def continuar():
ctx = {'criar_jogo': router.to_path(inserir)}
return TemplateResponse(ctx, "/criar/criando.html")
def inserir(**propriedades):
quest_form = QuestForm(**propriedades)
erro = quest_form.validate()
if erro:
contexto={'criar_modelo': router.to_path(salvar),
'quest': quest_form,
'erro': erro}
return TemplateResponse(contexto, 'criar/criandoform.html')
else:
questao = Quest(**propriedades)
query = Game.query(Game.tit == titulo)
if query is not None:
jogos = query.fetch()
for j in jogos:
questao.jog.append(j.key)
questao.put()
return RedirectResponse(router.to_path(continuar))
| rodrigomalk/Novos-Bandeirantes | backend/appengine/routes/criar.py | Python | mit | 1,831 |
from .models import Framework
from .utils import get_json_from_request, json_has_required_keys, \
json_has_matching_id
from .validation import get_validation_errors
def validate_and_return_draft_request(draft_id=0):
json_payload = get_json_from_request()
json_has_required_keys(json_payload, ['services'])
if draft_id:
json_has_matching_id(json_payload['services'], draft_id)
return json_payload['services']
def get_draft_validation_errors(draft_json, lot,
framework_id=0, slug=None, required=None):
if not slug and not framework_id:
raise Exception('Validation requires either framework_id or slug')
if not slug:
framework = Framework.query.filter(
Framework.id == framework_id
).first()
slug = framework.slug
errs = get_validation_errors(
"services-{0}-{1}".format(slug, lot.lower()),
draft_json,
enforce_required=False,
required_fields=required
)
return errs
| mtekel/digitalmarketplace-api | app/draft_utils.py | Python | mit | 1,022 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
result = Intangible()
result.template = "object/draft_schematic/clothing/shared_clothing_armor_bone_leggings.iff"
result.attribute_template_id = -1
result.stfName("string_id_table","")
#### BEGIN MODIFICATIONS ####
#### END MODIFICATIONS ####
return result | obi-two/Rebelion | data/scripts/templates/object/draft_schematic/clothing/shared_clothing_armor_bone_leggings.py | Python | mit | 466 |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2013-2017 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilentBase8590 import *
class agilentBase8590A(agilentBase8590):
"Agilent 8590A series IVI spectrum analyzer driver"
def __init__(self, *args, **kwargs):
self.__dict__.setdefault('_instrument_id', '')
super(agilentBase8590A, self).__init__(*args, **kwargs)
self._identity_description = "Agilent 8590 series IVI spectrum analyzer driver"
self._identity_supported_instrument_models = ['8590A', '8590B', '8591A', '8592A', '8592B',
'8593A', '8594A', '8595A']
def _display_fetch_screenshot(self, format='bmp', invert=False):
if self._driver_operation_simulate:
return b''
#if format not in ScreenshotImageFormatMapping:
# raise ivi.ValueNotSupportedException()
#format = ScreenshotImageFormatMapping[format]
self._write("PRINT 1")
rtl = io.BytesIO(self._read_raw())
img = hprtl.parse_hprtl(rtl)
# rescale to get white background
# presuming background of (90, 88, 85)
img[:,:,0] *= 255/90
img[:,:,1] *= 255/88
img[:,:,2] *= 255/85
bmp = hprtl.generate_bmp(img)
return bmp
| alexforencich/python-ivi | ivi/agilent/agilentBase8590A.py | Python | mit | 2,338 |
import csv
import json
import sys
import traceback
from six.moves import StringIO
import requests
import mock
import gevent
from gevent import wsgi
from locust import web, runners, stats
from locust.runners import LocustRunner
from locust.main import parse_options
from .testcases import LocustTestCase
class TestWebUI(LocustTestCase):
def setUp(self):
super(TestWebUI, self).setUp()
stats.global_stats.clear_all()
parser = parse_options()[0]
options = parser.parse_args([])[0]
runners.locust_runner = LocustRunner([], options)
self._web_ui_server = wsgi.WSGIServer(('127.0.0.1', 0), web.app, log=None)
gevent.spawn(lambda: self._web_ui_server.serve_forever())
gevent.sleep(0.01)
self.web_port = self._web_ui_server.server_port
def tearDown(self):
super(TestWebUI, self).tearDown()
self._web_ui_server.stop()
def test_index(self):
self.assertEqual(200, requests.get("http://127.0.0.1:%i/" % self.web_port).status_code)
def test_stats_no_data(self):
self.assertEqual(200, requests.get("http://127.0.0.1:%i/stats/requests" % self.web_port).status_code)
def test_stats(self):
stats.global_stats.get("/test", "GET").log(120, 5612)
response = requests.get("http://127.0.0.1:%i/stats/requests" % self.web_port)
self.assertEqual(200, response.status_code)
data = json.loads(response.text)
self.assertEqual(2, len(data["stats"])) # one entry plus Total
self.assertEqual("/test", data["stats"][0]["name"])
self.assertEqual("GET", data["stats"][0]["method"])
self.assertEqual(120, data["stats"][0]["avg_response_time"])
def test_stats_cache(self):
stats.global_stats.get("/test", "GET").log(120, 5612)
response = requests.get("http://127.0.0.1:%i/stats/requests" % self.web_port)
self.assertEqual(200, response.status_code)
data = json.loads(response.text)
self.assertEqual(2, len(data["stats"])) # one entry plus Total
# add another entry
stats.global_stats.get("/test2", "GET").log(120, 5612)
data = json.loads(requests.get("http://127.0.0.1:%i/stats/requests" % self.web_port).text)
self.assertEqual(2, len(data["stats"])) # old value should be cached now
web.request_stats.clear_cache()
data = json.loads(requests.get("http://127.0.0.1:%i/stats/requests" % self.web_port).text)
self.assertEqual(3, len(data["stats"])) # this should no longer be cached
def test_request_stats_csv(self):
stats.global_stats.get("/test", "GET").log(120, 5612)
response = requests.get("http://127.0.0.1:%i/stats/requests/csv" % self.web_port)
self.assertEqual(200, response.status_code)
def test_distribution_stats_csv(self):
stats.global_stats.get("/test", "GET").log(120, 5612)
response = requests.get("http://127.0.0.1:%i/stats/distribution/csv" % self.web_port)
self.assertEqual(200, response.status_code)
def test_exceptions_csv(self):
try:
raise Exception("Test exception")
except Exception as e:
tb = sys.exc_info()[2]
runners.locust_runner.log_exception("local", str(e), "".join(traceback.format_tb(tb)))
runners.locust_runner.log_exception("local", str(e), "".join(traceback.format_tb(tb)))
response = requests.get("http://127.0.0.1:%i/exceptions/csv" % self.web_port)
self.assertEqual(200, response.status_code)
reader = csv.reader(StringIO(response.text))
rows = []
for row in reader:
rows.append(row)
self.assertEqual(2, len(rows))
self.assertEqual("Test exception", rows[1][1])
self.assertEqual(2, int(rows[1][0]), "Exception count should be 2")
| pmdarrow/locust | locust/test/test_web.py | Python | mit | 3,957 |
"""
policy.py
Janbaanz Launde
Apr 1, 2017
"""
class Policy(object):
"""Abstract class for all policies"""
name = 'POLICY'
def __init__(self, contexts):
self.contexts = contexts
def predict_arm(self, contexts=None):
raise NotImplementedError("You need to override this function in child class.")
def pull_arm(self, arm, reward, contexts=None):
raise NotImplementedError("You need to override this function in child class.")
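# A minimal illustrative subclass (an assumption, not part of the project):
# a baseline that ignores feedback and samples arms uniformly. It assumes
# `self.contexts` is an iterable of arm identifiers.
import random
class RandomPolicy(Policy):
    name = 'RANDOM'
    def predict_arm(self, contexts=None):
        # choose an arm uniformly at random, ignoring any context
        return random.choice(list(self.contexts))
    def pull_arm(self, arm, reward, contexts=None):
        # a uniform policy does not learn from rewards
        pass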
| rakshify/News_Recommender | policy/policy.py | Python | mit | 472 |
from toontown.toonbase import ToontownGlobals
from toontown.coghq import DistributedLevelBattleAI
from direct.directnotify import DirectNotifyGlobal
from direct.fsm import State
from direct.fsm import ClassicFSM, State
from toontown.battle.BattleBase import *
import CogDisguiseGlobals
from toontown.toonbase.ToontownBattleGlobals import getMintCreditMultiplier
from direct.showbase.PythonUtil import addListsByValue
class DistributedMintBattleAI(DistributedLevelBattleAI.DistributedLevelBattleAI):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedMintBattleAI')
def __init__(self, air, battleMgr, pos, suit, toonId, zoneId, level, battleCellId, roundCallback = None, finishCallback = None, maxSuits = 4):
DistributedLevelBattleAI.DistributedLevelBattleAI.__init__(self, air, battleMgr, pos, suit, toonId, zoneId, level, battleCellId, 'MintReward', roundCallback, finishCallback, maxSuits)
self.battleCalc.setSkillCreditMultiplier(1)
if self.bossBattle:
self.level.d_setBossConfronted(toonId)
self.fsm.addState(State.State('MintReward', self.enterMintReward, self.exitMintReward, ['Resume']))
playMovieState = self.fsm.getStateNamed('PlayMovie')
playMovieState.addTransition('MintReward')
def getTaskZoneId(self):
return self.level.mintId
def handleToonsWon(self, toons):
extraMerits = [0,
0,
0,
0]
amount = ToontownGlobals.MintCogBuckRewards[self.level.mintId]
index = ToontownGlobals.cogHQZoneId2deptIndex(self.level.mintId)
extraMerits[index] = amount
for toon in toons:
recovered, notRecovered = self.air.questManager.recoverItems(toon, self.suitsKilled, self.getTaskZoneId())
self.toonItems[toon.doId][0].extend(recovered)
self.toonItems[toon.doId][1].extend(notRecovered)
meritArray = self.air.promotionMgr.recoverMerits(toon, self.suitsKilled, self.getTaskZoneId(), getMintCreditMultiplier(self.getTaskZoneId()), extraMerits=extraMerits)
if toon.doId in self.helpfulToons:
self.toonMerits[toon.doId] = addListsByValue(self.toonMerits[toon.doId], meritArray)
else:
self.notify.debug('toon %d not helpful list, skipping merits' % toon.doId)
def enterMintReward(self):
self.joinableFsm.request('Unjoinable')
self.runableFsm.request('Unrunable')
self.resetResponses()
self.assignRewards()
self.bossDefeated = 1
self.level.setVictors(self.activeToons[:])
self.timer.startCallback(BUILDING_REWARD_TIMEOUT, self.serverRewardDone)
return None
def exitMintReward(self):
return None
def enterResume(self):
DistributedLevelBattleAI.DistributedLevelBattleAI.enterResume(self)
if self.bossBattle and self.bossDefeated:
self.battleMgr.level.b_setDefeated()
| ksmit799/Toontown-Source | toontown/coghq/DistributedMintBattleAI.py | Python | mit | 2,940 |
import numpy as np
import os
import sys
import os.path as op
import matplotlib as mpl
import matplotlib.pyplot as plt
import palettable.colorbrewer as pal
from datetime import datetime
from cycler import cycler
#plt.rc('axes', prop_cycle=cycler('color', pal.qualitative.Dark2_8.mpl_colors)+
# cycler('marker',['D','o','v','*','^','x','h','8']))
mpl.rcParams['lines.markersize'] = 10
mpl.rcParams['lines.linewidth'] = 3
thispath = op.abspath(op.dirname(__file__))
mpi = np.genfromtxt('MPICompare.txt')
heat = np.genfromtxt('HeatComplete.txt')
KSdiv = np.genfromtxt('Divides.txt')
KSall = np.genfromtxt('KSComplete.txt')
ylbl = "Time per timestep (us)"
xlbl = "Number of spatial points"
#mpi
fig, (ax1,ax2) = plt.subplots(1,2, figsize=(14,8))
plt.suptitle("MPI and GPU performance",fontsize='large', fontweight="bold")
mpiLabels = ['MPIClassic', 'MPISwept', 'GPUClassic', 'GPUShared']
for i,mp in enumerate(mpiLabels):
ax1.loglog(mpi[:,0],mpi[:,i+1])
ax1.hold(True)
ax2.semilogx(mpi[:,0],mpi[:,-2],mpi[:,0],mpi[:,-1])
ax1.hold(True)
ax1.legend(mpiLabels, loc='upper left', fontsize='medium')
ax2.legend(["Classic", "Shared"], loc='upper left', fontsize='medium')
ax1.grid(alpha=0.5)
ax2.grid(alpha=0.5)
ax1.set_ylabel(ylbl)
ax2.set_ylabel("Speedup vs MPI")
ax1.set_xlabel(xlbl)
ax2.set_xlabel(xlbl)
plotfile = op.join(thispath,"mpiPlot.pdf")
ax1.set_xlim([heat[0,0],heat[-1,0]])
ax2.set_xlim([heat[0,0],heat[-1,0]])
fig.subplots_adjust(bottom=0.08, right=0.92, top=0.92)
plt.savefig(plotfile, bbox_inches='tight')
#KSdiv
divs = ["Divide","Multiply"]
fig, (ax1,ax2) = plt.subplots(1,2, figsize=(14,8), sharey=True)
plt.suptitle("Improvement to KS from division avoidance",fontsize='large', fontweight="bold")
ax1.loglog(KSdiv[:,0],KSdiv[:,1], KSdiv[:,0], KSdiv[:,2])
ax1.set_title("Double Precision")
ax2.loglog(KSdiv[:,0],KSdiv[:,3], KSdiv[:,0], KSdiv[:,4])
ax2.set_title("Single Precision")
ax1.set_ylabel(ylbl)
ax1.set_xlabel(xlbl)
ax2.set_xlabel(xlbl)
ax1.set_xlim([heat[0,0],heat[-1,0]])
plt.legend(divs, loc='upper left', fontsize='medium')
ax1.grid(alpha=0.5)
ax2.grid(alpha=0.5)
plotfile = op.join(thispath,"divisionPlot.pdf")
ax2.set_xlim([heat[0,0],heat[-1,0]])
plt.savefig(plotfile, bbox_inches='tight')
#hand, lbl = ax.get_legend_handles_labels()
#Heat complete
prec = ["Double", "Single"]
ksorder = mpiLabels[2:]
heatorder = ['Classic', 'GPUShared', 'Hybrid']
ho=[prec[0]+" "+rd for rd in heatorder]+[prec[1]+" "+rd for rd in heatorder]
fig, (ax1,ax2) = plt.subplots(1,2, figsize=(14,8))
plt.suptitle("Heat",fontsize='large', fontweight="bold")
ax1.loglog(heat[:,0],heat[:,1], heat[:,0], heat[:,2], heat[:,0], heat[:,3])
ax1.hold(True)
ax1.loglog(heat[:,0],heat[:,6], heat[:,0], heat[:,7], heat[:,0], heat[:,8])
ax1.legend(ho, loc='upper left', fontsize='medium')
ax1.set_ylabel(ylbl)
ax1.set_xlabel(xlbl)
ax1.set_xlim([heat[0,0],heat[-1,0]])
ho.pop(3)
ho.pop(0)
ax2.semilogx(heat[:,0],heat[:,4], heat[:,0], heat[:,5])
ax2.hold(True)
ax2.semilogx(heat[:,0],heat[:,9], heat[:,0], heat[:,10])
ax2.legend(ho, loc='upper right', fontsize='medium')
ax1.grid(alpha=0.5)
ax2.grid(alpha=0.5)
ax2.set_xlabel(xlbl)
ax2.set_ylabel("Speedup vs Classic")
fig.tight_layout(pad=0.2, w_pad=0.75, h_pad=1.5)
fig.subplots_adjust(bottom=0.08, right=0.92, top=0.92)
plotfile = op.join(thispath,"heatComplete.pdf")
ax2.set_xlim([heat[0,0],heat[-1,0]])
plt.savefig(plotfile, bbox_inches='tight')
reg = ["Register"]
ksorder += reg
#KS complete
ko=[prec[0]+" "+ rd for rd in ksorder]+[prec[1]+" "+ rd for rd in ksorder]
fig, (ax1,ax2) = plt.subplots(1,2, figsize=(14,8))
plt.suptitle("KS",fontsize='large', fontweight="bold")
ax1.loglog(KSall[:,0],KSall[:,1], KSall[:,0], KSall[:,2], KSall[:,0], KSall[:,3])
ax1.hold(True)
ax1.loglog(KSall[:,0],KSall[:,6], KSall[:,0], KSall[:,7], KSall[:,0], KSall[:,8])
ax1.legend(ko, loc='upper left', fontsize='medium')
ax1.set_ylabel(ylbl)
ax1.set_xlabel(xlbl)
ax1.set_xlim([heat[0,0],heat[-1,0]])
ko.pop(3)
ko.pop(0)
ax2.semilogx(KSall[:,0],KSall[:,4], KSall[:,0], KSall[:,5])
ax2.hold(True)
ax2.semilogx(KSall[:,0],KSall[:,9], KSall[:,0], KSall[:,10])
ax2.legend(ko, loc='upper right', fontsize='medium')
ax1.grid(alpha=0.5)
ax2.grid(alpha=0.5)
ax2.set_xlabel(xlbl)
ax2.set_ylabel("Speedup vs Classic")
fig.tight_layout(pad=0.2, w_pad=0.75, h_pad=1.0)
fig.subplots_adjust(bottom=0.08, right=0.92, top=0.92)
plotfile = op.join(thispath,"KSallComplete.pdf")
ax2.set_xlim([heat[0,0],heat[-1,0]])
plt.savefig(plotfile, bbox_inches='tight')
| OSUmageed/1DSweptCUDA | ResultPlots/ConferencePaper/Parsed/plotit.py | Python | mit | 4,498 |
"""holds locking functionality that works across processes"""
from __future__ import absolute_import, unicode_literals
from contextlib import contextmanager
import py
from filelock import FileLock, Timeout
from tox.reporter import verbosity1
@contextmanager
def hold_lock(lock_file, reporter=verbosity1):
py.path.local(lock_file.dirname).ensure(dir=1)
lock = FileLock(str(lock_file))
try:
try:
lock.acquire(0.0001)
except Timeout:
reporter("lock file {} present, will block until released".format(lock_file))
lock.acquire()
yield
finally:
lock.release(force=True)
def get_unique_file(path, prefix, suffix):
"""get a unique file in a folder having a given prefix and suffix,
    with a unique number in between"""
lock_file = path.join(".lock")
prefix = "{}-".format(prefix)
with hold_lock(lock_file):
max_value = -1
for candidate in path.listdir("{}*{}".format(prefix, suffix)):
try:
max_value = max(max_value, int(candidate.basename[len(prefix) : -len(suffix)]))
except ValueError:
continue
winner = path.join("{}{}{}".format(prefix, max_value + 1, suffix))
winner.ensure(dir=0)
return winner
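# A minimal usage sketch (illustrative): consecutive calls hand out uniquely
# numbered files (tox-0.log, tox-1.log, ...) even when several processes
# race, because the numbering happens while the .lock file is held.
if __name__ == "__main__":
    log_dir = py.path.local("demo-log")
    first = get_unique_file(log_dir, prefix="tox", suffix=".log")
    second = get_unique_file(log_dir, prefix="tox", suffix=".log")
    print(first.basename, second.basename)  # tox-0.log tox-1.log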
| tox-dev/tox | src/tox/util/lock.py | Python | mit | 1,295 |
#/u/Goldensights
import praw
import time
import datetime
'''USER CONFIG'''
USERNAME = ""
#This is the bot's Username. In order to send mail, he must have some amount of Karma.
PASSWORD = ""
#This is the bot's Password.
USERAGENT = ""
#This is a short description of what the bot does. For example "/u/GoldenSights' Newsletter bot"
MAXPOSTS = 1000
#This is how many posts you want to retrieve all at once. PRAW can download 100 at a time.
WAIT = 30
#This is how many seconds you will wait between cycles. The bot is completely inactive during this time.
PRINTFILE = 'messages.txt'
#This is the file, in the same directory as the .py file, where the messages are stored
SUBJECTLINE = "Newsletterly"
ITEMTYPE = 't4'
#The type of item to gather. t4 is a PM
'''All done!'''
WAITS = str(WAIT)
try:
import bot #This is a file in my python library which contains my Bot's username and password. I can push code to Git without showing credentials
USERNAME = bot.uG
PASSWORD = bot.pG
USERAGENT = bot.aG
except ImportError:
pass
r = praw.Reddit(USERAGENT)
r.login(USERNAME, PASSWORD)
def work():
unread = r.get_unread(limit=MAXPOSTS)
results = []
for message in unread:
if ITEMTYPE in message.fullname:
print(message.id, message.subject, end=" ")
if SUBJECTLINE.lower() in message.subject.lower():
print(message.body)
messagedate = datetime.datetime.utcfromtimestamp(message.created_utc)
messagedate = datetime.datetime.strftime(messagedate, "%B %d %Y %H:%M UTC")
results += [message.fullname + " : " + message.author.name, messagedate, message.body, "\n\n"]
else:
print()
message.mark_as_read()
logfile = open(PRINTFILE, "a")
for result in results:
print(result, file=logfile)
logfile.close()
while True:
try:
work()
except Exception as e:
print('An error has occured:', str(e))
print('Running again in ' + WAITS + ' seconds \n')
time.sleep(WAIT) | tehp/reddit | MessageArchiveSimple/messagearchivesimple.py | Python | mit | 1,943 |
# encoding: utf-8
from collections import namedtuple
import inspect
import keyword
import sys
try:
import unittest2 as unittest
except ImportError:
import unittest
try:
import jedi
has_jedi = True
except ImportError:
has_jedi = False
from bpython import autocomplete
from bpython._py3compat import py3
from bpython.test import mock
is_py34 = sys.version_info[:2] >= (3, 4)
if is_py34:
glob_function = 'glob.iglob'
else:
glob_function = 'glob.glob'
class TestSafeEval(unittest.TestCase):
def test_catches_syntax_error(self):
with self.assertRaises(autocomplete.EvaluationError):
autocomplete.safe_eval('1re', {})
class TestFormatters(unittest.TestCase):
def test_filename(self):
completer = autocomplete.FilenameCompletion()
last_part_of_filename = completer.format
self.assertEqual(last_part_of_filename('abc'), 'abc')
self.assertEqual(last_part_of_filename('abc/'), 'abc/')
self.assertEqual(last_part_of_filename('abc/efg'), 'efg')
self.assertEqual(last_part_of_filename('abc/efg/'), 'efg/')
self.assertEqual(last_part_of_filename('/abc'), 'abc')
self.assertEqual(last_part_of_filename('ab.c/e.f.g/'), 'e.f.g/')
def test_attribute(self):
self.assertEqual(autocomplete.after_last_dot('abc.edf'), 'edf')
def completer(matches):
mock_completer = autocomplete.BaseCompletionType()
mock_completer.matches = mock.Mock(return_value=matches)
return mock_completer
class TestGetCompleter(unittest.TestCase):
def test_no_completers(self):
self.assertTupleEqual(autocomplete.get_completer([], 0, ''),
([], None))
def test_one_completer_without_matches_returns_empty_list_and_none(self):
a = completer([])
self.assertTupleEqual(autocomplete.get_completer([a], 0, ''),
([], None))
def test_one_completer_returns_matches_and_completer(self):
a = completer(['a'])
self.assertTupleEqual(autocomplete.get_completer([a], 0, ''),
(['a'], a))
def test_two_completers_with_matches_returns_first_matches(self):
a = completer(['a'])
b = completer(['b'])
self.assertEqual(autocomplete.get_completer([a, b], 0, ''), (['a'], a))
def test_first_non_none_completer_matches_are_returned(self):
a = completer([])
b = completer(['a'])
self.assertEqual(autocomplete.get_completer([a, b], 0, ''), ([], None))
def test_only_completer_returns_None(self):
a = completer(None)
self.assertEqual(autocomplete.get_completer([a], 0, ''), ([], None))
def test_first_completer_returns_None(self):
a = completer(None)
b = completer(['a'])
self.assertEqual(autocomplete.get_completer([a, b], 0, ''), (['a'], b))
class TestCumulativeCompleter(unittest.TestCase):
    def completer(self, matches):
mock_completer = autocomplete.BaseCompletionType()
mock_completer.matches = mock.Mock(return_value=matches)
return mock_completer
def test_no_completers_fails(self):
with self.assertRaises(ValueError):
autocomplete.CumulativeCompleter([])
def test_one_empty_completer_returns_empty(self):
a = self.completer([])
cumulative = autocomplete.CumulativeCompleter([a])
self.assertEqual(cumulative.matches(3, 'abc'), set())
def test_one_none_completer_returns_none(self):
a = self.completer(None)
cumulative = autocomplete.CumulativeCompleter([a])
self.assertEqual(cumulative.matches(3, 'abc'), None)
def test_two_completers_get_both(self):
a = self.completer(['a'])
b = self.completer(['b'])
cumulative = autocomplete.CumulativeCompleter([a, b])
self.assertEqual(cumulative.matches(3, 'abc'), set(['a', 'b']))
class TestFilenameCompletion(unittest.TestCase):
def setUp(self):
self.completer = autocomplete.FilenameCompletion()
def test_locate_fails_when_not_in_string(self):
self.assertEqual(self.completer.locate(4, "abcd"), None)
def test_locate_succeeds_when_in_string(self):
self.assertEqual(self.completer.locate(4, "a'bc'd"), (2, 4, 'bc'))
def test_issue_491(self):
self.assertNotEqual(self.completer.matches(9, '"a[a.l-1]'), None)
@mock.patch(glob_function, new=lambda text: [])
def test_match_returns_none_if_not_in_string(self):
self.assertEqual(self.completer.matches(2, 'abcd'), None)
@mock.patch(glob_function, new=lambda text: [])
def test_match_returns_empty_list_when_no_files(self):
self.assertEqual(self.completer.matches(2, '"a'), set())
@mock.patch(glob_function, new=lambda text: ['abcde', 'aaaaa'])
@mock.patch('os.path.expanduser', new=lambda text: text)
@mock.patch('os.path.isdir', new=lambda text: False)
@mock.patch('os.path.sep', new='/')
def test_match_returns_files_when_files_exist(self):
self.assertEqual(sorted(self.completer.matches(2, '"x')),
['aaaaa', 'abcde'])
@mock.patch(glob_function, new=lambda text: ['abcde', 'aaaaa'])
@mock.patch('os.path.expanduser', new=lambda text: text)
@mock.patch('os.path.isdir', new=lambda text: True)
@mock.patch('os.path.sep', new='/')
def test_match_returns_dirs_when_dirs_exist(self):
self.assertEqual(sorted(self.completer.matches(2, '"x')),
['aaaaa/', 'abcde/'])
@mock.patch(glob_function,
new=lambda text: ['/expand/ed/abcde', '/expand/ed/aaaaa'])
@mock.patch('os.path.expanduser',
new=lambda text: text.replace('~', '/expand/ed'))
@mock.patch('os.path.isdir', new=lambda text: False)
@mock.patch('os.path.sep', new='/')
def test_tilde_stays_pretty(self):
self.assertEqual(sorted(self.completer.matches(4, '"~/a')),
['~/aaaaa', '~/abcde'])
@mock.patch('os.path.sep', new='/')
def test_formatting_takes_just_last_part(self):
self.assertEqual(self.completer.format('/hello/there/'), 'there/')
self.assertEqual(self.completer.format('/hello/there'), 'there')
class MockNumPy(object):
"""This is a mock numpy object that raises an error when there is an atempt
to convert it to a boolean."""
def __nonzero__(self):
raise ValueError("The truth value of an array with more than one "
"element is ambiguous. Use a.any() or a.all()")
class TestDictKeyCompletion(unittest.TestCase):
def test_set_of_keys_returned_when_matches_found(self):
com = autocomplete.DictKeyCompletion()
local = {'d': {"ab": 1, "cd": 2}}
self.assertSetEqual(com.matches(2, "d[", locals_=local),
set(["'ab']", "'cd']"]))
def test_none_returned_when_eval_error(self):
com = autocomplete.DictKeyCompletion()
local = {'e': {"ab": 1, "cd": 2}}
self.assertEqual(com.matches(2, "d[", locals_=local), None)
def test_none_returned_when_not_dict_type(self):
com = autocomplete.DictKeyCompletion()
local = {'l': ["ab", "cd"]}
self.assertEqual(com.matches(2, "l[", locals_=local), None)
def test_none_returned_when_no_matches_left(self):
com = autocomplete.DictKeyCompletion()
local = {'d': {"ab": 1, "cd": 2}}
self.assertEqual(com.matches(3, "d[r", locals_=local), None)
def test_obj_that_does_not_allow_conversion_to_bool(self):
com = autocomplete.DictKeyCompletion()
local = {'mNumPy': MockNumPy()}
self.assertEqual(com.matches(7, "mNumPy[", locals_=local), None)
class Foo(object):
a = 10
def __init__(self):
self.b = 20
def method(self, x):
pass
class OldStyleFoo:
a = 10
def __init__(self):
self.b = 20
def method(self, x):
pass
skip_old_style = unittest.skipIf(py3,
'In Python 3 there are no old style classes')
class Properties(Foo):
@property
def asserts_when_called(self):
raise AssertionError("getter method called")
class Slots(object):
__slots__ = ['a', 'b']
class TestAttrCompletion(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.com = autocomplete.AttrCompletion()
def test_att_matches_found_on_instance(self):
self.assertSetEqual(self.com.matches(2, 'a.', locals_={'a': Foo()}),
set(['a.method', 'a.a', 'a.b']))
@skip_old_style
def test_att_matches_found_on_old_style_instance(self):
self.assertSetEqual(self.com.matches(2, 'a.',
locals_={'a': OldStyleFoo()}),
set(['a.method', 'a.a', 'a.b']))
self.assertIn(u'a.__dict__',
self.com.matches(4, 'a.__',
locals_={'a': OldStyleFoo()}))
@skip_old_style
def test_att_matches_found_on_old_style_class_object(self):
self.assertIn(u'A.__dict__',
self.com.matches(4, 'A.__', locals_={'A': OldStyleFoo}))
@skip_old_style
def test_issue536(self):
class OldStyleWithBrokenGetAttr:
def __getattr__(self, attr):
raise Exception()
locals_ = {'a': OldStyleWithBrokenGetAttr()}
self.assertIn(u'a.__module__',
self.com.matches(4, 'a.__', locals_=locals_))
def test_descriptor_attributes_not_run(self):
com = autocomplete.AttrCompletion()
self.assertSetEqual(com.matches(2, 'a.', locals_={'a': Properties()}),
set(['a.b', 'a.a', 'a.method',
'a.asserts_when_called']))
def test_slots_not_crash(self):
com = autocomplete.AttrCompletion()
self.assertSetEqual(com.matches(2, 'A.', locals_={'A': Slots}),
set(['A.b', 'A.a', 'A.mro']))
class TestExpressionAttributeCompletion(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.com = autocomplete.ExpressionAttributeCompletion()
def test_att_matches_found_on_instance(self):
self.assertSetEqual(self.com.matches(5, 'a[0].',
locals_={'a': [Foo()]}),
set(['method', 'a', 'b']))
@skip_old_style
def test_att_matches_found_on_old_style_instance(self):
self.assertSetEqual(self.com.matches(5, 'a[0].',
locals_={'a': [OldStyleFoo()]}),
set(['method', 'a', 'b']))
def test_other_getitem_methods_not_called(self):
class FakeList(object):
def __getitem__(inner_self, i):
self.fail("possibly side-effecting __getitem_ method called")
self.com.matches(5, 'a[0].', locals_={'a': FakeList()})
def test_tuples_complete(self):
self.assertSetEqual(self.com.matches(5, 'a[0].',
locals_={'a': (Foo(),)}),
set(['method', 'a', 'b']))
@unittest.skip('TODO, subclasses do not complete yet')
def test_list_subclasses_complete(self):
class ListSubclass(list):
pass
self.assertSetEqual(self.com.matches(5, 'a[0].',
locals_={'a': ListSubclass([Foo()])}),
set(['method', 'a', 'b']))
def test_getitem_not_called_in_list_subclasses_overriding_getitem(self):
class FakeList(list):
def __getitem__(inner_self, i):
self.fail("possibly side-effecting __getitem_ method called")
self.com.matches(5, 'a[0].', locals_={'a': FakeList()})
def test_literals_complete(self):
self.assertSetEqual(self.com.matches(10, '[a][0][0].',
locals_={'a': (Foo(),)}),
set(['method', 'a', 'b']))
def test_dictionaries_complete(self):
self.assertSetEqual(self.com.matches(7, 'a["b"].',
locals_={'a': {'b': Foo()}}),
set(['method', 'a', 'b']))
class TestMagicMethodCompletion(unittest.TestCase):
def test_magic_methods_complete_after_double_underscores(self):
com = autocomplete.MagicMethodCompletion()
block = "class Something(object)\n def __"
self.assertSetEqual(com.matches(10, ' def __', current_block=block),
set(autocomplete.MAGIC_METHODS))
Comp = namedtuple('Completion', ['name', 'complete'])
@unittest.skipUnless(has_jedi, "jedi required")
class TestMultilineJediCompletion(unittest.TestCase):
def test_returns_none_with_single_line(self):
com = autocomplete.MultilineJediCompletion()
self.assertEqual(com.matches(2, 'Va', current_block='Va', history=[]),
None)
    def test_returns_none_with_blank_second_line(self):
com = autocomplete.MultilineJediCompletion()
self.assertEqual(com.matches(0, '', current_block='class Foo():\n',
history=['class Foo():']), None)
def matches_from_completions(self, cursor, line, block, history,
completions):
with mock.patch('bpython.autocomplete.jedi.Script') as Script:
script = Script.return_value
script.completions.return_value = completions
com = autocomplete.MultilineJediCompletion()
return com.matches(cursor, line, current_block=block,
history=history)
def test_completions_starting_with_different_letters(self):
matches = self.matches_from_completions(
2, ' a', 'class Foo:\n a', ['adsf'],
[Comp('Abc', 'bc'), Comp('Cbc', 'bc')])
self.assertEqual(matches, None)
def test_completions_starting_with_different_cases(self):
matches = self.matches_from_completions(
2, ' a', 'class Foo:\n a', ['adsf'],
[Comp('Abc', 'bc'), Comp('ade', 'de')])
self.assertSetEqual(matches, set(['ade']))
@unittest.skipUnless(is_py34, 'asyncio required')
def test_issue_544(self):
com = autocomplete.MultilineJediCompletion()
code = '@asyncio.coroutine\ndef'
history = ('import asyncio', '@asyncio.coroutin')
com.matches(3, 'def', current_block=code, history=history)
class TestGlobalCompletion(unittest.TestCase):
def setUp(self):
self.com = autocomplete.GlobalCompletion()
def test_function(self):
def function():
pass
self.assertEqual(self.com.matches(8, 'function',
locals_={'function': function}),
set(('function(', )))
def test_completions_are_unicode(self):
for m in self.com.matches(1, 'a', locals_={'abc': 10}):
self.assertIsInstance(m, type(u''))
@unittest.skipIf(py3, "in Python 3 invalid identifiers are passed through")
def test_ignores_nonascii_encodable(self):
self.assertEqual(self.com.matches(3, 'abc', locals_={'abcß': 10}),
None)
def test_mock_kwlist(self):
with mock.patch.object(keyword, 'kwlist', new=['abcd']):
self.assertEqual(self.com.matches(3, 'abc', locals_={}), None)
def test_mock_kwlist_non_ascii(self):
with mock.patch.object(keyword, 'kwlist', new=['abcß']):
self.assertEqual(self.com.matches(3, 'abc', locals_={}), None)
class TestParameterNameCompletion(unittest.TestCase):
def test_set_of_params_returns_when_matches_found(self):
def func(apple, apricot, banana, carrot):
pass
if py3:
argspec = list(inspect.getfullargspec(func))
else:
argspec = list(inspect.getargspec(func))
argspec = ["func", argspec, False]
com = autocomplete.ParameterNameCompletion()
self.assertSetEqual(com.matches(1, "a", argspec=argspec),
set(['apple=', 'apricot=']))
self.assertSetEqual(com.matches(2, "ba", argspec=argspec),
set(['banana=']))
self.assertSetEqual(com.matches(3, "car", argspec=argspec),
set(['carrot=']))
| MarkWh1te/xueqiu_predict | python3_env/lib/python3.4/site-packages/bpython/test/test_autocomplete.py | Python | mit | 16,423 |
"""
"""
import argparse
parser = argparse.ArgumentParser(
prog="portainer",
fromfile_prefix_chars="@"
)
subparsers = parser.add_subparsers()
def subcommand(name, callback=None):
"""A decorator for main functions to add themselves as subcommands."""
def decorator(fn):
subparser = subparsers.add_parser(name)
subparser.set_defaults(_fn=fn, _name=name, _parser=subparser)
if callback:
callback(subparser)
return fn
return decorator
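# A minimal usage sketch (illustrative, not part of the package): register
# a subcommand, let the callback add its flags (the --tag flag here is
# hypothetical), and dispatch through the _fn default set above.
def _add_build_args(subparser):
    subparser.add_argument("--tag", default="latest")
@subcommand("build", callback=_add_build_args)
def build(args):
    print("building image with tag", args.tag)
if __name__ == "__main__":
    args = parser.parse_args(["build", "--tag", "demo"])
    args._fn(args)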
| duedil-ltd/portainer | portainer/app/__init__.py | Python | mit | 504 |