code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
import factory
from .models import (FKDummyModel, O2ODummyModel, BaseModel, ManyToManyToBaseModel,
ForeignKeyToBaseModel, OneToOneToBaseModel, ClassLevel1, ClassLevel2, ClassLevel3,
ManyToManyToBaseModelWithRelatedName, ChildModel, SubClassOfBaseModel)
class FKDummyModelFactory(factory.django.DjangoModelFactory):
    """Build ``FKDummyModel`` rows with sequentially numbered names."""

    class Meta:
        model = FKDummyModel

    name = factory.Sequence(lambda counter: "FKDummyModelName#{number}".format(number=str(counter)))
class O2ODummyModelFactory(factory.django.DjangoModelFactory):
    """Build ``O2ODummyModel`` rows with sequentially numbered names."""

    class Meta:
        model = O2ODummyModel

    name = factory.Sequence(lambda counter: "O2ODummyModelName#{number}".format(number=str(counter)))
class BaseModelFactory(factory.django.DjangoModelFactory):
    """Build ``BaseModel`` rows, creating fresh ``fkey``/``o2o`` targets."""

    class Meta:
        model = BaseModel

    name = factory.Sequence(lambda counter: "BaseModelName#{number}".format(number=str(counter)))
    fkey = factory.SubFactory(FKDummyModelFactory)
    o2o = factory.SubFactory(O2ODummyModelFactory)
class SubClassOfBaseModelFactory(BaseModelFactory):
    """Same declarations as :class:`BaseModelFactory`, targeting the
    multi-table-inheritance child ``SubClassOfBaseModel``."""

    class Meta:
        model = SubClassOfBaseModel
class ManyToManyToBaseModelFactory(factory.django.DjangoModelFactory):
    """Build ``ManyToManyToBaseModel`` rows; pass ``base_models=[...]``
    to populate the ``m2m`` relation after creation."""

    class Meta:
        model = ManyToManyToBaseModel

    # NOTE(review): "Mayny" typo preserved from the original sequence name.
    name = factory.Sequence(lambda counter: "MaynyToManyToBaseModelName#{number}".format(number=str(counter)))

    @factory.post_generation
    def base_models(self, create, extracted, **kwargs):
        # The relation can only be filled once the instance is saved,
        # so the build strategy is a no-op.
        if not create:
            return
        for related in (extracted or ()):
            self.m2m.add(related)
class ManyToManyToBaseModelWithRelatedNameFactory(factory.django.DjangoModelFactory):
    """Like :class:`ManyToManyToBaseModelFactory`, but for the model whose
    m2m field declares a ``related_name``."""

    class Meta:
        model = ManyToManyToBaseModelWithRelatedName

    # NOTE(review): "Mayny" typo preserved from the original sequence name.
    name = factory.Sequence(lambda counter: "MaynyToManyToBaseModelName#{number}".format(number=str(counter)))

    @factory.post_generation
    def base_models(self, create, extracted, **kwargs):
        # The relation can only be filled once the instance is saved,
        # so the build strategy is a no-op.
        if not create:
            return
        for related in (extracted or ()):
            self.m2m.add(related)
class ForeignKeyToBaseModelFactory(factory.django.DjangoModelFactory):
    """Build ``ForeignKeyToBaseModel`` rows pointing at a fresh ``BaseModel``."""

    class Meta:
        model = ForeignKeyToBaseModel

    # NOTE(review): "Bse" typo preserved from the original sequence name.
    name = factory.Sequence(lambda counter: "ForeignKeyToBseModelName#{number}".format(number=str(counter)))
    fkeyto = factory.SubFactory(BaseModelFactory)
class OneToOneToBaseModelFactory(factory.django.DjangoModelFactory):
    """Build ``OneToOneToBaseModel`` rows pointing at a fresh ``BaseModel``."""

    class Meta:
        model = OneToOneToBaseModel

    name = factory.Sequence(lambda counter: "OneToOneToBaseModelName#{number}".format(number=str(counter)))
    o2oto = factory.SubFactory(BaseModelFactory)
class ClassLevel1Factory(factory.django.DjangoModelFactory):
    """Root of the three-level FK chain used by depth tests."""

    class Meta:
        model = ClassLevel1

    name = factory.Sequence(lambda counter: "ClassLevel1#{number}".format(number=str(counter)))
class ClassLevel2Factory(factory.django.DjangoModelFactory):
    """Second level of the FK chain; links up to :class:`ClassLevel1Factory`."""

    class Meta:
        model = ClassLevel2

    name = factory.Sequence(lambda counter: "ClassLevel2#{number}".format(number=str(counter)))
    fkey = factory.SubFactory(ClassLevel1Factory)
class ClassLevel3Factory(factory.django.DjangoModelFactory):
    """Third level of the FK chain; links up to :class:`ClassLevel2Factory`."""

    class Meta:
        model = ClassLevel3

    name = factory.Sequence(lambda counter: "ClassLevel3#{number}".format(number=str(counter)))
    fkey = factory.SubFactory(ClassLevel2Factory)
class ChildModelFactory(BaseModelFactory):
    """Extends :class:`BaseModelFactory` for ``ChildModel``, adding a
    sequenced ``child_field``."""

    class Meta:
        model = ChildModel

    child_field = factory.Sequence(lambda counter: "ChildField#{number}".format(number=str(counter)))
| iwoca/django-deep-collector | tests/factories.py | Python | bsd-3-clause | 3,407 |
import io
import os
import sys
import codecs
import contextlib
# Traditional unixes (Linux and the BSDs) do not enforce any encoding at
# the file-system level, so the encoding the environment reports there is
# unreliable.  On those platforms we know better than what the env says
# and declare paths to be utf-8 always.
has_likely_buggy_unicode_filesystem = \
    sys.platform.startswith('linux') or 'bsd' in sys.platform
def is_ascii_encoding(encoding):
    """Tell whether *encoding* is some alias of ASCII.

    ASCII hides behind many names (``ANSI_X3.4-1968`` for instance), so
    the name is resolved through the codec registry and compared against
    the canonical ``'ascii'``.  ``None`` and unknown encodings yield
    ``False``.
    """
    if encoding is None:
        return False
    try:
        return codecs.lookup(encoding).name == 'ascii'
    except LookupError:
        return False
def get_filesystem_encoding():
    """Return the encoding that should be used for file-system paths.

    This deliberately differs from Python's own notion of the filesystem
    encoding, which can be deeply flawed; do not feed this value into
    Python's unicode APIs.  On platforms with unreliable file systems
    utf-8 is forced, and an ASCII answer from the interpreter is treated
    as misconfiguration and replaced with utf-8 as well.  If you need
    this function for anything but writing wrapper code, reconsider.
    """
    if has_likely_buggy_unicode_filesystem:
        return 'utf-8'
    reported = sys.getfilesystemencoding()
    return 'utf-8' if is_ascii_encoding(reported) else reported
def get_file_encoding(for_writing=False):
    """Return the encoding for text file data.

    Data exchange only works when everybody agrees on a single encoding,
    so this is utf-8 on every operating system, no questions asked.  The
    one refinement is that files opened for reading are decoded as
    ``utf-8-sig`` so an optional BOM is skipped transparently.
    """
    return 'utf-8' if for_writing else 'utf-8-sig'
def get_std_stream_encoding():
    """Return the fallback encoding for the standard streams, upgrading
    an ASCII default to utf-8."""
    candidate = sys.getdefaultencoding()
    if is_ascii_encoding(candidate):
        return 'utf-8'
    return candidate
class BrokenEnvironment(Exception):
    """Raised on Python 3 when the interpreter environment is
    misconfigured beyond repair (e.g. no binary stream can be found
    behind a standard stream).
    """
class _NonClosingTextIOWrapper(io.TextIOWrapper):
    """A ``TextIOWrapper`` whose destructor leaves the underlying buffer
    open.  Used when wrapping the standard streams so garbage collection
    of the wrapper cannot accidentally close the real stream.
    """

    def __del__(self):
        # Intentionally do nothing: the inherited destructor would
        # close the wrapped buffer.
        pass
class _FixupStream(object):
    """Proxy that back-fills the ``readable``/``writable``/``seekable``
    protocol for streams that predate the new io interface.

    Attribute access is forwarded to the wrapped stream.  Each
    capability probe first asks the stream itself and otherwise falls
    back to attempting a side-effect-free version of the operation.

    Bug fix: the capability methods previously did ``return x`` with the
    *bound method*, which is always truthy — a stream whose ``readable()``
    returns ``False`` was still reported as readable.  They must call
    ``x()`` (as click's ``_compat`` module does).
    """

    def __init__(self, stream):
        self._stream = stream

    def __getattr__(self, name):
        return getattr(self._stream, name)

    def readable(self):
        x = getattr(self._stream, 'readable', None)
        if x is not None:
            return x()  # call the probe; the method object is always truthy
        try:
            self._stream.read(0)
        except Exception:
            return False
        return True

    def writable(self):
        x = getattr(self._stream, 'writable', None)
        if x is not None:
            return x()
        try:
            self._stream.write('')
        except Exception:
            try:
                self._stream.write(b'')
            except Exception:
                return False
        return True

    def seekable(self):
        x = getattr(self._stream, 'seekable', None)
        if x is not None:
            return x()
        try:
            self._stream.seek(self._stream.tell())
        except Exception:
            return False
        return True
PY2 = sys.version_info[0] == 2
if PY2:
    # Python 2: the native str is byte-based, so the standard streams
    # are already binary and only the *text* accessors need wrapping.
    import StringIO
    text_type = unicode
    TextIO = io.StringIO
    BytesIO = io.BytesIO
    NativeIO = StringIO.StringIO

    def _make_text_stream(stream, encoding, errors):
        # Wrap a binary standard stream into a text one.  ``replace`` is
        # the default error handler so console output never raises.
        if encoding is None:
            encoding = get_std_stream_encoding()
        if errors is None:
            errors = 'replace'
        return _NonClosingTextIOWrapper(_FixupStream(stream), encoding, errors)

    def get_binary_stdin():
        # The 2.x standard streams are byte-based already.
        return sys.stdin

    def get_binary_stdout():
        return sys.stdout

    def get_binary_stderr():
        return sys.stderr

    def get_binary_argv():
        # ``sys.argv`` holds byte strings on 2.x; return a copy so
        # callers may mutate the result safely.
        return list(sys.argv)

    def get_text_stdin(encoding=None, errors=None):
        return _make_text_stream(sys.stdin, encoding, errors)

    def get_text_stdout(encoding=None, errors=None):
        return _make_text_stream(sys.stdout, encoding, errors)

    def get_text_stderr(encoding=None, errors=None):
        return _make_text_stream(sys.stderr, encoding, errors)

    @contextlib.contextmanager
    def wrap_standard_stream(stream_type, stream):
        # Temporarily install ``stream`` as ``sys.<stream_type>``; the
        # previous stream is restored even if the body raises.
        if stream_type not in ('stdin', 'stdout', 'stderr'):
            raise TypeError('Invalid stream %s' % stream_type)
        old_stream = getattr(sys, stream_type)
        setattr(sys, stream_type, stream)
        try:
            yield stream
        finally:
            setattr(sys, stream_type, old_stream)

    @contextlib.contextmanager
    def capture_stdout(and_stderr=False):
        # Capture stdout (optionally stderr too) into one StringIO.
        stream = StringIO.StringIO()
        old_stdout = sys.stdout
        old_stderr = sys.stderr
        sys.stdout = stream
        if and_stderr:
            sys.stderr = stream
        try:
            yield stream
        finally:
            sys.stdout = old_stdout
            if and_stderr:
                sys.stderr = old_stderr

    # On 2.x the environment mapping already contains byte strings.
    binary_env = os.environ
else:
    # Python 3: the standard streams are text; binary access goes
    # through their underlying ``.buffer`` objects.
    text_type = str
    TextIO = io.StringIO
    BytesIO = io.BytesIO
    NativeIO = io.StringIO
def _is_binary_reader(stream, default=False):
try:
return isinstance(stream.read(0), bytes)
except Exception:
return default
# This happens in some cases where the stream was already
# closed. In this case we assume the defalt.
def _is_binary_writer(stream, default=False):
try:
stream.write(b'')
except Exception:
try:
stream.write('')
return False
except Exception:
pass
return default
return True
def _find_binary_reader(stream):
# We need to figure out if the given stream is already binary.
# This can happen because the official docs recommend detatching
# the streams to get binary streams. Some code might do this, so
# we need to deal with this case explicitly.
is_binary = _is_binary_reader(stream, False)
if is_binary:
return stream
buf = getattr(stream, 'buffer', None)
# Same situation here, this time we assume that the buffer is
# actually binary in case it's closed.
if buf is not None and _is_binary_reader(buf, True):
return buf
def _find_binary_writer(stream):
# We need to figure out if the given stream is already binary.
# This can happen because the official docs recommend detatching
# the streams to get binary streams. Some code might do this, so
# we need to deal with this case explicitly.
if _is_binary_writer(stream, False):
return stream
buf = getattr(stream, 'buffer', None)
# Same situation here, this time we assume that the buffer is
# actually binary in case it's closed.
if buf is not None and _is_binary_reader(buf, True):
return buf
def _stream_is_misconfigured(stream):
"""A stream is misconfigured if it's encoding is ASCII."""
return is_ascii_encoding(getattr(stream, 'encoding', None))
def _wrap_stream_for_text(stream, encoding, errors):
if errors is None:
errors = 'replace'
if encoding is None:
encoding = get_std_stream_encoding()
return _NonClosingTextIOWrapper(_FixupStream(stream), encoding, errors)
def _is_compatible_text_stream(stream, encoding, errors):
stream_encoding = getattr(stream, 'encoding', None)
stream_errors = getattr(stream, 'errors', None)
# Perfect match.
if stream_encoding == encoding and stream_errors == errors:
return True
# Otherwise it's only a compatible stream if we did not ask for
# an encoding.
if encoding is None:
return stream_encoding is not None
return False
    def _force_correct_text_reader(text_reader, encoding, errors):
        # Return a text reader honouring *encoding*/*errors*, rewrapping
        # the underlying binary stream only when the existing text layer
        # cannot be reused.
        if _is_binary_reader(text_reader, False):
            binary_reader = text_reader
        else:
            # If there is no target encoding set we need to verify that the
            # reader is actually not misconfigured.
            if encoding is None and not _stream_is_misconfigured(text_reader):
                return text_reader

            if _is_compatible_text_stream(text_reader, encoding, errors):
                return text_reader

            # If the reader has no encoding we try to find the underlying
            # binary reader for it.  If that fails because the environment is
            # misconfigured, we silently go with the same reader because this
            # is too common to happen.  In that case mojibake is better than
            # exceptions.
            binary_reader = _find_binary_reader(text_reader)
            if binary_reader is None:
                return text_reader

        # At this point we default the errors to replace instead of strict
        # because nobody handles those errors anyway, and the environment
        # is already too broken for anything else to help.
        if errors is None:
            errors = 'replace'
        return _wrap_stream_for_text(binary_reader, encoding, errors)
    def _force_correct_text_writer(text_writer, encoding, errors):
        # Mirror image of _force_correct_text_reader for output streams.
        if _is_binary_writer(text_writer, False):
            binary_writer = text_writer
        else:
            # If there is no target encoding set we need to verify that the
            # writer is actually not misconfigured.
            if encoding is None and not _stream_is_misconfigured(text_writer):
                return text_writer

            if _is_compatible_text_stream(text_writer, encoding, errors):
                return text_writer

            # If the writer has no encoding we try to find the underlying
            # binary writer for it.  If that fails because the environment is
            # misconfigured, we silently go with the same writer because this
            # is too common to happen.  In that case mojibake is better than
            # exceptions.
            binary_writer = _find_binary_writer(text_writer)
            if binary_writer is None:
                return text_writer

        # At this point we default the errors to replace instead of strict
        # because nobody handles those errors anyway, and the environment
        # is already too broken for anything else to help.
        if errors is None:
            errors = 'replace'
        return _wrap_stream_for_text(binary_writer, encoding, errors)
def get_binary_stdin():
reader = _find_binary_reader(sys.stdin)
if reader is None:
raise BrokenEnvironment('Was not able to determine binary '
'stream for sys.stdin.')
return reader
def get_binary_stdout():
writer = _find_binary_writer(sys.stdout)
if writer is None:
raise BrokenEnvironment('Was not able to determine binary '
'stream for sys.stdout.')
return writer
def get_binary_stderr():
writer = _find_binary_writer(sys.stderr)
if writer is None:
raise BrokenEnvironment('Was not able to determine binary '
'stream for sys.stderr.')
return writer
    def get_text_stdin(encoding=None, errors=None):
        # The text accessors delegate to the force-correct helpers above.
        return _force_correct_text_reader(sys.stdin, encoding, errors)

    def get_text_stdout(encoding=None, errors=None):
        return _force_correct_text_writer(sys.stdout, encoding, errors)

    def get_text_stderr(encoding=None, errors=None):
        return _force_correct_text_writer(sys.stderr, encoding, errors)
def get_binary_argv():
fs_enc = sys.getfilesystemencoding()
return [x.encode(fs_enc, 'surrogateescape') for x in sys.argv]
binary_env = os.environb
    @contextlib.contextmanager
    def wrap_standard_stream(stream_type, stream):
        # Temporarily install *stream* as ``sys.<stream_type>``.  On 3.x
        # the replacement must be a *text* stream backed by a binary
        # buffer, because the binary accessors above reach through it.
        # Note that the *old* stream is yielded, so callers can still
        # reach the replaced object.
        old_stream = getattr(sys, stream_type, None)
        if stream_type == 'stdin':
            if _is_binary_reader(stream):
                raise TypeError('Standard input stream cannot be set to a '
                                'binary reader directly.')
            if _find_binary_reader(stream) is None:
                raise TypeError('Standard input stream needs to be backed '
                                'by a binary stream.')
        elif stream_type in ('stdout', 'stderr'):
            if _is_binary_writer(stream):
                raise TypeError('Standard output stream cannot be set to a '
                                'binary writer directly.')
            if _find_binary_writer(stream) is None:
                raise TypeError('Standard output and error streams need '
                                'to be backed by a binary streams.')
        else:
            raise TypeError('Invalid stream %s' % stream_type)
        setattr(sys, stream_type, stream)
        try:
            yield old_stream
        finally:
            setattr(sys, stream_type, old_stream)
class _CapturedStream(object):
"""A helper that flushes before getvalue() to fix a few oddities
on Python 3.
"""
def __init__(self, stream):
self._stream = stream
def __getattr__(self, name):
return getattr(self._stream, name)
def getvalue(self):
self._stream.flush()
return self._stream.buffer.getvalue()
def __repr__(self):
return repr(self._stream)
@contextlib.contextmanager
def capture_stdout(and_stderr=False):
"""Captures stdout and yields the new bytes stream that backs it.
It also wraps it in a fake object that flushes on getting the
underlying value.
"""
ll_stream = io.BytesIO()
stream = _NonClosingTextIOWrapper(ll_stream, sys.stdout.encoding,
sys.stdout.errors)
old_stdout = sys.stdout
sys.stdout = stream
if and_stderr:
old_stderr = sys.stderr
sys.stderr = stream
try:
yield _CapturedStream(stream)
finally:
stream.flush()
sys.stdout = old_stdout
if and_stderr:
sys.stderr = old_stderr
def _fixup_path(path):
    """Encode text paths to bytes on platforms whose file-system
    encoding is unreliable; all other paths pass through untouched."""
    if not has_likely_buggy_unicode_filesystem:
        return path
    if not isinstance(path, text_type):
        return path
    if PY2:
        return path.encode(get_filesystem_encoding())
    return path.encode(get_filesystem_encoding(), 'surrogateescape')
def open(filename, mode='r', encoding=None, errors=None):
    """Open a file in either text or binary mode.

    Text files are decoded as ``utf-8-sig`` when reading and encoded as
    ``utf-8`` when writing (see :func:`get_file_encoding`); binary files
    are opened unmodified.  This deliberately shadows the builtin within
    this module's public API.

    Bug fixes over the original behaviour:

    *   an explicitly passed *encoding* is now honoured in text mode
        instead of being silently overwritten, and
    *   all writing modes (``w``, ``a``, ``x``) select the writer
        encoding, not only ``w`` — appending with ``utf-8-sig`` would
        have emitted a BOM in the middle of the file.
    """
    filename = _fixup_path(filename)
    if 'b' not in mode and encoding is None:
        for_writing = any(flag in mode for flag in 'wax')
        encoding = get_file_encoding(for_writing)
    if encoding is not None:
        return io.open(filename, mode, encoding=encoding, errors=errors)
    return io.open(filename, mode)
| mitsuhiko/python-unio | unio.py | Python | bsd-3-clause | 15,682 |
# -*- coding: utf-8 -*-
import wx
from copy import copy
# Characters treated as word separators by default.
sWhitespace = ' \t\n'

def SplitAndKeep(string, splitchars = " \t\n"):
    """Split *string* on any character in *splitchars*, keeping each
    separator as its own list element.

    Consecutive (or leading) separators produce empty-string pieces
    between them, and a trailing separator produces no trailing empty
    piece — this mirrors the historical scan-and-reslice behaviour
    exactly, but runs in a single O(n) pass instead of re-slicing the
    remainder of the string after every separator (accidental O(n^2)).
    """
    pieces = []
    start = 0
    for pos, char in enumerate(string):
        if char in splitchars:
            pieces.append(string[start:pos])
            pieces.append(char)
            start = pos + 1
    if start < len(string):
        pieces.append(string[start:])
    return pieces
class AutowrappedStaticText(wx.StaticText):
    """A StaticText-like widget which implements word wrapping."""

    def __init__(self, *args, **kwargs):
        wx.StaticText.__init__(self, *args, **kwargs)
        # Keep the unwrapped text here; the superclass label holds the
        # wrapped rendition produced by Wrap().
        self.label = super(AutowrappedStaticText, self).GetLabel()
        self.pieces = SplitAndKeep(self.label, sWhitespace)
        self.Bind(wx.EVT_SIZE, self.OnSize)
        # Width used by the last wrap; see the TODO in Wrap().
        self.lastWrap = None
        self.Wrap()

    def SetLabel(self, newLabel):
        """Store the new label and recalculate the wrapped version."""
        self.label = newLabel
        self.pieces = SplitAndKeep(self.label, sWhitespace)
        self.Wrap()

    def GetLabel(self):
        """Returns the label (unwrapped)."""
        return self.label

    def Wrap(self):
        """Wraps the words in label."""
        # NOTE(review): GetVirtualSizeTuple() is the classic-wx API and
        # the 10px margin looks like an empirical fudge -- confirm.
        maxWidth = self.GetParent().GetVirtualSizeTuple()[0] - 10
        #TODO: Fix this so that we're not wasting cycles, but so that it actually works
        #if self.lastWrap and self.lastWrap == maxWidth:
        #    return
        self.lastWrap = maxWidth
        pieces = copy(self.pieces)
        lines = []
        currentLine = []
        currentString = ""
        while len(pieces) > 0:
            nextPiece = pieces.pop(0)
            newString = currentString + nextPiece
            newWidth = self.GetTextExtent(newString)[0]
            currentPieceCount = len(currentLine)
            # Break the line when adding the next piece would overflow,
            # or on an explicit newline piece.
            if (currentPieceCount > 0 and newWidth > maxWidth) or nextPiece == '\n':
                # Drop a trailing whitespace piece before emitting.
                if currentPieceCount > 0 and currentLine[-1] in sWhitespace:
                    currentLine = currentLine[:-1]
                # NOTE(review): when the breaking piece is whitespace the
                # following piece is also skipped -- verify intended.
                if nextPiece in sWhitespace:
                    pieces = pieces[1:]
                    currentLine.append('\n')
                lines.extend(currentLine)
                currentLine = [nextPiece]
                currentString = nextPiece
            else:
                currentString += nextPiece
                currentLine.append(nextPiece)
        lines.extend(currentLine)
        line = "".join(lines)
        super(AutowrappedStaticText, self).SetLabel(line)
        self.Refresh()

    def OnSize(self, event):
        # Re-wrap whenever the widget is resized.
        self.Wrap()
| arthurljones/bikechurch-signin-python | src/controls/autowrapped_static_text.py | Python | bsd-3-clause | 2,223 |
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from cms.extensions import PageExtensionAdmin, TitleExtensionAdmin
from django.conf import settings
from django.contrib import admin
from django.utils.translation import ugettext_lazy as _
from .forms import TitleMetaAdminForm
from .models import PageMeta, TitleMeta
class PageMetaAdmin(PageExtensionAdmin):
    # Admin for page-level meta data attached to a django CMS page.
    # All social-network fieldsets are collapsed by default.
    raw_id_fields = ('og_author',)
    fieldsets = (
        (None, {'fields': ('image',)}),
        (_('OpenGraph'), {
            'fields': (
                'og_type', ('og_author', 'og_author_url', 'og_author_fbid'),
                ('og_publisher', 'og_app_id')
            ),
            'classes': ('collapse',)
        }),
        (_('Twitter Cards'), {
            'fields': ('twitter_type', 'twitter_author'),
            'classes': ('collapse',)
        }),
        (_('Google+ Snippets'), {
            'fields': ('gplus_type', 'gplus_author'),
            'classes': ('collapse',)
        }),
    )

    class Media:
        # Shared stylesheet for the meta admin forms.
        css = {
            'all': ('%sdjangocms_page_meta/css/%s' % (
                settings.STATIC_URL, 'djangocms_page_meta_admin.css'),)
        }

    def get_model_perms(self, request):
        """
        Return empty perms dict thus hiding the model from admin index.
        """
        return {}

admin.site.register(PageMeta, PageMetaAdmin)
class TitleMetaAdmin(TitleExtensionAdmin):
    # Admin for title-level (per-language) meta data; validation lives
    # in the custom form.
    form = TitleMetaAdminForm

    class Media:
        # Same stylesheet as PageMetaAdmin.
        css = {
            'all': ('%sdjangocms_page_meta/css/%s' % (
                settings.STATIC_URL, 'djangocms_page_meta_admin.css'),)
        }

    def get_model_perms(self, request):
        """
        Return empty perms dict thus hiding the model from admin index.
        """
        return {}

admin.site.register(TitleMeta, TitleMetaAdmin)
| Formlabs/djangocms-page-meta | djangocms_page_meta/admin.py | Python | bsd-3-clause | 1,842 |
#import pytest
#import sys
import test.api as api
#import bauble.db as db
from bauble.model.user import User
def xtest_user_json(session):
    # Disabled test (the ``x`` prefix keeps pytest from collecting it):
    # creates a User row after purging leftovers with the same username.
    username = 'test_user_json'
    password = username
    users = session.query(User).filter_by(username=username)
    for user in users:
        session.delete(user)
    session.commit()

    user = User(username=username, password=password)
    session.add(user)
    session.commit()
def xtest_get_schema():
    # Disabled test: fetches the user JSON schema from the API; the
    # result is not asserted on.
    schema = api.get_resource("/user/schema")
def xtest_resource(session):
    """
    Test the server properly /family resources
    """
    # Disabled and effectively dead: the unconditional ``return`` below
    # skips everything, and the remaining code references names that are
    # undefined here (db, organization, Family, second_ref) -- it would
    # need repair before being re-enabled.
    return

    db.set_session_schema(session, session.merge(organization).pg_schema)
    families = session.query(Family)

    # create a family family
    first_family = api.create_resource('/family', {'family': api.get_random_name()})

    # get the family
    first_family = api.get_resource(first_family['ref'])

    # query for families
    response_json = api.query_resource('/family', q=second_family['family'])
    second_family = response_json[0]  # we're assuming there's only one
    assert second_family['ref'] == second_ref

    # delete the created resources
    api.delete_resource(first_family['ref'])
    api.delete_resource(second_family['ref'])
def test_password(session):
    # Round-trips a password through the User model: the stored value
    # must not be plain text, while equality against the plain password
    # still holds (User.password apparently compares through a hash --
    # confirm in the model definition).
    username = api.get_random_name()
    email = username + '@bauble.io'
    password = api.get_random_name()
    user = User(email=email, username=username, password=password)
    session.add(user)
    session.commit()

    # test the password isn't stored in plain text
    assert user._password != password

    # test that we can compare the password against a plain test password
    assert user.password == password

    session.delete(user)
    session.commit()
| Bauble/bauble.api | test/spec/test_user.py | Python | bsd-3-clause | 1,763 |
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
#
# Copyright 2021 The NiPreps Developers <nipreps@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# We support and encourage derived works from this project, please read
# about our expectations at
#
# https://www.nipreps.org/community/licensing/
#
"""Interfaces for handling BIDS-like neuroimaging structures."""
from collections import defaultdict
from json import dumps, loads
from pathlib import Path
from shutil import copytree, rmtree
from pkg_resources import resource_filename as _pkgres
import re
import nibabel as nb
import numpy as np
from nipype import logging
from nipype.interfaces.base import (
traits,
isdefined,
Undefined,
TraitedSpec,
BaseInterfaceInputSpec,
DynamicTraitedSpec,
File,
Directory,
InputMultiObject,
OutputMultiObject,
Str,
SimpleInterface,
)
from nipype.interfaces.io import add_traits
from templateflow.api import templates as _get_template_list
from ..utils.bids import _init_layout, relative_to_root
from ..utils.images import set_consumables, unsafe_write_nifti_header_and_data
from ..utils.misc import splitext as _splitext, _copy_any
# Matches a trailing .gz so compression state can be toggled on paths.
regz = re.compile(r"\.gz$")
# BIDS-Derivatives path configuration bundled with the package.
_pybids_spec = loads(Path(_pkgres("niworkflows", "data/nipreps.json")).read_text())
BIDS_DERIV_ENTITIES = frozenset({e["name"] for e in _pybids_spec["entities"]})
BIDS_DERIV_PATTERNS = tuple(_pybids_spec["default_path_patterns"])
STANDARD_SPACES = _get_template_list()
LOGGER = logging.getLogger("nipype.interface")
def _none():
    """Return ``None``.

    Module-level default factory for ``DEFAULT_DTYPES`` -- presumably a
    named function rather than a lambda so the mapping stays picklable.
    """
    return None
# Automatically coerce certain suffixes (DerivativesDataSink): maps a
# BIDS suffix to the NumPy dtype name the written NIfTI should carry;
# unknown suffixes yield None (no coercion) via the _none factory.
DEFAULT_DTYPES = defaultdict(
    _none,
    (
        ("mask", "uint8"),
        ("dseg", "int16"),
        ("probseg", "float32"),
        ("boldref", "source"),
    ),
)
class _BIDSBaseInputSpec(BaseInterfaceInputSpec):
    # Inputs shared by the BIDS-aware interfaces below.
    bids_dir = traits.Either(
        (None, Directory(exists=True)), usedefault=True, desc="optional bids directory"
    )
    bids_validate = traits.Bool(True, usedefault=True, desc="enable BIDS validator")
class _BIDSInfoInputSpec(_BIDSBaseInputSpec):
    # Single file whose BIDS entities will be parsed.
    in_file = File(mandatory=True, desc="input file, part of a BIDS tree")
class _BIDSInfoOutputSpec(DynamicTraitedSpec):
    # One output per supported BIDS entity; entities missing from the
    # path come out as Undefined (see BIDSInfo._run_interface).
    subject = traits.Str()
    session = traits.Str()
    task = traits.Str()
    acquisition = traits.Str()
    reconstruction = traits.Str()
    run = traits.Int()
    suffix = traits.Str()
class BIDSInfo(SimpleInterface):
"""
Extract BIDS entities from a BIDS-conforming path.
This interface uses only the basename, not the path, to determine the
subject, session, task, run, acquisition or reconstruction.
>>> bids_info = BIDSInfo(bids_dir=str(datadir / 'ds054'), bids_validate=False)
>>> bids_info.inputs.in_file = '''\
sub-01/func/ses-retest/sub-01_ses-retest_task-covertverbgeneration_bold.nii.gz'''
>>> res = bids_info.run()
>>> res.outputs
<BLANKLINE>
acquisition = <undefined>
reconstruction = <undefined>
run = <undefined>
session = retest
subject = 01
suffix = bold
task = covertverbgeneration
<BLANKLINE>
>>> bids_info = BIDSInfo(bids_dir=str(datadir / 'ds054'), bids_validate=False)
>>> bids_info.inputs.in_file = '''\
sub-01/func/ses-retest/sub-01_ses-retest_task-covertverbgeneration_rec-MB_acq-AP_run-1_bold.nii.gz'''
>>> res = bids_info.run()
>>> res.outputs
<BLANKLINE>
acquisition = AP
reconstruction = MB
run = 1
session = retest
subject = 01
suffix = bold
task = covertverbgeneration
<BLANKLINE>
>>> bids_info = BIDSInfo(bids_dir=str(datadir / 'ds054'), bids_validate=False)
>>> bids_info.inputs.in_file = '''\
sub-01/func/ses-retest/sub-01_ses-retest_task-covertverbgeneration_acq-AP_run-01_bold.nii.gz'''
>>> res = bids_info.run()
>>> res.outputs
<BLANKLINE>
acquisition = AP
reconstruction = <undefined>
run = 1
session = retest
subject = 01
suffix = bold
task = covertverbgeneration
<BLANKLINE>
>>> bids_info = BIDSInfo(bids_validate=False)
>>> bids_info.inputs.in_file = str(
... datadir / 'ds114' / 'sub-01' / 'ses-retest' /
... 'func' / 'sub-01_ses-retest_task-covertverbgeneration_bold.nii.gz')
>>> res = bids_info.run()
>>> res.outputs
<BLANKLINE>
acquisition = <undefined>
reconstruction = <undefined>
run = <undefined>
session = retest
subject = 01
suffix = bold
task = covertverbgeneration
<BLANKLINE>
>>> bids_info = BIDSInfo(bids_validate=False)
>>> bids_info.inputs.in_file = '''\
sub-01/func/ses-retest/sub-01_ses-retest_task-covertverbgeneration_bold.nii.gz'''
>>> res = bids_info.run()
>>> res.outputs
<BLANKLINE>
acquisition = <undefined>
reconstruction = <undefined>
run = <undefined>
session = retest
subject = 01
suffix = bold
task = covertverbgeneration
<BLANKLINE>
"""
    input_spec = _BIDSInfoInputSpec
    output_spec = _BIDSInfoOutputSpec

    def _run_interface(self, runtime):
        from bids.layout import parse_file_entities

        bids_dir = self.inputs.bids_dir
        in_file = self.inputs.in_file
        if bids_dir is not None:
            try:
                # Parse entities from the path relative to the BIDS root
                # when the file actually lives underneath it.
                in_file = str(Path(in_file).relative_to(bids_dir))
            except ValueError:
                # Not under bids_dir: fall back to the path as given.
                pass
        params = parse_file_entities(in_file)
        # Entities absent from the filename become Undefined outputs.
        self._results = {
            key: params.get(key, Undefined)
            for key in _BIDSInfoOutputSpec().get().keys()
        }
        return runtime
class _BIDSDataGrabberInputSpec(BaseInterfaceInputSpec):
    # Pre-collected dictionary of imaging files, keyed by image type.
    subject_data = traits.Dict(Str, traits.Any)
    subject_id = Str()
class _BIDSDataGrabberOutputSpec(TraitedSpec):
    # The full dictionary plus one dedicated output per image type.
    out_dict = traits.Dict(desc="output data structure")
    fmap = OutputMultiObject(desc="output fieldmaps")
    bold = OutputMultiObject(desc="output functional images")
    sbref = OutputMultiObject(desc="output sbrefs")
    t1w = OutputMultiObject(desc="output T1w images")
    roi = OutputMultiObject(desc="output ROI images")
    t2w = OutputMultiObject(desc="output T2w images")
    flair = OutputMultiObject(desc="output FLAIR images")
class BIDSDataGrabber(SimpleInterface):
"""
Collect files from a BIDS directory structure.
>>> bids_src = BIDSDataGrabber(anat_only=False)
>>> bids_src.inputs.subject_data = bids_collect_data(
... str(datadir / 'ds114'), '01', bids_validate=False)[0]
>>> bids_src.inputs.subject_id = '01'
>>> res = bids_src.run()
>>> res.outputs.t1w # doctest: +ELLIPSIS +NORMALIZE_WHITESPACE
['.../ds114/sub-01/ses-retest/anat/sub-01_ses-retest_T1w.nii.gz',
'.../ds114/sub-01/ses-test/anat/sub-01_ses-test_T1w.nii.gz']
"""
input_spec = _BIDSDataGrabberInputSpec
output_spec = _BIDSDataGrabberOutputSpec
_require_funcs = True
def __init__(self, *args, **kwargs):
anat_only = kwargs.pop("anat_only")
anat_derivatives = kwargs.pop("anat_derivatives", None)
super(BIDSDataGrabber, self).__init__(*args, **kwargs)
if anat_only is not None:
self._require_funcs = not anat_only
self._require_t1w = anat_derivatives is None
    def _run_interface(self, runtime):
        # Pass the collected dictionary through as out_dict and also
        # expose each image type as its own output.
        bids_dict = self.inputs.subject_data
        self._results["out_dict"] = bids_dict
        self._results.update(bids_dict)

        # Hard requirements raise; everything else is only logged.
        if self._require_t1w and not bids_dict['t1w']:
            raise FileNotFoundError(
                "No T1w images found for subject sub-{}".format(self.inputs.subject_id)
            )

        if self._require_funcs and not bids_dict["bold"]:
            raise FileNotFoundError(
                "No functional images found for subject sub-{}".format(
                    self.inputs.subject_id
                )
            )

        for imtype in ["bold", "t2w", "flair", "fmap", "sbref", "roi"]:
            if not bids_dict[imtype]:
                LOGGER.info(
                    'No "%s" images found for sub-%s', imtype, self.inputs.subject_id
                )

        return runtime
class _DerivativesDataSinkInputSpec(DynamicTraitedSpec, BaseInterfaceInputSpec):
    # Inputs controlling where and how derivative files are written.
    base_directory = traits.Directory(
        desc="Path to the base directory for storing data."
    )
    check_hdr = traits.Bool(True, usedefault=True, desc="fix headers of NIfTI outputs")
    compress = InputMultiObject(
        traits.Either(None, traits.Bool),
        usedefault=True,
        desc="whether ``in_file`` should be compressed (True), uncompressed (False) "
        "or left unmodified (None, default).",
    )
    data_dtype = Str(
        desc="NumPy datatype to coerce NIfTI data to, or `source` to"
        "match the input file dtype"
    )
    dismiss_entities = InputMultiObject(
        traits.Either(None, Str),
        usedefault=True,
        desc="a list entities that will not be propagated from the source file",
    )
    in_file = InputMultiObject(
        File(exists=True), mandatory=True, desc="the object to be saved"
    )
    meta_dict = traits.DictStrAny(desc="an input dictionary containing metadata")
    # exists=False: source files only provide naming entities and need
    # not be present on disk.
    source_file = InputMultiObject(
        File(exists=False), mandatory=True, desc="the source file(s) to extract entities from")
class _DerivativesDataSinkOutputSpec(TraitedSpec):
    # Paths actually written, plus bookkeeping about what was done.
    out_file = OutputMultiObject(File(exists=True, desc="written file path"))
    out_meta = OutputMultiObject(File(exists=True, desc="written JSON sidecar path"))
    compression = OutputMultiObject(
        traits.Either(None, traits.Bool),
        desc="whether ``in_file`` should be compressed (True), uncompressed (False) "
        "or left unmodified (None).",
    )
    fixed_hdr = traits.List(traits.Bool, desc="whether derivative header was fixed")
class DerivativesDataSink(SimpleInterface):
    """
    Store derivative files.
    Saves the ``in_file`` into a BIDS-Derivatives folder provided
    by ``base_directory``, given the input reference ``source_file``.
    >>> import tempfile
    >>> tmpdir = Path(tempfile.mkdtemp())
    >>> tmpfile = tmpdir / 'a_temp_file.nii.gz'
    >>> tmpfile.open('w').close()  # "touch" the file
    >>> t1w_source = bids_collect_data(
    ...     str(datadir / 'ds114'), '01', bids_validate=False)[0]['t1w'][0]
    >>> dsink = DerivativesDataSink(base_directory=str(tmpdir), check_hdr=False)
    >>> dsink.inputs.in_file = str(tmpfile)
    >>> dsink.inputs.source_file = t1w_source
    >>> dsink.inputs.desc = 'denoised'
    >>> dsink.inputs.compress = False
    >>> res = dsink.run()
    >>> res.outputs.out_file  # doctest: +ELLIPSIS
    '.../niworkflows/sub-01/ses-retest/anat/sub-01_ses-retest_desc-denoised_T1w.nii'
    >>> tmpfile = tmpdir / 'a_temp_file.nii'
    >>> tmpfile.open('w').close()  # "touch" the file
    >>> dsink = DerivativesDataSink(base_directory=str(tmpdir), check_hdr=False,
    ...                             allowed_entities=("custom",))
    >>> dsink.inputs.in_file = str(tmpfile)
    >>> dsink.inputs.source_file = t1w_source
    >>> dsink.inputs.custom = 'noise'
    >>> res = dsink.run()
    >>> res.outputs.out_file  # doctest: +ELLIPSIS
    '.../niworkflows/sub-01/ses-retest/anat/sub-01_ses-retest_custom-noise_T1w.nii'
    >>> dsink = DerivativesDataSink(base_directory=str(tmpdir), check_hdr=False,
    ...                             allowed_entities=("custom",))
    >>> dsink.inputs.in_file = [str(tmpfile), str(tmpfile)]
    >>> dsink.inputs.source_file = t1w_source
    >>> dsink.inputs.custom = [1, 2]
    >>> dsink.inputs.compress = True
    >>> res = dsink.run()
    >>> res.outputs.out_file  # doctest: +ELLIPSIS
    ['.../niworkflows/sub-01/ses-retest/anat/sub-01_ses-retest_custom-1_T1w.nii.gz',
     '.../niworkflows/sub-01/ses-retest/anat/sub-01_ses-retest_custom-2_T1w.nii.gz']
    >>> dsink = DerivativesDataSink(base_directory=str(tmpdir), check_hdr=False,
    ...                             allowed_entities=("custom1", "custom2"))
    >>> dsink.inputs.in_file = [str(tmpfile)] * 2
    >>> dsink.inputs.source_file = t1w_source
    >>> dsink.inputs.custom1 = [1, 2]
    >>> dsink.inputs.custom2 = "b"
    >>> res = dsink.run()
    >>> res.outputs.out_file  # doctest: +ELLIPSIS
    ['.../niworkflows/sub-01/ses-retest/anat/sub-01_ses-retest_custom1-1_custom2-b_T1w.nii',
     '.../niworkflows/sub-01/ses-retest/anat/sub-01_ses-retest_custom1-2_custom2-b_T1w.nii']
    When multiple source files are passed, only common entities are passed down.
    For example, if two T1w images from different sessions are used to generate
    a single image, the session entity is removed automatically.
    >>> bids_dir = tmpdir / 'bidsroot'
    >>> multi_source = [
    ...     bids_dir / 'sub-02/ses-A/anat/sub-02_ses-A_T1w.nii.gz',
    ...     bids_dir / 'sub-02/ses-B/anat/sub-02_ses-B_T1w.nii.gz']
    >>> for source_file in multi_source:
    ...     source_file.parent.mkdir(parents=True, exist_ok=True)
    ...     _ = source_file.write_text("")
    >>> dsink = DerivativesDataSink(base_directory=str(tmpdir), check_hdr=False)
    >>> dsink.inputs.in_file = str(tmpfile)
    >>> dsink.inputs.source_file = list(map(str, multi_source))
    >>> dsink.inputs.desc = 'preproc'
    >>> res = dsink.run()
    >>> res.outputs.out_file  # doctest: +ELLIPSIS
    '.../niworkflows/sub-02/anat/sub-02_desc-preproc_T1w.nii'
    If, on the other hand, only one is used, the session is preserved:
    >>> dsink.inputs.source_file = str(multi_source[0])
    >>> res = dsink.run()
    >>> res.outputs.out_file  # doctest: +ELLIPSIS
    '.../niworkflows/sub-02/ses-A/anat/sub-02_ses-A_desc-preproc_T1w.nii'
    >>> bids_dir = tmpdir / 'bidsroot' / 'sub-02' / 'ses-noanat' / 'func'
    >>> bids_dir.mkdir(parents=True, exist_ok=True)
    >>> tricky_source = bids_dir / 'sub-02_ses-noanat_task-rest_run-01_bold.nii.gz'
    >>> tricky_source.open('w').close()
    >>> dsink = DerivativesDataSink(base_directory=str(tmpdir), check_hdr=False)
    >>> dsink.inputs.in_file = str(tmpfile)
    >>> dsink.inputs.source_file = str(tricky_source)
    >>> dsink.inputs.desc = 'preproc'
    >>> res = dsink.run()
    >>> res.outputs.out_file  # doctest: +ELLIPSIS
    '.../niworkflows/sub-02/ses-noanat/func/sub-02_ses-noanat_task-rest_run-1_\
desc-preproc_bold.nii'
    >>> bids_dir = tmpdir / 'bidsroot' / 'sub-02' / 'ses-noanat' / 'func'
    >>> bids_dir.mkdir(parents=True, exist_ok=True)
    >>> tricky_source = bids_dir / 'sub-02_ses-noanat_task-rest_run-1_bold.nii.gz'
    >>> tricky_source.open('w').close()
    >>> dsink = DerivativesDataSink(base_directory=str(tmpdir), check_hdr=False)
    >>> dsink.inputs.in_file = str(tmpfile)
    >>> dsink.inputs.source_file = str(tricky_source)
    >>> dsink.inputs.desc = 'preproc'
    >>> dsink.inputs.RepetitionTime = 0.75
    >>> res = dsink.run()
    >>> res.outputs.out_meta  # doctest: +ELLIPSIS
    '.../niworkflows/sub-02/ses-noanat/func/sub-02_ses-noanat_task-rest_run-1_\
desc-preproc_bold.json'
    >>> Path(res.outputs.out_meta).read_text().splitlines()[1]
    '  "RepetitionTime": 0.75'
    >>> bids_dir = tmpdir / 'bidsroot' / 'sub-02' / 'ses-noanat' / 'func'
    >>> bids_dir.mkdir(parents=True, exist_ok=True)
    >>> tricky_source = bids_dir / 'sub-02_ses-noanat_task-rest_run-01_bold.nii.gz'
    >>> tricky_source.open('w').close()
    >>> dsink = DerivativesDataSink(base_directory=str(tmpdir), check_hdr=False,
    ...                             SkullStripped=True)
    >>> dsink.inputs.in_file = str(tmpfile)
    >>> dsink.inputs.source_file = str(tricky_source)
    >>> dsink.inputs.desc = 'preproc'
    >>> dsink.inputs.space = 'MNI152NLin6Asym'
    >>> dsink.inputs.resolution = '01'
    >>> dsink.inputs.RepetitionTime = 0.75
    >>> res = dsink.run()
    >>> res.outputs.out_meta  # doctest: +ELLIPSIS
    '.../niworkflows/sub-02/ses-noanat/func/sub-02_ses-noanat_task-rest_run-1_\
space-MNI152NLin6Asym_res-01_desc-preproc_bold.json'
    >>> lines = Path(res.outputs.out_meta).read_text().splitlines()
    >>> lines[1]
    '  "RepetitionTime": 0.75,'
    >>> lines[2]
    '  "SkullStripped": true'
    >>> bids_dir = tmpdir / 'bidsroot' / 'sub-02' / 'ses-noanat' / 'func'
    >>> bids_dir.mkdir(parents=True, exist_ok=True)
    >>> tricky_source = bids_dir / 'sub-02_ses-noanat_task-rest_run-01_bold.nii.gz'
    >>> tricky_source.open('w').close()
    >>> dsink = DerivativesDataSink(base_directory=str(tmpdir), check_hdr=False,
    ...                             SkullStripped=True)
    >>> dsink.inputs.in_file = str(tmpfile)
    >>> dsink.inputs.source_file = str(tricky_source)
    >>> dsink.inputs.desc = 'preproc'
    >>> dsink.inputs.resolution = 'native'
    >>> dsink.inputs.space = 'MNI152NLin6Asym'
    >>> dsink.inputs.RepetitionTime = 0.75
    >>> dsink.inputs.meta_dict = {'RepetitionTime': 1.75, 'SkullStripped': False, 'Z': 'val'}
    >>> res = dsink.run()
    >>> res.outputs.out_meta  # doctest: +ELLIPSIS
    '.../niworkflows/sub-02/ses-noanat/func/sub-02_ses-noanat_task-rest_run-1_\
space-MNI152NLin6Asym_desc-preproc_bold.json'
    >>> lines = Path(res.outputs.out_meta).read_text().splitlines()
    >>> lines[1]
    '  "RepetitionTime": 0.75,'
    >>> lines[2]
    '  "SkullStripped": true,'
    >>> lines[3]
    '  "Z": "val"'
    """
    input_spec = _DerivativesDataSinkInputSpec
    output_spec = _DerivativesDataSinkOutputSpec
    # Sub-folder of ``base_directory`` into which files are written;
    # overridable per-instance via the ``out_path_base`` constructor argument.
    out_path_base = "niworkflows"
    _always_run = True
    # Entities settable as inputs; extended per-instance with ``allowed_entities``.
    _allowed_entities = set(BIDS_DERIV_ENTITIES)
    def __init__(self, allowed_entities=None, out_path_base=None, **inputs):
        """Initialize the SimpleInterface and extend inputs with custom entities."""
        self._allowed_entities = set(allowed_entities or []).union(
            self._allowed_entities
        )
        if out_path_base:
            self.out_path_base = out_path_base
        self._metadata = {}
        self._static_traits = self.input_spec.class_editable_traits() + sorted(
            self._allowed_entities
        )
        # Constructor keywords that are neither static traits nor allowed
        # entities (e.g. ``SkullStripped=True``) become JSON sidecar metadata.
        for dynamic_input in set(inputs) - set(self._static_traits):
            self._metadata[dynamic_input] = inputs.pop(dynamic_input)
        # First regular initialization (constructs InputSpec object)
        super().__init__(**inputs)
        add_traits(self.inputs, self._allowed_entities)
        for k in self._allowed_entities.intersection(list(inputs.keys())):
            # Add additional input fields (self.inputs is an object)
            setattr(self.inputs, k, inputs[k])
    def _run_interface(self, runtime):
        """Build BIDS-valid output paths, write derivatives and JSON sidecar."""
        from bids.layout import parse_file_entities
        from bids.layout.writing import build_path
        from bids.utils import listify
        # Ready the output folder
        base_directory = runtime.cwd
        if isdefined(self.inputs.base_directory):
            base_directory = self.inputs.base_directory
        base_directory = Path(base_directory).absolute()
        out_path = base_directory / self.out_path_base
        out_path.mkdir(exist_ok=True, parents=True)
        # Ensure we have a list
        in_file = listify(self.inputs.in_file)
        # Read in the dictionary of metadata
        if isdefined(self.inputs.meta_dict):
            meta = self.inputs.meta_dict
            # inputs passed in construction take priority
            meta.update(self._metadata)
            self._metadata = meta
        # Initialize entities with those from the source file.
        in_entities = [
            parse_file_entities(str(relative_to_root(source_file)))
            for source_file in self.inputs.source_file
        ]
        # Keep only the entities shared by *all* source files (e.g. drop the
        # session entity when sources come from different sessions).
        out_entities = {k: v for k, v in in_entities[0].items()
                        if all(ent.get(k) == v for ent in in_entities[1:])}
        for drop_entity in listify(self.inputs.dismiss_entities or []):
            out_entities.pop(drop_entity, None)
        # Override extension with that of the input file(s)
        out_entities["extension"] = [
            # _splitext does not accept .surf.gii (for instance)
            "".join(Path(orig_file).suffixes).lstrip(".")
            for orig_file in in_file
        ]
        # Normalize ``compress`` to one entry per input file; a single value
        # applies to all of them.
        compress = listify(self.inputs.compress) or [None]
        if len(compress) == 1:
            compress = compress * len(in_file)
        for i, ext in enumerate(out_entities["extension"]):
            if compress[i] is not None:
                ext = regz.sub("", ext)
                out_entities["extension"][i] = f"{ext}.gz" if compress[i] else ext
        # Override entities with those set as inputs
        for key in self._allowed_entities:
            value = getattr(self.inputs, key)
            if value is not None and isdefined(value):
                out_entities[key] = value
        # Clean up native resolution with space
        if out_entities.get("resolution") == "native" and out_entities.get("space"):
            out_entities.pop("resolution", None)
        if len(set(out_entities["extension"])) == 1:
            out_entities["extension"] = out_entities["extension"][0]
        # Insert custom (non-BIDS) entities from allowed_entities.
        custom_entities = set(out_entities.keys()) - set(BIDS_DERIV_ENTITIES)
        patterns = BIDS_DERIV_PATTERNS
        if custom_entities:
            # Example: f"{key}-{{{key}}}" -> "task-{task}"
            custom_pat = "_".join(f"{key}-{{{key}}}" for key in sorted(custom_entities))
            patterns = [
                pat.replace("_{suffix", "_".join(("", custom_pat, "{suffix")))
                for pat in patterns
            ]
        # Prepare SimpleInterface outputs object
        self._results["out_file"] = []
        self._results["compression"] = []
        self._results["fixed_hdr"] = [False] * len(in_file)
        dest_files = build_path(out_entities, path_patterns=patterns)
        if not dest_files:
            raise ValueError(f"Could not build path with entities {out_entities}.")
        # Make sure the interpolated values is embedded in a list, and check
        dest_files = listify(dest_files)
        if len(in_file) != len(dest_files):
            raise ValueError(
                f"Input files ({len(in_file)}) not matched "
                f"by interpolated patterns ({len(dest_files)})."
            )
        for i, (orig_file, dest_file) in enumerate(zip(in_file, dest_files)):
            out_file = out_path / dest_file
            out_file.parent.mkdir(exist_ok=True, parents=True)
            self._results["out_file"].append(str(out_file))
            self._results["compression"].append(str(dest_file).endswith(".gz"))
            # Set data and header iff changes need to be made. If these are
            # still None when it's time to write, just copy.
            new_data, new_header = None, None
            # CIFTI-2 dtseries files also carry a .nii suffix but are not
            # volumetric NIfTI; exclude them from header fixing.
            is_nifti = out_file.name.endswith(
                (".nii", ".nii.gz")
            ) and not out_file.name.endswith((".dtseries.nii", ".dtseries.nii.gz"))
            data_dtype = self.inputs.data_dtype or DEFAULT_DTYPES[self.inputs.suffix]
            if is_nifti and any((self.inputs.check_hdr, data_dtype)):
                nii = nb.load(orig_file)
                if self.inputs.check_hdr:
                    hdr = nii.header
                    curr_units = tuple(
                        [None if u == "unknown" else u for u in hdr.get_xyzt_units()]
                    )
                    curr_codes = (int(hdr["qform_code"]), int(hdr["sform_code"]))
                    # Default to mm, use sec if data type is bold
                    units = (
                        curr_units[0] or "mm",
                        "sec" if out_entities["suffix"] == "bold" else None,
                    )
                    xcodes = (1, 1)  # Derivative in its original scanner space
                    if self.inputs.space:
                        xcodes = (
                            (4, 4) if self.inputs.space in STANDARD_SPACES else (2, 2)
                        )
                    if curr_codes != xcodes or curr_units != units:
                        self._results["fixed_hdr"][i] = True
                        new_header = hdr.copy()
                        new_header.set_qform(nii.affine, xcodes[0])
                        new_header.set_sform(nii.affine, xcodes[1])
                        new_header.set_xyzt_units(*units)
                if data_dtype == "source":  # match source dtype
                    try:
                        data_dtype = nb.load(self.inputs.source_file[0]).get_data_dtype()
                    except Exception:
                        LOGGER.warning(
                            f"Could not get data type of file {self.inputs.source_file[0]}"
                        )
                        data_dtype = None
                if data_dtype:
                    data_dtype = np.dtype(data_dtype)
                    orig_dtype = nii.get_data_dtype()
                    if orig_dtype != data_dtype:
                        LOGGER.warning(
                            f"Changing {out_file} dtype from {orig_dtype} to {data_dtype}"
                        )
                        # coerce dataobj to new data dtype
                        if np.issubdtype(data_dtype, np.integer):
                            new_data = np.rint(nii.dataobj).astype(data_dtype)
                        else:
                            new_data = np.asanyarray(nii.dataobj, dtype=data_dtype)
                        # and set header to match
                        if new_header is None:
                            new_header = nii.header.copy()
                        new_header.set_data_dtype(data_dtype)
                del nii
            if new_data is new_header is None:
                # No changes needed: cheap copy of the original file.
                _copy_any(orig_file, str(out_file))
            else:
                orig_img = nb.load(orig_file)
                if new_data is None:
                    set_consumables(new_header, orig_img.dataobj)
                    new_data = orig_img.dataobj.get_unscaled()
                else:
                    # Without this, we would be writing nans
                    # This is our punishment for hacking around nibabel defaults
                    new_header.set_slope_inter(slope=1., inter=0.)
                unsafe_write_nifti_header_and_data(
                    fname=out_file,
                    header=new_header,
                    data=new_data
                )
                del orig_img
        if len(self._results["out_file"]) == 1:
            # Only single outputs get per-input metadata collected from
            # dynamically-added input traits.
            meta_fields = self.inputs.copyable_trait_names()
            self._metadata.update(
                {
                    k: getattr(self.inputs, k)
                    for k in meta_fields
                    if k not in self._static_traits
                }
            )
        if self._metadata:
            out_file = Path(self._results["out_file"][0])
            # 1.3.x hack
            # For dtseries, we have been generating weird non-BIDS JSON files.
            # We can safely keep producing them to avoid breaking derivatives, but
            # only the existing keys should keep going into them.
            if out_file.name.endswith(".dtseries.nii"):
                legacy_metadata = {}
                for key in ("grayordinates", "space", "surface", "surface_density", "volume"):
                    if key in self._metadata:
                        legacy_metadata[key] = self._metadata.pop(key)
                if legacy_metadata:
                    sidecar = out_file.parent / f"{_splitext(str(out_file))[0]}.json"
                    sidecar.write_text(dumps(legacy_metadata, sort_keys=True, indent=2))
            # The future: the extension is the first . and everything after
            sidecar = out_file.parent / f"{out_file.name.split('.', 1)[0]}.json"
            sidecar.write_text(dumps(self._metadata, sort_keys=True, indent=2))
            self._results["out_meta"] = str(sidecar)
        return runtime
class _ReadSidecarJSONInputSpec(_BIDSBaseInputSpec):
    """Input specification for :class:`ReadSidecarJSON`."""
    in_file = File(exists=True, mandatory=True, desc="the input nifti file")
class _ReadSidecarJSONOutputSpec(_BIDSInfoOutputSpec):
    """Output specification for :class:`ReadSidecarJSON`."""
    # Full metadata dictionary read from the JSON sidecar(s).
    out_dict = traits.Dict()
class ReadSidecarJSON(SimpleInterface):
    """
    Read JSON sidecar files of a BIDS tree.
    >>> fmap = str(datadir / 'ds054' / 'sub-100185' / 'fmap' /
    ...            'sub-100185_phasediff.nii.gz')
    >>> meta = ReadSidecarJSON(in_file=fmap, bids_dir=str(datadir / 'ds054'),
    ...                        bids_validate=False).run()
    >>> meta.outputs.subject
    '100185'
    >>> meta.outputs.suffix
    'phasediff'
    >>> meta.outputs.out_dict['Manufacturer']
    'SIEMENS'
    >>> meta = ReadSidecarJSON(in_file=fmap, fields=['Manufacturer'],
    ...                        bids_dir=str(datadir / 'ds054'),
    ...                        bids_validate=False).run()
    >>> meta.outputs.out_dict['Manufacturer']
    'SIEMENS'
    >>> meta.outputs.Manufacturer
    'SIEMENS'
    >>> meta.outputs.OtherField  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    AttributeError:
    >>> meta = ReadSidecarJSON(
    ...     in_file=fmap, fields=['MadeUpField'],
    ...     bids_dir=str(datadir / 'ds054'),
    ...     bids_validate=False).run()  # doctest: +IGNORE_EXCEPTION_DETAIL
    Traceback (most recent call last):
    KeyError:
    >>> meta = ReadSidecarJSON(in_file=fmap, fields=['MadeUpField'],
    ...                        undef_fields=True,
    ...                        bids_dir=str(datadir / 'ds054'),
    ...                        bids_validate=False).run()
    >>> meta.outputs.MadeUpField
    <undefined>
    """
    input_spec = _ReadSidecarJSONInputSpec
    output_spec = _ReadSidecarJSONOutputSpec
    # BIDSLayout cache, shared across runs of this interface instance.
    layout = None
    _always_run = True
    def __init__(self, fields=None, undef_fields=False, **inputs):
        """Initialize the interface.

        ``fields`` are metadata keys exposed as additional, dynamic outputs;
        ``undef_fields=True`` tolerates missing keys (output is <undefined>).
        """
        from bids.utils import listify
        super(ReadSidecarJSON, self).__init__(**inputs)
        self._fields = listify(fields or [])
        self._undef_fields = undef_fields
    def _outputs(self):
        """Extend the static output spec with the requested dynamic fields."""
        base = super(ReadSidecarJSON, self)._outputs()
        if self._fields:
            base = add_traits(base, self._fields)
        return base
    def _run_interface(self, runtime):
        """Parse entities and sidecar metadata of ``in_file`` into outputs."""
        self.layout = self.inputs.bids_dir or self.layout
        self.layout = _init_layout(
            self.inputs.in_file, self.layout, self.inputs.bids_validate
        )
        # Fill in BIDS entities of the output ("*_id")
        output_keys = list(_BIDSInfoOutputSpec().get().keys())
        params = self.layout.parse_file_entities(self.inputs.in_file)
        self._results = {
            key: params.get(key.split("_")[0], Undefined) for key in output_keys
        }
        # Fill in metadata
        metadata = self.layout.get_metadata(self.inputs.in_file)
        self._results["out_dict"] = metadata
        # Set dynamic outputs if fields input is present
        for fname in self._fields:
            if not self._undef_fields and fname not in metadata:
                raise KeyError(
                    'Metadata field "%s" not found for file %s'
                    % (fname, self.inputs.in_file)
                )
            self._results[fname] = metadata.get(fname, Undefined)
        return runtime
class _BIDSFreeSurferDirInputSpec(BaseInterfaceInputSpec):
    """Input specification for :class:`BIDSFreeSurferDir`."""
    derivatives = Directory(
        exists=True, mandatory=True, desc="BIDS derivatives directory"
    )
    freesurfer_home = Directory(
        exists=True, mandatory=True, desc="FreeSurfer installation directory"
    )
    # Either a plain name (resolved under ``derivatives``) or an absolute path.
    subjects_dir = traits.Either(
        traits.Str(),
        Directory(),
        default="freesurfer",
        usedefault=True,
        desc="Name of FreeSurfer subjects directory",
    )
    spaces = traits.List(traits.Str, desc="Set of output spaces to prepare")
    overwrite_fsaverage = traits.Bool(
        False, usedefault=True, desc="Overwrite fsaverage directories, if present"
    )
class _BIDSFreeSurferDirOutputSpec(TraitedSpec):
    """Output specification for :class:`BIDSFreeSurferDir`."""
    subjects_dir = traits.Directory(exists=True, desc="FreeSurfer subjects directory")
class BIDSFreeSurferDir(SimpleInterface):
    """
    Prepare a FreeSurfer subjects directory for use in a BIDS context.
    Constructs a subjects directory path, creating if necessary, and copies
    fsaverage subjects (if necessary or forced via ``overwrite_fsaverage``)
    into it from the local FreeSurfer distribution.
    If ``subjects_dir`` is an absolute path, then it is returned as the output
    ``subjects_dir``.
    If it is a relative path, it will be resolved relative to the
    ``derivatives`` directory.
    Regardless of the path, if ``fsaverage`` spaces are provided, they will be
    verified to exist, or copied from ``$FREESURFER_HOME/subjects``, if missing.
    The output ``subjects_dir`` is intended to be passed to ``ReconAll`` and
    other FreeSurfer interfaces.
    """
    input_spec = _BIDSFreeSurferDirInputSpec
    output_spec = _BIDSFreeSurferDirOutputSpec
    _always_run = True
    def _run_interface(self, runtime):
        """Resolve/create the subjects dir and stage fsaverage templates."""
        subjects_dir = Path(self.inputs.subjects_dir)
        if not subjects_dir.is_absolute():
            # Relative paths are anchored at the derivatives directory.
            subjects_dir = Path(self.inputs.derivatives) / subjects_dir
        subjects_dir.mkdir(parents=True, exist_ok=True)
        self._results["subjects_dir"] = str(subjects_dir)
        orig_subjects_dir = Path(self.inputs.freesurfer_home) / "subjects"
        # Source is target, so just quit
        if subjects_dir == orig_subjects_dir:
            return runtime
        spaces = list(self.inputs.spaces)
        # Always copy fsaverage, for proper recon-all functionality
        if "fsaverage" not in spaces:
            spaces.append("fsaverage")
        for space in spaces:
            # Skip non-freesurfer spaces and fsnative
            if not space.startswith("fsaverage"):
                continue
            source = orig_subjects_dir / space
            dest = subjects_dir / space
            # Edge case, but give a sensible error
            if not source.exists():
                if dest.exists():
                    continue
                else:
                    raise FileNotFoundError("Expected to find '%s' to copy" % source)
            # Finesse is overrated. Either leave it alone or completely clobber it.
            if dest.exists() and self.inputs.overwrite_fsaverage:
                rmtree(dest)
            if not dest.exists():
                try:
                    copytree(source, dest)
                except FileExistsError:
                    # A concurrent job may have copied it first; not an error.
                    LOGGER.warning(
                        "%s exists; if multiple jobs are running in parallel"
                        ", this can be safely ignored",
                        dest,
                    )
        return runtime
| oesteban/niworkflows | niworkflows/interfaces/bids.py | Python | bsd-3-clause | 35,326 |
#!/usr/bin/env python
#
# Copyright (C) 2009-2020 the sqlparse authors and contributors
# <see AUTHORS file>
#
# This module is part of python-sqlparse and is released under
# the BSD License: https://opensource.org/licenses/BSD-3-Clause
"""Module that contains the command line app.
Why does this file exist, and why not put this in __main__?
You might be tempted to import things from __main__ later, but that will
cause problems: the code will get executed twice:
- When you run `python -m sqlparse` python will execute
``__main__.py`` as a script. That means there won't be any
``sqlparse.__main__`` in ``sys.modules``.
- When you import __main__ it will get executed again (as a module) because
there's no ``sqlparse.__main__`` in ``sys.modules``.
Also see (1) from http://click.pocoo.org/5/setuptools/#setuptools-integration
"""
import argparse
import sys
from io import TextIOWrapper
import sqlparse
from sqlparse.exceptions import SQLParseError
# TODO: Add CLI Tests
# TODO: Simplify formatter by using argparse `type` arguments
def create_parser():
    """Build the :class:`argparse.ArgumentParser` for the ``sqlformat`` CLI.

    Returns
    -------
    argparse.ArgumentParser
        Parser exposing every formatting option understood by
        :func:`sqlparse.format`.
    """
    _CASE_CHOICES = ['upper', 'lower', 'capitalize']

    def _boolean(value):
        # BUGFIX: the previous ``type=bool`` made ``--comma_first False``
        # evaluate to True, because any non-empty string is truthy.
        # Parse the usual spellings explicitly instead.
        lowered = value.strip().lower()
        if lowered in ('true', '1', 'yes', 'on'):
            return True
        if lowered in ('false', '0', 'no', 'off'):
            return False
        raise argparse.ArgumentTypeError(
            'invalid boolean value: {!r}'.format(value))

    parser = argparse.ArgumentParser(
        prog='sqlformat',
        description='Format FILE according to OPTIONS. Use "-" as FILE '
                    'to read from stdin.',
        usage='%(prog)s [OPTIONS] FILE, ...',
    )
    parser.add_argument('filename')
    parser.add_argument(
        '-o', '--outfile',
        dest='outfile',
        metavar='FILE',
        help='write output to FILE (defaults to stdout)')
    parser.add_argument(
        '--version',
        action='version',
        version=sqlparse.__version__)
    group = parser.add_argument_group('Formatting Options')
    group.add_argument(
        '-k', '--keywords',
        metavar='CHOICE',
        dest='keyword_case',
        choices=_CASE_CHOICES,
        help='change case of keywords, CHOICE is one of {}'.format(
            ', '.join('"{}"'.format(x) for x in _CASE_CHOICES)))
    group.add_argument(
        '-i', '--identifiers',
        metavar='CHOICE',
        dest='identifier_case',
        choices=_CASE_CHOICES,
        help='change case of identifiers, CHOICE is one of {}'.format(
            ', '.join('"{}"'.format(x) for x in _CASE_CHOICES)))
    group.add_argument(
        '-l', '--language',
        metavar='LANG',
        dest='output_format',
        choices=['python', 'php'],
        help='output a snippet in programming language LANG, '
             'choices are "python", "php"')
    group.add_argument(
        '--strip-comments',
        dest='strip_comments',
        action='store_true',
        default=False,
        help='remove comments')
    group.add_argument(
        '-r', '--reindent',
        dest='reindent',
        action='store_true',
        default=False,
        help='reindent statements')
    group.add_argument(
        '--indent_width',
        dest='indent_width',
        default=2,
        type=int,
        help='indentation width (defaults to 2 spaces)')
    group.add_argument(
        '--indent_after_first',
        dest='indent_after_first',
        action='store_true',
        default=False,
        help='indent after first line of statement (e.g. SELECT)')
    group.add_argument(
        '--indent_columns',
        dest='indent_columns',
        action='store_true',
        default=False,
        help='indent all columns by indent_width instead of keyword length')
    group.add_argument(
        '-a', '--reindent_aligned',
        action='store_true',
        default=False,
        help='reindent statements to aligned format')
    group.add_argument(
        '-s', '--use_space_around_operators',
        action='store_true',
        default=False,
        help='place spaces around mathematical operators')
    group.add_argument(
        '--wrap_after',
        dest='wrap_after',
        default=0,
        type=int,
        help='Column after which lists should be wrapped')
    group.add_argument(
        '--comma_first',
        dest='comma_first',
        default=False,
        type=_boolean,
        help='Insert linebreak before comma (default False)')
    group.add_argument(
        '--encoding',
        dest='encoding',
        default='utf-8',
        help='Specify the input encoding (default utf-8)')
    return parser
def _error(msg):
"""Print msg and optionally exit with return code exit_."""
sys.stderr.write('[ERROR] {}\n'.format(msg))
return 1
def main(args=None):
    """Run the ``sqlformat`` command line tool.

    Reads SQL from the given file (or stdin when the filename is ``-``),
    formats it according to the parsed options, and writes the result to
    stdout or to ``--outfile``. Returns a process exit status (0 on success).
    """
    args = create_parser().parse_args(args)

    # --- acquire input -------------------------------------------------
    if args.filename == '-':  # read from stdin
        wrapper = TextIOWrapper(sys.stdin.buffer, encoding=args.encoding)
        try:
            data = wrapper.read()
        finally:
            # Detach instead of close, so sys.stdin stays usable.
            wrapper.detach()
    else:
        try:
            with open(args.filename, encoding=args.encoding) as f:
                data = f.read()
        except OSError as e:
            return _error(
                'Failed to read {}: {}'.format(args.filename, e))

    # --- choose output stream ------------------------------------------
    close_stream = False
    if args.outfile:
        try:
            stream = open(args.outfile, 'w', encoding=args.encoding)
        except OSError as e:
            return _error('Failed to open {}: {}'.format(args.outfile, e))
        close_stream = True
    else:
        stream = sys.stdout

    # --- validate options and format -----------------------------------
    try:
        formatter_opts = sqlparse.formatter.validate_options(vars(args))
    except SQLParseError as e:
        return _error('Invalid options: {}'.format(e))

    stream.write(sqlparse.format(data, **formatter_opts))
    stream.flush()
    if close_stream:
        stream.close()
    return 0
| andialbrecht/sqlparse | sqlparse/cli.py | Python | bsd-3-clause | 5,712 |
from . import NetworkObject
import z3
class ErroneousAclWebProxy (NetworkObject):
    """A caching web proxy which enforces ACLs erroneously.

    The idea here was to present something that is deliberately not path
    independent: ACLs are checked only on the request path, so a cached
    response can be served to a client that the ACL should have blocked.
    """
    def _init (self, node, network, context):
        super(ErroneousAclWebProxy, self).init_fail(node)
        self.proxy = node.z3Node
        self.ctx = context
        self.constraints = list ()
        self.acls = list ()
        network.SaneSend(self)
        self._webProxyFunctions ()
        self._webProxyConstraints ()
    @property
    def z3Node (self):
        return self.proxy
    def SetPolicy (self, policy):
        """Wrap add acls"""
        self.AddAcls(policy)
    def AddAcls(self, acls):
        """Register (src, dest) address pairs to be blocked on requests."""
        if not isinstance(acls, list):
            acls = [acls]
        self.acls.extend(acls)
    @property
    def ACLs (self):
        return self.acls
    def _addConstraints (self, solver):
        """Rebuild functions/constraints and hand them to the solver."""
        self.constraints = list ()
        self._webProxyFunctions ()
        self._webProxyConstraints ()
        solver.add(self.constraints)
    def _webProxyConstraints (self):
        """Assert the proxy's forwarding and caching behavior in z3."""
        eh = z3.Const('__webproxy_contraint_eh_%s'%(self.proxy), self.ctx.node)
        eh2 = z3.Const('__webproxy_contraint_eh2_%s'%(self.proxy), self.ctx.node)
        a = z3.Const('__webproxyfunc_cache_addr_%s'%(self.proxy), self.ctx.address)
        i = z3.Const('__webproxyfunc_cache_body_%s'%(self.proxy), z3.IntSort())
        p = z3.Const('__webproxy_req_packet_%s'%(self.proxy), self.ctx.packet)
        p2 = z3.Const('__webproxy_req_packet_2_%s'%(self.proxy), self.ctx.packet)
        p3 = z3.Const('__webproxy_res_packet_%s'%(self.proxy), self.ctx.packet)
        e1 = z3.Const('__webproxy_e1_%s'%(self.proxy), self.ctx.node)
        e2 = z3.Const('__webproxy_e2_%s'%(self.proxy), self.ctx.node)
        e3 = z3.Const('__webproxy_e3_%s'%(self.proxy), self.ctx.node)
        e4 = z3.Const('__webproxy_e4_%s'%(self.proxy), self.ctx.node)
        e5 = z3.Const('__webproxy_e5_%s'%(self.proxy), self.ctx.node)
        e6 = z3.Const('__webproxy_e6_%s'%(self.proxy), self.ctx.node)
        # \forall e, p: send(w, e, p) \Rightarrow hostHasAddr(w, p.src)
        # \forall e_1, p_1: send(w, e, p_1) \Rightarrow \exists e_2, p_2: recv(e_2, w, p_2) \land
        #                   p_2.origin == p_1.origin \land p_2.dest == p_1.dest \land hostHasAddr(p_2.origin, p_2.src)
        self.constraints.append(z3.ForAll([eh, p], z3.Implies(self.ctx.send(self.proxy, eh, p), \
                self.ctx.hostHasAddr(self.proxy, self.ctx.packet.src(p)))))
        # A response may be served from cache when the cached entry predates
        # the request and the reply mirrors ports/addresses of the request.
        cached_packet = z3.And(self.cached(self.ctx.packet.dest(p2), self.ctx.packet.body(p2)), \
                            self.ctx.etime(self.proxy, p2, self.ctx.recv_event) > \
                                self.ctime(self.ctx.packet.dest(p2), self.ctx.packet.body(p2)), \
                            self.ctx.etime(self.proxy, p, self.ctx.send_event) > \
                                self.ctx.etime(self.proxy, p2, self.ctx.recv_event), \
                            self.ctx.packet.body(p) == self.cresp(self.ctx.packet.dest(p2), self.ctx.packet.body(p2)), \
                            self.ctx.packet.orig_body(p) == self.corigbody(self.ctx.packet.dest(p2), self.ctx.packet.body(p2)), \
                            self.ctx.packet.dest(p) == self.ctx.packet.src(p2), \
                            self.ctx.dest_port(p) == self.ctx.src_port(p2), \
                            self.ctx.src_port(p) == self.ctx.dest_port(p2), \
                            self.ctx.packet.options(p) == 0, \
                            self.ctx.packet.origin(p) == self.corigin(self.ctx.packet.dest(p2), self.ctx.packet.body(p2)))
        request_constraints = [z3.Not(self.ctx.hostHasAddr(self.proxy, self.ctx.packet.dest(p2))), \
                           self.ctx.packet.origin(p2) == self.ctx.packet.origin(p),
                           self.ctx.packet.dest(p2) == self.ctx.packet.dest(p), \
                           self.ctx.packet.body(p2) == self.ctx.packet.body(p), \
                           self.ctx.packet.orig_body(p2) == self.ctx.packet.orig_body(p), \
                           self.ctx.packet.options(p) == 0, \
                           self.ctx.packet.seq(p2) == self.ctx.packet.seq(p), \
                           self.ctx.hostHasAddr(self.ctx.packet.origin(p2), self.ctx.packet.src(p2)), \
                           self.ctx.dest_port(p2) == self.ctx.dest_port(p), \
                           self.ctx.etime(self.proxy, p, self.ctx.send_event) > \
                                self.ctx.etime(self.proxy, p2, self.ctx.recv_event), \
                           self.ctx.hostHasAddr(self.proxy, self.ctx.packet.src(p))]
        if len(self.acls) != 0:
            # BUGFIX: the original used ``map(lambda (s, d): ...)``; tuple
            # parameter unpacking in lambdas was removed in Python 3 (PEP
            # 3113) and is a SyntaxError there. A list comprehension with
            # explicit unpacking is equivalent on both Python 2 and 3.
            acl_constraint = [z3.Not(z3.And(self.ctx.packet.src(p2) == s, \
                                            self.ctx.packet.dest(p2) == d)) \
                              for (s, d) in self.acls]
            request_constraints.extend(acl_constraint)
        self.constraints.append(z3.ForAll([eh, p], z3.Implies(self.ctx.send(self.proxy, eh, p), \
                z3.Or(\
                 z3.Exists([p2, eh2], \
                    z3.And(self.ctx.recv(eh2, self.proxy, p2), \
                        z3.Not(self.ctx.hostHasAddr(self.proxy, self.ctx.packet.src(p2))),\
                        z3.And(request_constraints))), \
                 z3.Exists([p2, eh2], \
                    z3.And(self.ctx.recv(eh2, self.proxy, p2), \
                        z3.Not(self.ctx.hostHasAddr(self.proxy, self.ctx.packet.src(p2))),\
                        cached_packet))))))
        # A cache entry for (addr, body) exists only if the proxy relayed the
        # request upstream and recorded the first matching response.
        cache_conditions = \
            z3.ForAll([a, i], \
                z3.Implies(self.cached(a, i), \
                    z3.And(\
                        z3.Not(self.ctx.hostHasAddr (self.proxy, a)), \
                        z3.Exists([e1, e2, e3, p, p2, p3], \
                            z3.And(\
                                self.ctx.recv(e1, self.proxy, p2), \
                                self.ctx.packet.dest(p2) == a, \
                                self.ctx.packet.body(p2) == i, \
                                self.ctx.packet.body(p) == i, \
                                self.ctx.packet.dest(p) == a, \
                                self.ctx.dest_port(p) == self.ctx.dest_port(p2), \
                                self.creqpacket(a, i) == p2, \
                                self.creqopacket(a, i) == p, \
                                self.ctime(a, i) > self.ctx.etime(self.proxy, p2, self.ctx.recv_event), \
                                self.ctx.send(self.proxy, e2, p), \
                                self.ctime(a, i) > self.ctx.etime(self.proxy, p, self.ctx.send_event), \
                                self.ctx.recv(e3, self.proxy, p3), \
                                self.crespacket(a, i) == p3, \
                                self.ctx.src_port(p3) == self.ctx.dest_port(p), \
                                self.ctx.dest_port(p3) == self.ctx.src_port(p), \
                                self.ctx.packet.src(p3) == self.ctx.packet.dest(p), \
                                self.ctx.packet.dest(p3) == self.ctx.packet.src(p), \
                                z3.Exists([e5, e6], \
                                    z3.And(
                                        self.ctx.hostHasAddr (e5, a), \
                                        self.ctx.recv(e6, e5, p), \
                                        z3.ForAll([e4], \
                                            z3.Or(self.ctx.etime(e4, p3, self.ctx.send_event) == 0, \
                                                  self.ctx.etime(e4, p3, self.ctx.send_event) > self.ctx.etime(e5, p, self.ctx.recv_event))))), \
                                self.cresp(a, i) == self.ctx.packet.body(p3), \
                                self.corigbody(a, i) == self.ctx.packet.orig_body(p3), \
                                self.corigin(a, i) == self.ctx.packet.origin(p3), \
                                self.ctime(a, i) == self.ctx.etime(self.proxy, p3, self.ctx.recv_event), \
                                *request_constraints)))))
        self.constraints.append(cache_conditions)
    def _webProxyFunctions (self):
        """Declare the uninterpreted functions modeling the proxy cache."""
        self.cached = z3.Function('__webproxy_cached_%s'%(self.proxy), self.ctx.address, z3.IntSort(), z3.BoolSort())
        self.ctime = z3.Function('__webproxy_ctime_%s'%(self.proxy), self.ctx.address, z3.IntSort(), z3.IntSort())
        self.cresp = z3.Function('__webproxy_cresp_%s'%(self.proxy), self.ctx.address, z3.IntSort(), z3.IntSort())
        self.corigbody = z3.Function('__webproxy_corigbody_%s'%(self.proxy), self.ctx.address, z3.IntSort(), z3.IntSort())
        self.corigin = z3.Function('__webproxy_corigin_%s'%(self.proxy), self.ctx.address, z3.IntSort(), self.ctx.node)
        self.crespacket = z3.Function('__webproxy_crespacket_%s'%(self.proxy), self.ctx.address, z3.IntSort(), self.ctx.packet)
        self.creqpacket = z3.Function('__webproxy_creqpacket_%s'%(self.proxy), self.ctx.address, z3.IntSort(), self.ctx.packet)
        self.creqopacket = z3.Function('__webproxy_creqopacket_%s'%(self.proxy), self.ctx.address, z3.IntSort(), self.ctx.packet)
        #self.corigbody = z3.Function('__webproxy_corigbody_%s'%(self.proxy), self.ctx.address, z3.IntSort(), self.ctx.packet)
        a = z3.Const('__webproxyfunc_cache_addr_%s'%(self.proxy), self.ctx.address)
        i = z3.Const('__webproxyfunc_cache_body_%s'%(self.proxy), z3.IntSort())
        # Model cache as a function
        # If not cached, cache time is 0
        self.constraints.append(z3.ForAll([a, i], z3.Not(self.cached(a, i)) == (self.ctime(a, i) == 0)))
        self.constraints.append(z3.ForAll([a, i], z3.Not(self.cached(a, i)) == (self.cresp(a, i) == 0)))
| apanda/modeling | mcnet/components/erroneous_aclfull_proxy.py | Python | bsd-3-clause | 10,082 |
#!/usr/bin/env python
'''Print message using ANSI terminal codes'''
__author__ = "Miki Tebeka <miki@mikitebeka.com>"
from sys import stdout, stderr
# Format
bright = 1
dim = 2
underline = 4
blink = 5
reverse = 7
hidden = 8
# Forground
black = 30
red = 31
green = 32
yellow = 33
blue = 34
magenta = 35
cyan = 36
white = 37
# Background
on_black = 40
on_red = 41
on_green = 42
on_yellow = 43
on_blue = 44
on_magenta = 45
on_cyan = 46
on_white = 47
def ansiformat(msg, *args):
    '''Wrap msg in ANSI escape codes built from args.

    Each arg is a numeric attribute code; the string is reset afterwards.
    See http://www.termsys.demon.co.uk/vtansi.htm for more details.
    '''
    codes = ";".join("%s" % code for code in args)
    return "\033[" + codes + "m" + msg + "\033[0m"
def ansiprint(msg, *args, **kw):
    '''Write the ANSI-formatted message and flush.

    Goes to stderr when the keyword stderr is truthy, stdout otherwise.
    Should work on any ANSI compatible terminal.
    '''
    target = stderr if kw.get("stderr", 0) else stdout
    target.write(ansiformat(msg, *args))
    target.flush()
# Command-line driver: ansiprint MESSAGE [FORMAT ...]
# NOTE: uses Python 2 print statements; this script is Python 2 only.
if __name__ == "__main__":
    from sys import argv, exit
    from os.path import basename
    # Map format names accepted on the command line to their ANSI codes.
    h = {
        "bright" : bright,
        "dim" : dim,
        "underline" : underline,
        "blink" : blink,
        "reverse" : reverse,
        "hidden" : hidden,
        "black" : black,
        "red" : red,
        "green" : green,
        "yellow" : yellow,
        "blue" : blue,
        "magenta" : magenta,
        "cyan" : cyan,
        "white" : white,
        "on_black" : on_black,
        "on_red" : on_red,
        "on_green" : on_green,
        "on_yellow" : on_yellow,
        "on_blue" : on_blue,
        "on_magenta" : on_magenta,
        "on_cyan" : on_cyan,
        "on_white" : on_white
    }
    # Usage example shown in error messages (rendered with real colors).
    eg = "e.g. ansiprint hello red on_green underline -> %s" % \
        ansiformat("hello", red, on_green, underline)
    if len(argv) < 2:
        print >> stderr, "usage: %s message [format ...]" % basename(argv[0])
        print >> stderr, eg
        exit(1)
    # Validate every requested format before printing anything.
    for i in argv[2:]:
        if i not in h:
            ansiprint("%s: Unknown format\n" % i, red, bright, stderr=True)
            print >> stderr, "Formats can be:",
            # Show each known format name rendered in its own style.
            msg = ", ".join([ansiformat(f, h[f]) for f in h.keys()])
            print msg
            print >> stderr, eg
            exit(1)
    ansiprint(argv[1], *[h[i] for i in argv[2:]])
    # Trailing newline (ansiprint does not add one).
    print
| tebeka/pythonwise | ansiprint.py | Python | bsd-3-clause | 2,287 |
# Copyright (c) 2012 Lars Hupfeldt Nielsen, Hupfeldt IT
# All rights reserved. This work is under a BSD license, see LICENSE.TXT.
from multiconf import caller_file_line
from .utils.utils import next_line_num
from .utils_test_helpers import bb, fn_aa_exp, ln_aa_exp
ln_bb_exp = None
def test_caller_file_line():
    """Check that caller_file_line/next_line_num report matching locations.

    NOTE(review): this test is line-number sensitive at runtime — each
    ``next_line_num()`` call must stay on the line immediately before the
    call it measures.  Only trailing comments are safe near those pairs.
    """
    def cc():
        global ln_bb_exp
        fnc, lnc = caller_file_line(2)  # depth 2: the line that called cc()
        print("fnc, lnc:", fnc, lnc)
        ln_bb_exp = next_line_num()  # expected line of the bb() call below
        fnb, lnb, fna, lna = bb()
        return fnc, lnc, fnb, lnb, fna, lna
    fn_exp = __file__
    ln_cc_exp = next_line_num()  # expected line of the cc() call below
    fnc, lnc, fnb, lnb, fna, lna = cc()
    assert fn_exp == fnc
    assert ln_cc_exp == lnc
    assert fn_exp == fnb
    assert ln_bb_exp == lnb
| lhupfeldt/multiconf | test/utils_test.py | Python | bsd-3-clause | 753 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from nose.tools import raises, eq_, ok_
from werkzeug.test import Client
from werkzeug.wrappers import BaseResponse
from clastic import Application, render_basic
from clastic.application import BaseApplication
from clastic.route import BaseRoute, Route
from clastic.route import (InvalidEndpoint,
InvalidPattern,
InvalidMethod)
from clastic.route import S_STRICT, S_REWRITE, S_REDIRECT
from clastic.errors import NotFound, ErrorHandler
# Every slash-handling mode, for tests that must pass under each one.
MODES = (S_STRICT, S_REWRITE, S_REDIRECT)
# Zero-argument endpoint returning an empty 200 response.
NO_OP = lambda: BaseResponse()
def test_new_base_route():
    """Exercise BaseRoute.match_path with '+' (one-or-more) and '*'
    (zero-or-more) int segment bindings."""
    # note default slashing behavior
    rp = BaseRoute('/a/b/<t:int>/thing/<das+int>')
    d = rp.match_path('/a/b/1/thing/1/2/3/4')
    yield eq_, d, {u't': 1, u'das': [1, 2, 3, 4]}
    # 'hi' is not an int, so the '+int' binding rejects the path
    d = rp.match_path('/a/b/1/thing/hi/')
    yield eq_, d, None
    # '+' requires at least one segment, so an empty tail is no match
    d = rp.match_path('/a/b/1/thing/')
    yield eq_, d, None
    # '*' accepts zero segments
    rp = BaseRoute('/a/b/<t:int>/thing/<das*int>', methods=['GET'])
    d = rp.match_path('/a/b/1/thing')
    yield eq_, d, {u't': 1, u'das': []}
def test_base_route_executes():
    """The endpoint receives values injected from the request mapping."""
    route = BaseRoute('/', lambda request: request['stephen'])
    yield eq_, route.execute({'stephen': 'laporte'}), 'laporte'
@raises(InvalidEndpoint)
def test_base_route_raises_on_no_ep():
    """Executing a route constructed without an endpoint must raise."""
    BaseRoute('/a/b/<t:int>/thing/<das+int>').execute({})
def test_base_application_basics():
    """A BaseApplication with one route serves that route's response."""
    br = BaseRoute('/', lambda request: BaseResponse('lolporte'))
    ba = BaseApplication([br])
    client = Client(ba, BaseResponse)
    res = client.get('/')
    yield eq_, res.data, 'lolporte'
def test_nonbreaking_exc():
    """A non-breaking NotFound lets routing fall through to the next route."""
    app = Application([('/', lambda: NotFound(is_breaking=False)),
                       ('/', lambda: 'so hot in here', render_basic)])
    client = Client(app, BaseResponse)
    resp = client.get('/')
    # The second route answered, not the NotFound.
    yield eq_, resp.status_code, 200
    yield eq_, resp.data, 'so hot in here'
def api(api_path):
    """Render a captured multi-segment path as 'api: seg1/seg2/...'."""
    joined = '/'.join(api_path)
    return 'api: {0}'.format(joined)
def two_segments(one, two):
    """Endpoint echoing its two captured path segments."""
    return 'two_segments: {0}, {1}'.format(one, two)
def three_segments(one, two, three):
    """Endpoint echoing its three captured path segments."""
    return 'three_segments: {0}, {1}, {2}'.format(one, two, three)
def test_create_route_order_list():
    "tests route order when routes are added as a list"
    routes = [('/api/<api_path+>', api, render_basic),
              ('/<one>/<two>', two_segments, render_basic),
              ('/<one>/<two>/<three>', three_segments, render_basic)]
    app = BaseApplication(routes)
    client = Client(app, BaseResponse)
    # The /api route must win even though the generic patterns also match.
    yield eq_, client.get('/api/a').data, 'api: a'
    yield eq_, client.get('/api/a/b').data, 'api: a/b'
    # Registration order is preserved exactly.
    for i, rt in enumerate(app.routes):
        yield eq_, rt.pattern, routes[i][0]
    return
def test_create_route_order_incr():
    "tests route order when routes are added incrementally"
    routes = [('/api/<api_path+>', api, render_basic),
              ('/<one>/<two>', two_segments, render_basic),
              ('/<one>/<two>/<three>', three_segments, render_basic)]
    app = BaseApplication()
    client = Client(app, BaseResponse)
    # After each add(), the /api route keeps precedence and the new route
    # lands at the end of the route list.
    for r in routes:
        app.add(r)
        yield eq_, client.get('/api/a/b').data, 'api: a/b'
        yield eq_, app.routes[-1].pattern, r[0]
    return
"""
New routing testing strategy notes
==================================
* Successful endpoint
* Failing endpoint (i.e., raise a non-HTTPException exception)
* Raising endpoint (50x, 40x (breaking/nonbreaking))
* GET/POST/PUT/DELETE/OPTIONS/HEAD, etc.
"""
# Patterns with no bound variables (mix of trailing-slash styles).
no_arg_routes = ['/',
                 '/alpha',
                 '/alpha/',
                 '/beta',
                 '/gamma/',
                 '/delta/epsilon',
                 '/zeta/eta/']
# Patterns exercising every binding form: plain, typed, '+', '*', '?',
# and an empty type annotation.
arg_routes = ['/<theta>',
              '/iota/<kappa>/<lambda>/mu/',
              '/<nu:int>/<xi:float>/<omicron:unicode>/<pi:str>/',
              '/<rho+>/',
              '/<sigma*>/',
              '/<tau?>/',
              '/<upsilon:>/']
# Malformed patterns that Route() must reject with InvalidPattern.
broken_routes = ['alf',
                 '/bet//',
                 '/<cat->/',
                 '/<very*doge>/']
def test_ok_routes():
    """Every well-formed pattern must construct under every slash mode."""
    ok_routes = no_arg_routes + arg_routes
    for cur_mode in MODES:
        for cur_patt in ok_routes:
            try:
                cur_rt = Route(cur_patt, NO_OP, slash_mode=cur_mode)
            except Exception:
                # Any exception means the pattern failed; report which one.
                # (``except Exception`` instead of a bare ``except:`` so
                # KeyboardInterrupt/SystemExit still propagate.)
                yield ok_, False, cur_patt
            else:
                yield ok_, cur_rt
def test_broken_routes():
    """Every malformed pattern must raise InvalidPattern in every mode."""
    for cur_mode in MODES:
        for cur_patt in broken_routes:
            try:
                cur_rt = Route(cur_patt, NO_OP, slash_mode=cur_mode)
            except InvalidPattern:
                yield ok_, True
            else:
                # Constructing succeeded when it should not have.
                yield ok_, False, cur_rt
def test_known_method():
    """Registering GET implicitly enables HEAD as well."""
    rt = Route('/', NO_OP, methods=['GET'])
    yield ok_, rt
    yield ok_, 'HEAD' in rt.methods
@raises(InvalidMethod)
def test_unknown_method():
    """A method name outside the known HTTP verbs must be rejected."""
    Route('/', NO_OP, methods=['lol'])
def test_debug_raises():
    """Uncaught endpoint errors: 500 by default, re-raised with
    reraise_uncaught=True."""
    app_nodebug = Application([('/', lambda: 1/0)], debug=False)
    client = Client(app_nodebug, BaseResponse)
    # Without reraising, the error is converted to a 500 response.
    yield eq_, client.get('/').status_code, 500
    err_handler = ErrorHandler(reraise_uncaught=True)
    app_debug = Application([('/', lambda: 1/0)], error_handler=err_handler)
    client = Client(app_debug, BaseResponse)
    try:
        resp = client.get('/')
    except ZeroDivisionError:
        yield ok_, True
    else:
        yield ok_, False, ('%r did not raise ZeroDivisionError (got %r)'
                           % (app_debug, resp))
def test_slashing_behaviors():
    """Compare the three slash modes on doubled-slash and unknown paths."""
    routes = [('/', NO_OP),
              ('/goof/spoof/', NO_OP)]
    app_strict = Application(routes, slash_mode=S_STRICT)
    app_redirect = Application(routes, slash_mode=S_REDIRECT)
    app_rewrite = Application(routes, slash_mode=S_REWRITE)
    cl_strict = Client(app_strict, BaseResponse)
    cl_redirect = Client(app_redirect, BaseResponse)
    cl_rewrite = Client(app_rewrite, BaseResponse)
    # An exact match works in every mode.
    yield eq_, cl_strict.get('/').status_code, 200
    yield eq_, cl_rewrite.get('/').status_code, 200
    yield eq_, cl_redirect.get('/').status_code, 200
    # Doubled slashes: strict 404s, rewrite serves, redirect 302s.
    yield eq_, cl_strict.get('/goof//spoof//').status_code, 404
    yield eq_, cl_rewrite.get('/goof//spoof//').status_code, 200
    yield eq_, cl_redirect.get('/goof//spoof//').status_code, 302
    yield eq_, cl_redirect.get('/goof//spoof//', follow_redirects=True).status_code, 200
    # A path that matches nothing is 404 regardless of mode.
    yield eq_, cl_strict.get('/dne/dne//').status_code, 404
    yield eq_, cl_rewrite.get('/dne/dne//').status_code, 404
    yield eq_, cl_redirect.get('/dne/dne//').status_code, 404
| kezabelle/clastic | clastic/tests/test_routing.py | Python | bsd-3-clause | 6,586 |
from django.conf import settings
import requests
from mozillians.celery import app
@app.task
def celery_healthcheck():
    """Ping healthchecks.io periodically to monitor celery/celerybeat health."""
    # True iff the configured ping endpoint answered 200 OK.
    return requests.get(settings.HEALTHCHECKS_IO_URL).status_code == requests.codes.ok
| johngian/mozillians | mozillians/common/tasks.py | Python | bsd-3-clause | 316 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from datetime import date
from django.conf import settings
def settings_context(request):
    """
    Makes available a template var for some interesting var in settings.py

    Returns ITEMS_PER_PAGE and TAGS_PER_PAGE from the project settings,
    falling back to 20 and 200 respectively when they are not defined.
    """
    # getattr with a default replaces the two try/except AttributeError
    # blocks, and drops a stray debug ``print "oooo"`` left in the old
    # code (Python 2 print statement -- a syntax error under Python 3).
    return {
        "ITEMS_PER_PAGE": getattr(settings, "ITEMS_PER_PAGE", 20),
        "TAGS_PER_PAGE": getattr(settings, "TAGS_PER_PAGE", 200),
    }
# -*- coding: utf-8 -*-
"""
Display vnstat statistics.
Coloring rules.
If value is bigger that dict key, status string will turn to color, specified
in the value.
Example:
coloring = {
800: "#dddd00",
900: "#dd0000",
}
(0 - 800: white, 800-900: yellow, >900 - red)
Format of status string placeholders:
{down} download
{total} total
{up} upload
Requires:
- external program called `vnstat` installed and configured to work.
@author shadowprince
@license Eclipse Public License
"""
from __future__ import division # python2 compatibility
from time import time
from subprocess import check_output
def get_stat(statistics_type):
    """
    Get statistics from devfile in list of lists of words.

    :param statistics_type: "d" for the daily record, "m" for monthly.
    :returns: dict with "up", "down" and "total" byte counts.
    :raises RuntimeError: when vnstat's output cannot be parsed (including
        when no record of the requested type exists).
    """
    def filter_stat():
        # Return the most recent (index 0) dump line for the requested
        # period type, or None when vnstat has no such record.
        out = check_output(["vnstat", "--dumpdb"]).decode("utf-8").splitlines()
        for x in out:
            if x.startswith("{};0;".format(statistics_type)):
                return x

    try:
        # Fields per line (semicolon separated): type;index;timestamp;
        # rx-mega;tx-mega;rx-kilo;tx-kilo;fill -- presumably MiB/KiB
        # counters, given the *m/*k names and the 1024 scaling below.
        type, number, ts, rxm, txm, rxk, txk, fill = filter_stat().split(";")
    except OSError as e:
        print("Looks like you haven't installed or configured vnstat!")
        raise e
    except (AttributeError, ValueError):
        # AttributeError: filter_stat() found no matching line and returned
        # None (previously this crashed with a raw AttributeError).
        # ValueError: the line had an unexpected number of fields.
        err = "vnstat returned wrong output, "
        err += "maybe it's configured wrong or module is outdated"
        raise RuntimeError(err)

    # Combine the mega and kilo counters into plain bytes.
    up = (int(txm) * 1024 + int(txk)) * 1024
    down = (int(rxm) * 1024 + int(rxk)) * 1024

    return {
        "up": up,
        "down": down,
        "total": up+down
    }
class Py3status:
    """
    i3status/py3status module class: renders vnstat traffic statistics
    using the module-level ``format`` and ``coloring`` configuration.
    """
    # available configuration parameters
    cache_timeout = 180
    coloring = {}
    format = "{total}"
    # initial multiplier, if you want to get rid of first bytes, set to 1 to
    # disable
    initial_multi = 1024
    left_align = 0
    # if value is greater, divide it with unit_multi and get next unit from
    # units
    multiplier_top = 1024
    precision = 1
    statistics_type = "d" # d for daily, m for monthly
    unit_multi = 1024 # value to divide if rate is greater than multiplier_top
    def __init__(self, *args, **kwargs):
        """
        Format of total, up and down placeholders under FORMAT.
        As default, substitutes left_align and precision as %s and %s
        Placeholders:
        value - value (float)
        unit - unit (string)
        """
        # Prime the baseline sample and timestamp at startup.
        self.last_stat = get_stat(self.statistics_type)
        self.last_time = time()
        self.last_interface = None
        # e.g. "{value:0.1f} {unit}" with the configured alignment/precision.
        self.value_format = "{value:%s.%sf} {unit}" % (
            self.left_align, self.precision
        )
        # list of units, first one - value/initial_multi, second - value/1024,
        # third - value/1024^2, etc...
        self.units = ["kb", "mb", "gb", "tb", ]
    def _divide_and_format(self, value):
        """
        Divide a value and return formatted string
        """
        value /= self.initial_multi
        # Step through the unit ladder until the value fits under the top.
        for i, unit in enumerate(self.units):
            if value > self.multiplier_top:
                value /= self.unit_multi
            else:
                break
        return self.value_format.format(value=value, unit=unit)
    def currentSpeed(self, i3s_output_list, i3s_config):
        # Entry point called by py3status on each refresh.
        stat = get_stat(self.statistics_type)
        color = None
        # Walk thresholds in ascending order; the last threshold that the
        # total exceeds (in MiB) determines the color.
        keys = list(self.coloring.keys())
        keys.sort()
        for k in keys:
            if stat["total"] < k * 1024 * 1024:
                break
            else:
                color = self.coloring[k]
        response = {
            'cached_until': time() + self.cache_timeout,
            'full_text': self.format.format(
                total=self._divide_and_format(stat['total']),
                up=self._divide_and_format(stat['up']),
                down=self._divide_and_format(stat['down']),
            ),
            'transformed': True
        }
        if color:
            response["color"] = color
        return response
if __name__ == "__main__":
    """
    Test this module by calling it directly.
    """
    from time import sleep
    x = Py3status()
    # Minimal stand-in for the i3status color config.
    config = {
        'color_good': '#00FF00',
        'color_bad': '#FF0000',
    }
    # Print one response per second forever (stop with Ctrl-C).
    while True:
        print(x.currentSpeed([], config))
        sleep(1)
| Spirotot/py3status | py3status/modules/vnstat.py | Python | bsd-3-clause | 4,225 |
"""
kombu.transport.zmq
===================
ZeroMQ transport.
"""
from __future__ import absolute_import, unicode_literals
import errno
import os
import socket
try:
import zmq
from zmq import ZMQError
except ImportError:
zmq = ZMQError = None # noqa
from kombu.five import Empty
from kombu.log import get_logger
from kombu.serialization import pickle
from kombu.utils import cached_property
from kombu.utils.eventio import poll, READ
from . import virtual
logger = get_logger('kombu.transport.zmq')
DEFAULT_PORT = 5555
DEFAULT_HWM = 128
DEFAULT_INCR = 1
dumps, loads = pickle.dumps, pickle.loads
class MultiChannelPoller(object):
    """Polls the sink sockets of several channels through one poller."""
    # only poll for readability
    eventflags = READ
    def __init__(self):
        # active channels
        self._channels = set()
        # file descriptor -> channel map
        self._fd_to_chan = {}
        # poll implementation (epoll/kqueue/select)
        self.poller = poll()
    def close(self):
        """Unregister every known fd and drop all channel state."""
        for fd in self._fd_to_chan:
            try:
                self.poller.unregister(fd)
            except KeyError:
                pass
        self._channels.clear()
        self._fd_to_chan.clear()
        self.poller = None
    def add(self, channel):
        self._channels.add(channel)
    def discard(self, channel):
        self._channels.discard(channel)
        self._fd_to_chan.pop(channel.client.connection.fd, None)
    def _register(self, channel):
        # Track and register the channel's connection fd with the poller.
        conn = channel.client.connection
        self._fd_to_chan[conn.fd] = channel
        self.poller.register(conn.fd, self.eventflags)
    def on_poll_start(self):
        # (Re-)register every active channel before each poll cycle.
        for channel in self._channels:
            self._register(channel)
    def on_readable(self, fileno):
        # Return (drained event, owning channel) for a readable fd.
        chan = self._fd_to_chan[fileno]
        return chan.drain_events(), chan
    def get(self, timeout=None):
        """Return the first available event, or raise Empty on timeout."""
        self.on_poll_start()
        events = self.poller.poll(timeout)
        for fileno, _ in events or []:
            return self.on_readable(fileno)
        raise Empty()
    @property
    def fds(self):
        # Mapping of registered fds to their channels.
        return self._fd_to_chan
class Client(object):
    """PUSH/PULL socket pair: ``vent`` sends, optional ``sink`` receives."""
    def __init__(self, uri='tcp://127.0.0.1', port=DEFAULT_PORT,
                 hwm=DEFAULT_HWM, swap_size=None, enable_sink=True,
                 context=None):
        # ``uri`` may list several ';'-separated endpoints after the scheme.
        try:
            scheme, parts = uri.split('://')
        except ValueError:
            # No scheme given: default to tcp.
            scheme = 'tcp'
            parts = uri
        endpoints = parts.split(';')
        self.port = port
        if scheme != 'tcp':
            raise NotImplementedError('Currently only TCP can be used')
        self.context = context or zmq.Context.instance()
        if enable_sink:
            # The sink binds locally and receives messages pushed to us.
            self.sink = self.context.socket(zmq.PULL)
            self.sink.bind('tcp://*:{0.port}'.format(self))
        else:
            self.sink = None
        self.vent = self.context.socket(zmq.PUSH)
        # SNDHWM exists on newer pyzmq/libzmq; older versions use HWM.
        if hasattr(zmq, 'SNDHWM'):
            self.vent.setsockopt(zmq.SNDHWM, hwm)
        else:
            self.vent.setsockopt(zmq.HWM, hwm)
        if swap_size:
            self.vent.setsockopt(zmq.SWAP, swap_size)
        # Connect the vent to every requested endpoint.
        for endpoint in endpoints:
            if scheme == 'tcp' and ':' not in endpoint:
                endpoint += ':' + str(DEFAULT_PORT)
            endpoint = ''.join([scheme, '://', endpoint])
            self.connect(endpoint)
    def connect(self, endpoint):
        self.vent.connect(endpoint)
    def get(self, queue=None, timeout=None):
        """Receive one message; EAGAIN is mapped to socket.error(EAGAIN)."""
        sink = self.sink
        try:
            if timeout is not None:
                # Temporarily apply the receive timeout, then restore it.
                prev_timeout, sink.RCVTIMEO = sink.RCVTIMEO, timeout
                try:
                    return sink.recv()
                finally:
                    sink.RCVTIMEO = prev_timeout
            else:
                return sink.recv()
        except ZMQError as exc:
            if exc.errno == zmq.EAGAIN:
                raise socket.error(errno.EAGAIN, exc.strerror)
            else:
                raise
    def put(self, queue, message, **kwargs):
        # ``queue`` is ignored: the vent pushes to its connected endpoints.
        return self.vent.send(message)
    def close(self):
        if self.sink and not self.sink.closed:
            self.sink.close()
        if not self.vent.closed:
            self.vent.close()
    @property
    def connection(self):
        # The socket whose fd is polled: prefer the sink when it exists.
        if self.sink:
            return self.sink
        return self.vent
class Channel(virtual.Channel):
    """Virtual channel mapping queue operations onto a zmq ``Client``."""
    Client = Client
    # Tunables; all overridable through transport options (see below).
    hwm = DEFAULT_HWM
    swap_size = None
    enable_sink = True
    port_incr = DEFAULT_INCR
    from_transport_options = (
        virtual.Channel.from_transport_options +
        ('hwm', 'swap_size', 'enable_sink', 'port_incr')
    )
    def __init__(self, *args, **kwargs):
        super_ = super(Channel, self)
        super_.__init__(*args, **kwargs)
        # Evaluate socket
        self.client.connection.closed
        self.connection.cycle.add(self)
        self.connection_errors = self.connection.connection_errors
    def _get(self, queue, timeout=None):
        try:
            return loads(self.client.get(queue, timeout))
        except socket.error as exc:
            # Translate a would-block condition into kombu's Empty.
            if exc.errno == errno.EAGAIN and timeout != 0:
                raise Empty()
            else:
                raise
    def _put(self, queue, message, **kwargs):
        # Messages are pickled before being pushed onto the socket.
        self.client.put(queue, dumps(message, -1), **kwargs)
    def _purge(self, queue):
        # Purging is not supported over a socket; report zero messages.
        return 0
    def _poll(self, cycle, timeout=None):
        return cycle.get(timeout=timeout)
    def close(self):
        if not self.closed:
            self.connection.cycle.discard(self)
            # Only close the client if it was ever created (cached_property).
            try:
                self.__dict__['client'].close()
            except KeyError:
                pass
        super(Channel, self).close()
    def _prepare_port(self, port):
        # Derive a per-channel port from the base port and channel id.
        return (port + self.channel_id - 1) * self.port_incr
    def _create_client(self):
        conninfo = self.connection.client
        port = self._prepare_port(conninfo.port or DEFAULT_PORT)
        return self.Client(uri=conninfo.hostname or 'tcp://127.0.0.1',
                           port=port,
                           hwm=self.hwm,
                           swap_size=self.swap_size,
                           enable_sink=self.enable_sink,
                           context=self.connection.context)
    @cached_property
    def client(self):
        # Created lazily on first use; see close() for the matching cleanup.
        return self._create_client()
class Transport(virtual.Transport):
    """Kombu transport backed by ZeroMQ PUSH/PULL sockets."""
    Channel = Channel

    can_parse_url = True
    default_port = DEFAULT_PORT
    driver_type = 'zeromq'
    driver_name = 'zmq'

    connection_errors = virtual.Transport.connection_errors + (ZMQError,)

    # ``async`` became a reserved keyword in Python 3.7, so the capability
    # flag is passed via keyword-unpacking; the recorded attribute name is
    # unchanged for backward compatibility.
    implements = virtual.Transport.implements.extend(
        **{'async': True}
    )
    polling_interval = None

    def __init__(self, *args, **kwargs):
        if zmq is None:
            raise ImportError('The zmq library is not installed')
        super(Transport, self).__init__(*args, **kwargs)
        # One poller shared by all of this transport's channels.
        self.cycle = MultiChannelPoller()

    def driver_version(self):
        return zmq.__version__

    def register_with_event_loop(self, connection, loop):
        """Hook the shared poller into an async event loop (hub)."""
        cycle = self.cycle
        cycle.poller = loop.poller
        add_reader = loop.add_reader
        on_readable = self.on_readable
        cycle_poll_start = cycle.on_poll_start

        def on_poll_start():
            cycle_poll_start()
            # Register every active sink fd with the hub on each tick.
            for fd in cycle.fds:
                add_reader(fd, on_readable, fd)

        loop.on_tick.add(on_poll_start)

    def on_readable(self, fileno):
        self._handle_event(self.cycle.on_readable(fileno))

    def drain_events(self, connection, timeout=None):
        """Poll every channel once; raise EAGAIN if nothing arrived."""
        more_to_read = False
        for channel in connection.channels:
            try:
                evt = channel.cycle.get(timeout=timeout)
            except socket.error as exc:
                if exc.errno == errno.EAGAIN:
                    continue
                raise
            else:
                connection._handle_event((evt, channel))
                more_to_read = True
        if not more_to_read:
            raise socket.error(errno.EAGAIN, os.strerror(errno.EAGAIN))

    def _handle_event(self, evt):
        item, channel = evt
        self._deliver(*item)

    def establish_connection(self):
        # Touch the context so it is created eagerly and errors surface here.
        self.context.closed
        return super(Transport, self).establish_connection()

    def close_connection(self, connection):
        super(Transport, self).close_connection(connection)
        # Only terminate the context if it was ever created (cached_property).
        try:
            connection.__dict__['context'].term()
        except KeyError:
            pass

    @cached_property
    def context(self):
        return zmq.Context(1)
| Elastica/kombu | kombu/transport/zmq.py | Python | bsd-3-clause | 8,476 |
from django.conf import settings
from django.conf.urls import url
from django.conf.urls.static import static
from . import views
# URL routes for the analytics app; static files are served in addition.
urlpatterns = [
    url(r'^$', views.home),
    url(r'^subreddits$', views.subreddits),
    url(r'^about$', views.about),
    url(r'^api$', views.api),
    url(r'^submission/(?P<id>[\w]+)$', views.submission),
    # url(r'^subreddit/(?P<subreddit>[\w]+)$', views.subreddit),
    # Accept both /subreddit/<name> and the short /r/<name> form.
    url(r'^(subreddit|r)/(?P<subreddit>[\w]+)$', views.subreddit),
    url(r'^search$', views.search)
] + static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
| xgi/aliendb | web/aliendb/apps/analytics/urls.py | Python | bsd-3-clause | 576 |
from sklearn2sql_heroku.tests.classification import generic as class_gen
# Generate and validate SQL for a RidgeClassifier on iris, Oracle dialect.
class_gen.test_model("RidgeClassifier" , "iris" , "oracle")
| antoinecarme/sklearn2sql_heroku | tests/classification/iris/ws_iris_RidgeClassifier_oracle_code_gen.py | Python | bsd-3-clause | 135 |
"""
Perform Levenberg-Marquardt least-squares minimization, based on MINPACK-1.
AUTHORS
The original version of this software, called LMFIT, was written in FORTRAN
as part of the MINPACK-1 package by Jorge More' and collaborators.
Craig Markwardt converted the FORTRAN code to IDL. The information for the
IDL version is:
Craig B. Markwardt, NASA/GSFC Code 662, Greenbelt, MD 20770
craigm@lheamail.gsfc.nasa.gov
UPDATED VERSIONs can be found on my WEB PAGE:
http://cow.physics.wisc.edu/~craigm/idl/idl.html
Mark Rivers created this Python version from Craig's IDL version.
Mark Rivers, University of Chicago
Building 434A, Argonne National Laboratory
9700 South Cass Avenue, Argonne, IL 60439
rivers@cars.uchicago.edu
Updated versions can be found at http://cars.uchicago.edu/software
Sergey Koposov converted the Mark's Python version from Numeric to numpy
Sergey Koposov, University of Cambridge, Institute of Astronomy,
Madingley road, CB3 0HA, Cambridge, UK
koposov@ast.cam.ac.uk
Updated versions can be found at http://code.google.com/p/astrolibpy/source/browse/trunk/
DESCRIPTION
MPFIT uses the Levenberg-Marquardt technique to solve the
least-squares problem. In its typical use, MPFIT will be used to
fit a user-supplied function (the "model") to user-supplied data
points (the "data") by adjusting a set of parameters. MPFIT is
based upon MINPACK-1 (LMDIF.F) by More' and collaborators.
For example, a researcher may think that a set of observed data
points is best modelled with a Gaussian curve. A Gaussian curve is
parameterized by its mean, standard deviation and normalization.
MPFIT will, within certain constraints, find the set of parameters
which best fits the data. The fit is "best" in the least-squares
sense; that is, the sum of the weighted squared differences between
the model and data is minimized.
The Levenberg-Marquardt technique is a particular strategy for
iteratively searching for the best fit. This particular
implementation is drawn from MINPACK-1 (see NETLIB), and is much faster
and more accurate than the version provided in the Scientific Python package
in Scientific.Functions.LeastSquares.
This version allows upper and lower bounding constraints to be placed on each
parameter, or the parameter can be held fixed.
The user-supplied Python function should return an array of weighted
deviations between model and data. In a typical scientific problem
the residuals should be weighted so that each deviate has a
gaussian sigma of 1.0. If X represents values of the independent
variable, Y represents a measurement for each value of X, and ERR
represents the error in the measurements, then the deviates could
be calculated as follows:
DEVIATES = (Y - F(X)) / ERR
where F is the analytical function representing the model. You are
recommended to use the convenience functions MPFITFUN and
MPFITEXPR, which are driver functions that calculate the deviates
for you. If ERR are the 1-sigma uncertainties in Y, then
TOTAL( DEVIATES^2 )
will be the total chi-squared value. MPFIT will minimize the
chi-square value. The values of X, Y and ERR are passed through
MPFIT to the user-supplied function via the FUNCTKW keyword.
Simple constraints can be placed on parameter values by using the
PARINFO keyword to MPFIT. See below for a description of this
keyword.
MPFIT does not perform more general optimization tasks. See TNMIN
instead. MPFIT is customized, based on MINPACK-1, to the
least-squares minimization problem.
USER FUNCTION
The user must define a function which returns the appropriate
values as specified above. The function should return the weighted
deviations between the model and the data. It should also return a status
flag and an optional partial derivative array. For applications which
use finite-difference derivatives -- the default -- the user
function should be declared in the following way:
def myfunct(p, fjac=None, x=None, y=None, err=None)
# Parameter values are passed in "p"
# If fjac==None then partial derivatives should not be
# computed. It will always be None if MPFIT is called with default
# flag.
model = F(x, p)
# Non-negative status value means MPFIT should continue, negative means
# stop the calculation.
status = 0
return([status, (y-model)/err])
See below for applications with analytical derivatives.
The keyword parameters X, Y, and ERR in the example above are
suggestive but not required. Any parameters can be passed to
MYFUNCT by using the functkw keyword to MPFIT. Use MPFITFUN and
MPFITEXPR if you need ideas on how to do that. The function *must*
accept a parameter list, P.
In general there are no restrictions on the number of dimensions in
X, Y or ERR. However the deviates *must* be returned in a
one-dimensional Numeric array of type Float.
User functions may also indicate a fatal error condition using the
status return described above. If status is set to a number between
-15 and -1 then MPFIT will stop the calculation and return to the caller.
ANALYTIC DERIVATIVES
In the search for the best-fit solution, MPFIT by default
calculates derivatives numerically via a finite difference
approximation. The user-supplied function need not calculate the
derivatives explicitly. However, if you desire to compute them
analytically, then the AUTODERIVATIVE=0 keyword must be passed to MPFIT.
As a practical matter, it is often sufficient and even faster to allow
MPFIT to calculate the derivatives numerically, and so
AUTODERIVATIVE=0 is not necessary.
If AUTODERIVATIVE=0 is used then the user function must check the parameter
FJAC, and if FJAC!=None then return the partial derivative array in the
return list.
def myfunct(p, fjac=None, x=None, y=None, err=None)
# Parameter values are passed in "p"
# If FJAC!=None then partial derivatives must be computed.
# FJAC contains an array of len(p), where each entry
# is 1 if that parameter is free and 0 if it is fixed.
model = F(x, p)
# Non-negative status value means MPFIT should continue, negative means
# stop the calculation.
status = 0
if (dojac):
pderiv = zeros([len(x), len(p)], Float)
for j in range(len(p)):
pderiv[:,j] = FGRAD(x, p, j)
else:
pderiv = None
return([status, (y-model)/err, pderiv])
where FGRAD(x, p, i) is a user function which must compute the
derivative of the model with respect to parameter P[i] at X. When
finite differencing is used for computing derivatives (ie, when
AUTODERIVATIVE=1), or when MPFIT needs only the errors but not the
derivatives the parameter FJAC=None.
Derivatives should be returned in the PDERIV array. PDERIV should be an m x
n array, where m is the number of data points and n is the number
of parameters. dp[i,j] is the derivative at the ith point with
respect to the jth parameter.
The derivatives with respect to fixed parameters are ignored; zero
is an appropriate value to insert for those derivatives. Upon
input to the user function, FJAC is set to a vector with the same
length as P, with a value of 1 for a parameter which is free, and a
value of zero for a parameter which is fixed (and hence no
derivative needs to be calculated).
If the data is higher than one dimensional, then the *last*
dimension should be the parameter dimension. Example: fitting a
50x50 image, "dp" should be 50x50xNPAR.
CONSTRAINING PARAMETER VALUES WITH THE PARINFO KEYWORD
The behavior of MPFIT can be modified with respect to each
parameter to be fitted. A parameter value can be fixed; simple
boundary constraints can be imposed; limitations on the parameter
changes can be imposed; properties of the automatic derivative can
be modified; and parameters can be tied to one another.
These properties are governed by the PARINFO structure, which is
passed as a keyword parameter to MPFIT.
PARINFO should be a list of dictionaries, one list entry for each parameter.
Each parameter is associated with one element of the array, in
numerical order. The dictionary can have the following keys
(none are required, keys are case insensitive):
'value' - the starting parameter value (but see the START_PARAMS
parameter for more information).
'fixed' - a boolean value, whether the parameter is to be held
fixed or not. Fixed parameters are not varied by
MPFIT, but are passed on to MYFUNCT for evaluation.
'limited' - a two-element boolean array. If the first/second
element is set, then the parameter is bounded on the
lower/upper side. A parameter can be bounded on both
sides. Both LIMITED and LIMITS must be given
together.
'limits' - a two-element float array. Gives the
parameter limits on the lower and upper sides,
respectively. Zero, one or two of these values can be
set, depending on the values of LIMITED. Both LIMITED
and LIMITS must be given together.
'parname' - a string, giving the name of the parameter. The
fitting code of MPFIT does not use this tag in any
way. However, the default iterfunct will print the
parameter name if available.
'step' - the step size to be used in calculating the numerical
derivatives. If set to zero, then the step size is
computed automatically. Ignored when AUTODERIVATIVE=0.
'mpside' - the sidedness of the finite difference when computing
numerical derivatives. This field can take four
values:
0 - one-sided derivative computed automatically
1 - one-sided derivative (f(x+h) - f(x) )/h
-1 - one-sided derivative (f(x) - f(x-h))/h
2 - two-sided derivative (f(x+h) - f(x-h))/(2*h)
Where H is the STEP parameter described above. The
"automatic" one-sided derivative method will chose a
direction for the finite difference which does not
violate any constraints. The other methods do not
perform this check. The two-sided method is in
principle more precise, but requires twice as many
function evaluations. Default: 0.
'mpmaxstep' - the maximum change to be made in the parameter
value. During the fitting process, the parameter
will never be changed by more than this value in
one iteration.
A value of 0 indicates no maximum. Default: 0.
'tied' - a string expression which "ties" the parameter to other
free or fixed parameters. Any expression involving
constants and the parameter array P are permitted.
Example: if parameter 2 is always to be twice parameter
1 then use the following: parinfo(2).tied = '2 * p(1)'.
Since they are totally constrained, tied parameters are
considered to be fixed; no errors are computed for them.
[ NOTE: the PARNAME can't be used in expressions. ]
'mpprint' - if set to 1, then the default iterfunct will print the
parameter value. If set to 0, the parameter value
will not be printed. This tag can be used to
selectively print only a few parameter values out of
many. Default: 1 (all parameters printed)
Future modifications to the PARINFO structure, if any, will involve
adding dictionary tags beginning with the two letters "MP".
Therefore programmers are urged to avoid using tags starting with
the same letters; otherwise they are free to include their own
fields within the PARINFO structure, and they will be ignored.
PARINFO Example:
parinfo = [{'value':0., 'fixed':0, 'limited':[0,0], 'limits':[0.,0.]}
for i in range(5)]
parinfo[0]['fixed'] = 1
parinfo[4]['limited'][0] = 1
parinfo[4]['limits'][0] = 50.
values = [5.7, 2.2, 500., 1.5, 2000.]
for i in range(5): parinfo[i]['value']=values[i]
A total of 5 parameters, with starting values of 5.7,
2.2, 500, 1.5, and 2000 are given. The first parameter
is fixed at a value of 5.7, and the last parameter is
constrained to be above 50.
EXAMPLE
import mpfit
import numpy.oldnumeric as Numeric
x = arange(100, float)
p0 = [5.7, 2.2, 500., 1.5, 2000.]
y = ( p0[0] + p0[1]*x + p0[2]*(x**2) + p0[3]*sqrt(x) +
p0[4]*log(x))
fa = {'x':x, 'y':y, 'err':err}
m = mpfit('myfunct', p0, functkw=fa)
print 'status = ', m.status
if (m.status <= 0): print 'error message = ', m.errmsg
print 'parameters = ', m.params
Minimizes sum of squares of MYFUNCT. MYFUNCT is called with the X,
Y, and ERR keyword parameters that are given by FUNCTKW. The
results can be obtained from the returned object m.
THEORY OF OPERATION
There are many specific strategies for function minimization. One
very popular technique is to use function gradient information to
realize the local structure of the function. Near a local minimum
the function value can be taylor expanded about x0 as follows:
f(x) = f(x0) + f'(x0) . (x-x0) + (1/2) (x-x0) . f''(x0) . (x-x0)
----- --------------- ------------------------------- (1)
Order 0th 1st 2nd
Here f'(x) is the gradient vector of f at x, and f''(x) is the
Hessian matrix of second derivatives of f at x. The vector x is
the set of function parameters, not the measured data vector. One
can find the minimum of f, f(xm) using Newton's method, and
arrives at the following linear equation:
f''(x0) . (xm-x0) = - f'(x0) (2)
If an inverse can be found for f''(x0) then one can solve for
(xm-x0), the step vector from the current position x0 to the new
projected minimum. Here the problem has been linearized (ie, the
gradient information is known to first order). f''(x0) is
a symmetric n x n matrix, and should be positive definite.
The Levenberg - Marquardt technique is a variation on this theme.
It adds an additional diagonal term to the equation which may aid the
convergence properties:
(f''(x0) + nu I) . (xm-x0) = -f'(x0) (2a)
where I is the identity matrix. When nu is large, the overall
matrix is diagonally dominant, and the iterations follow steepest
descent. When nu is small, the iterations are quadratically
convergent.
In principle, if f''(x0) and f'(x0) are known then xm-x0 can be
determined. However the Hessian matrix is often difficult or
impossible to compute. The gradient f'(x0) may be easier to
compute, if even by finite difference techniques. So-called
quasi-Newton techniques attempt to successively estimate f''(x0)
by building up gradient information as the iterations proceed.
In the least squares problem there are further simplifications
which assist in solving eqn (2). The function to be minimized is
a sum of squares:
f = Sum(hi^2) (3)
where hi is the ith residual out of m residuals as described
above. This can be substituted back into eqn (2) after computing
the derivatives:
f' = 2 Sum(hi hi')
f'' = 2 Sum(hi' hj') + 2 Sum(hi hi'') (4)
If one assumes that the parameters are already close enough to a
minimum, then one typically finds that the second term in f'' is
negligible [or, in any case, is too difficult to compute]. Thus,
equation (2) can be solved, at least approximately, using only
gradient information.
In matrix notation, the combination of eqns (2) and (4) becomes:
hT' . h' . dx = - hT' . h (5)
Where h is the residual vector (length m), hT is its transpose, h'
is the Jacobian matrix (dimensions n x m), and dx is (xm-x0). The
user function supplies the residual vector h, and in some cases h'
when it is not found by finite differences (see MPFIT_FDJAC2,
which finds h and hT'). Even if dx is not the best absolute step
to take, it does provide a good estimate of the best *direction*,
so often a line minimization will occur along the dx vector
direction.
The method of solution employed by MINPACK is to form the Q . R
factorization of h', where Q is an orthogonal matrix such that QT .
Q = I, and R is upper right triangular. Using h' = Q . R and the
orthogonality of Q, eqn (5) becomes
(RT . QT) . (Q . R) . dx = - (RT . QT) . h
RT . R . dx = - RT . QT . h (6)
R . dx = - QT . h
where the last statement follows because R is upper triangular.
Here, R, QT and h are known so this is a matter of solving for dx.
The routine MPFIT_QRFAC provides the QR factorization of h, with
pivoting, and MPFIT_QRSOLV provides the solution for dx.
REFERENCES
MINPACK-1, Jorge More', available from netlib (www.netlib.org).
"Optimization Software Guide," Jorge More' and Stephen Wright,
SIAM, *Frontiers in Applied Mathematics*, Number 14.
More', Jorge J., "The Levenberg-Marquardt Algorithm:
Implementation and Theory," in *Numerical Analysis*, ed. Watson,
G. A., Lecture Notes in Mathematics 630, Springer-Verlag, 1977.
MODIFICATION HISTORY
Translated from MINPACK-1 in FORTRAN, Apr-Jul 1998, CM
Copyright (C) 1997-2002, Craig Markwardt
This software is provided as is without any warranty whatsoever.
Permission to use, copy, modify, and distribute modified or
unmodified copies is granted, provided this copyright and disclaimer
are included unchanged.
Translated from MPFIT (Craig Markwardt's IDL package) to Python,
August, 2002. Mark Rivers
Converted from Numeric to numpy (Sergey Koposov, July 2008)
"""
import numpy
#import types
#import scipy.lib.blas
import scipy.linalg.blas
# Original FORTRAN documentation
# **********
#
# subroutine lmdif
#
# the purpose of lmdif is to minimize the sum of the squares of
# m nonlinear functions in n variables by a modification of
# the levenberg-marquardt algorithm. the user must provide a
# subroutine which calculates the functions. the jacobian is
# then calculated by a forward-difference approximation.
#
# the subroutine statement is
#
# subroutine lmdif(fcn,m,n,x,fvec,ftol,xtol,gtol,maxfev,epsfcn,
# diag,mode,factor,nprint,info,nfev,fjac,
# ldfjac,ipvt,qtf,wa1,wa2,wa3,wa4)
#
# where
#
# fcn is the name of the user-supplied subroutine which
# calculates the functions. fcn must be declared
# in an external statement in the user calling
# program, and should be written as follows.
#
# subroutine fcn(m,n,x,fvec,iflag)
# integer m,n,iflag
# double precision x(n),fvec(m)
# ----------
# calculate the functions at x and
# return this vector in fvec.
# ----------
# return
# end
#
# the value of iflag should not be changed by fcn unless
# the user wants to terminate execution of lmdif.
# in this case set iflag to a negative integer.
#
# m is a positive integer input variable set to the number
# of functions.
#
# n is a positive integer input variable set to the number
# of variables. n must not exceed m.
#
# x is an array of length n. on input x must contain
# an initial estimate of the solution vector. on output x
# contains the final estimate of the solution vector.
#
# fvec is an output array of length m which contains
# the functions evaluated at the output x.
#
# ftol is a nonnegative input variable. termination
# occurs when both the actual and predicted relative
# reductions in the sum of squares are at most ftol.
# therefore, ftol measures the relative error desired
# in the sum of squares.
#
# xtol is a nonnegative input variable. termination
# occurs when the relative error between two consecutive
# iterates is at most xtol. therefore, xtol measures the
# relative error desired in the approximate solution.
#
# gtol is a nonnegative input variable. termination
# occurs when the cosine of the angle between fvec and
# any column of the jacobian is at most gtol in absolute
# value. therefore, gtol measures the orthogonality
# desired between the function vector and the columns
# of the jacobian.
#
# maxfev is a positive integer input variable. termination
# occurs when the number of calls to fcn is at least
# maxfev by the end of an iteration.
#
# epsfcn is an input variable used in determining a suitable
# step length for the forward-difference approximation. this
# approximation assumes that the relative errors in the
# functions are of the order of epsfcn. if epsfcn is less
# than the machine precision, it is assumed that the relative
# errors in the functions are of the order of the machine
# precision.
#
# diag is an array of length n. if mode = 1 (see
# below), diag is internally set. if mode = 2, diag
# must contain positive entries that serve as
# multiplicative scale factors for the variables.
#
# mode is an integer input variable. if mode = 1, the
# variables will be scaled internally. if mode = 2,
# the scaling is specified by the input diag. other
# values of mode are equivalent to mode = 1.
#
# factor is a positive input variable used in determining the
# initial step bound. this bound is set to the product of
# factor and the euclidean norm of diag*x if nonzero, or else
# to factor itself. in most cases factor should lie in the
# interval (.1,100.). 100. is a generally recommended value.
#
# nprint is an integer input variable that enables controlled
# printing of iterates if it is positive. in this case,
# fcn is called with iflag = 0 at the beginning of the first
# iteration and every nprint iterations thereafter and
# immediately prior to return, with x and fvec available
# for printing. if nprint is not positive, no special calls
# of fcn with iflag = 0 are made.
#
# info is an integer output variable. if the user has
# terminated execution, info is set to the (negative)
# value of iflag. see description of fcn. otherwise,
# info is set as follows.
#
# info = 0 improper input parameters.
#
# info = 1 both actual and predicted relative reductions
# in the sum of squares are at most ftol.
#
# info = 2 relative error between two consecutive iterates
# is at most xtol.
#
# info = 3 conditions for info = 1 and info = 2 both hold.
#
# info = 4 the cosine of the angle between fvec and any
# column of the jacobian is at most gtol in
# absolute value.
#
# info = 5 number of calls to fcn has reached or
# exceeded maxfev.
#
# info = 6 ftol is too small. no further reduction in
# the sum of squares is possible.
#
# info = 7 xtol is too small. no further improvement in
# the approximate solution x is possible.
#
# info = 8 gtol is too small. fvec is orthogonal to the
# columns of the jacobian to machine precision.
#
# nfev is an integer output variable set to the number of
# calls to fcn.
#
# fjac is an output m by n array. the upper n by n submatrix
# of fjac contains an upper triangular matrix r with
# diagonal elements of nonincreasing magnitude such that
#
# t t t
# p *(jac *jac)*p = r *r,
#
# where p is a permutation matrix and jac is the final
# calculated jacobian. column j of p is column ipvt(j)
# (see below) of the identity matrix. the lower trapezoidal
# part of fjac contains information generated during
# the computation of r.
#
# ldfjac is a positive integer input variable not less than m
# which specifies the leading dimension of the array fjac.
#
# ipvt is an integer output array of length n. ipvt
# defines a permutation matrix p such that jac*p = q*r,
# where jac is the final calculated jacobian, q is
# orthogonal (not stored), and r is upper triangular
# with diagonal elements of nonincreasing magnitude.
# column j of p is column ipvt(j) of the identity matrix.
#
# qtf is an output array of length n which contains
# the first n elements of the vector (q transpose)*fvec.
#
# wa1, wa2, and wa3 are work arrays of length n.
#
# wa4 is a work array of length m.
#
# subprograms called
#
# user-supplied ...... fcn
#
# minpack-supplied ... dpmpar,enorm,fdjac2,qrfac
#
# fortran-supplied ... dabs,dmax1,dmin1,dsqrt,mod
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
class mpfit:
blas_enorm32, = scipy.linalg.blas.get_blas_funcs(
['nrm2'], numpy.array([0], dtype=numpy.float32))
blas_enorm64, = scipy.linalg.blas.get_blas_funcs(
['nrm2'], numpy.array([0], dtype=numpy.float64))
def __init__(self, fcn, xall=None, functkw={}, parinfo=None,
ftol=1.e-10, xtol=1.e-10, gtol=1.e-10,
damp=0., maxiter=200, factor=100., nprint=1,
iterfunct='default', iterkw={}, nocovar=0,
rescale=0, autoderivative=1, quiet=0,
diag=None, epsfcn=None, debug=0):
"""
Inputs:
fcn:
The function to be minimized. The function should return the weighted
deviations between the model and the data, as described above.
xall:
An array of starting values for each of the parameters of the model.
The number of parameters should be fewer than the number of measurements.
This parameter is optional if the parinfo keyword is used (but see
parinfo). The parinfo keyword provides a mechanism to fix or constrain
individual parameters.
Keywords:
autoderivative:
If this is set, derivatives of the function will be computed
automatically via a finite differencing procedure. If not set, then
fcn must provide the (analytical) derivatives.
Default: set (=1)
NOTE: to supply your own analytical derivatives,
explicitly pass autoderivative=0
ftol:
A nonnegative input variable. Termination occurs when both the actual
and predicted relative reductions in the sum of squares are at most
ftol (and status is accordingly set to 1 or 3). Therefore, ftol
measures the relative error desired in the sum of squares.
Default: 1E-10
functkw:
A dictionary which contains the parameters to be passed to the
user-supplied function specified by fcn via the standard Python
keyword dictionary mechanism. This is the way you can pass additional
data to your user-supplied function without using global variables.
Consider the following example:
if functkw = {'xval':[1.,2.,3.], 'yval':[1.,4.,9.],
'errval':[1.,1.,1.] }
then the user supplied function should be declared like this:
def myfunct(p, fjac=None, xval=None, yval=None, errval=None):
Default: {} No extra parameters are passed to the user-supplied
function.
gtol:
A nonnegative input variable. Termination occurs when the cosine of
the angle between fvec and any column of the jacobian is at most gtol
in absolute value (and status is accordingly set to 4). Therefore,
gtol measures the orthogonality desired between the function vector
and the columns of the jacobian.
Default: 1e-10
iterkw:
The keyword arguments to be passed to iterfunct via the dictionary
keyword mechanism. This should be a dictionary and is similar in
operation to FUNCTKW.
Default: {} No arguments are passed.
iterfunct:
The name of a function to be called upon each NPRINT iteration of the
MPFIT routine. It should be declared in the following way:
def iterfunct(myfunct, p, iter, fnorm, functkw=None,
parinfo=None, quiet=0, dof=None, [iterkw keywords here])
# perform custom iteration update
iterfunct must accept all three keyword parameters (FUNCTKW, PARINFO
and QUIET).
myfunct: The user-supplied function to be minimized,
p: The current set of model parameters
iter: The iteration number
functkw: The arguments to be passed to myfunct.
fnorm: The chi-squared value.
quiet: Set when no textual output should be printed.
dof: The number of degrees of freedom, normally the number of points
less the number of free parameters.
See below for documentation of parinfo.
In implementation, iterfunct can perform updates to the terminal or
graphical user interface, to provide feedback while the fit proceeds.
If the fit is to be stopped for any reason, then iterfunct should return
a status value between -15 and -1. Otherwise it should return None
(e.g. no return statement) or 0.
In principle, iterfunct should probably not modify the parameter values,
because it may interfere with the algorithm's stability. In practice it
is allowed.
Default: an internal routine is used to print the parameter values.
Set iterfunct=None if there is no user-defined routine and you don't
want the internal default routine be called.
maxiter:
The maximum number of iterations to perform. If the number is exceeded,
then the status value is set to 5 and MPFIT returns.
Default: 200 iterations
nocovar:
Set this keyword to prevent the calculation of the covariance matrix
before returning (see COVAR)
Default: clear (=0) The covariance matrix is returned
nprint:
The frequency with which iterfunct is called. A value of 1 indicates
that iterfunct is called with every iteration, while 2 indicates every
other iteration, etc. Note that several Levenberg-Marquardt attempts
can be made in a single iteration.
Default value: 1
parinfo
Provides a mechanism for more sophisticated constraints to be placed on
parameter values. When parinfo is not passed, then it is assumed that
all parameters are free and unconstrained. Values in parinfo are never
modified during a call to MPFIT.
See description above for the structure of PARINFO.
Default value: None All parameters are free and unconstrained.
quiet:
Set this keyword when no textual output should be printed by MPFIT
damp:
A scalar number, indicating the cut-off value of residuals where
"damping" will occur. Residuals with magnitudes greater than this
number will be replaced by their hyperbolic tangent. This partially
mitigates the so-called large residual problem inherent in
least-squares solvers (as for the test problem CURVI,
http://www.maxthis.com/curviex.htm).
A value of 0 indicates no damping.
Default: 0
Note: DAMP doesn't work with autoderivative=0
xtol:
A nonnegative input variable. Termination occurs when the relative error
between two consecutive iterates is at most xtol (and status is
accordingly set to 2 or 3). Therefore, xtol measures the relative error
desired in the approximate solution.
Default: 1E-10
Outputs:
Returns an object of type mpfit. The results are attributes of this class,
e.g. mpfit.status, mpfit.errmsg, mpfit.params, mpfit.niter, mpfit.covar.
.status
An integer status code is returned. All values greater than zero can
represent success (however .status == 5 may indicate failure to
converge). It can have one of the following values:
-16
A parameter or function value has become infinite or an undefined
number. This is usually a consequence of numerical overflow in the
user's model function, which must be avoided.
-15 to -1
These are error codes that either MYFUNCT or iterfunct may return to
terminate the fitting process. Values from -15 to -1 are reserved
for the user functions and will not clash with MPFIT.
0 Improper input parameters.
1 Both actual and predicted relative reductions in the sum of squares
are at most ftol.
2 Relative error between two consecutive iterates is at most xtol
3 Conditions for status = 1 and status = 2 both hold.
4 The cosine of the angle between fvec and any column of the jacobian
is at most gtol in absolute value.
5 The maximum number of iterations has been reached.
6 ftol is too small. No further reduction in the sum of squares is
possible.
7 xtol is too small. No further improvement in the approximate solution
x is possible.
8 gtol is too small. fvec is orthogonal to the columns of the jacobian
to machine precision.
.fnorm
The value of the summed squared residuals for the returned parameter
values.
.covar
The covariance matrix for the set of parameters returned by MPFIT.
The matrix is NxN where N is the number of parameters. The square root
of the diagonal elements gives the formal 1-sigma statistical errors on
the parameters if errors were treated "properly" in fcn.
Parameter errors are also returned in .perror.
To compute the correlation matrix, pcor, use this example:
cov = mpfit.covar
pcor = cov * 0.
for i in range(n):
for j in range(n):
pcor[i,j] = cov[i,j]/sqrt(cov[i,i]*cov[j,j])
If nocovar is set or MPFIT terminated abnormally, then .covar is set to
a scalar with value None.
.errmsg
A string error or warning message is returned.
.nfev
The number of calls to MYFUNCT performed.
.niter
The number of iterations completed.
.perror
The formal 1-sigma errors in each parameter, computed from the
covariance matrix. If a parameter is held fixed, or if it touches a
boundary, then the error is reported as zero.
If the fit is unweighted (i.e. no errors were given, or the weights
were uniformly set to unity), then .perror will probably not represent
the true parameter uncertainties.
*If* you can assume that the true reduced chi-squared value is unity --
meaning that the fit is implicitly assumed to be of good quality --
then the estimated parameter uncertainties can be computed by scaling
.perror by the measured chi-squared value.
dof = len(x) - len(mpfit.params) # deg of freedom
# scaled uncertainties
pcerror = mpfit.perror * sqrt(mpfit.fnorm / dof)
"""
self.niter = 0
self.params = None
self.covar = None
self.perror = None
self.status = 0 # Invalid input flag set while we check inputs
self.debug = debug
self.errmsg = ''
self.nfev = 0
self.damp = damp
self.dof = 0
if fcn is None:
self.errmsg = "Usage: parms = mpfit('myfunt', ... )"
return
if iterfunct == 'default':
iterfunct = self.defiter
# Parameter damping doesn't work when user is providing their own
# gradients.
if (self.damp != 0) and (autoderivative == 0):
self.errmsg = 'ERROR: keywords DAMP and AUTODERIVATIVE are mutually exclusive'
return
# Parameters can either be stored in parinfo, or x. x takes precedence
# if it exists
if (xall is None) and (parinfo is None):
self.errmsg = 'ERROR: must pass parameters in P or PARINFO'
return
# Be sure that PARINFO is of the right type
if parinfo is not None:
if not isinstance(parinfo, list):
self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.'
return
else:
if not isinstance(parinfo[0], dict):
self.errmsg = 'ERROR: PARINFO must be a list of dictionaries.'
return
if ((xall is not None) and (len(xall) != len(parinfo))):
self.errmsg = 'ERROR: number of elements in PARINFO and P must agree'
return
# If the parameters were not specified at the command line, then
# extract them from PARINFO
if xall is None:
xall = self.parinfo(parinfo, 'value')
if xall is None:
self.errmsg = 'ERROR: either P or PARINFO(*)["value"] must be supplied.'
return
# Make sure parameters are numpy arrays
xall = numpy.asarray(xall)
# In the case if the xall is not float or if is float but has less
# than 64 bits we do convert it into double
if xall.dtype.kind != 'f' or xall.dtype.itemsize <= 4:
xall = xall.astype(numpy.float)
npar = len(xall)
self.fnorm = -1.
fnorm1 = -1.
# TIED parameters?
ptied = self.parinfo(parinfo, 'tied', default='', n=npar)
self.qanytied = 0
for i in range(npar):
ptied[i] = ptied[i].strip()
if ptied[i] != '':
self.qanytied = 1
self.ptied = ptied
# FIXED parameters ?
pfixed = self.parinfo(parinfo, 'fixed', default=0, n=npar)
pfixed = (pfixed == 1)
for i in range(npar):
pfixed[i] = pfixed[i] or (ptied[
i] != '') # Tied parameters are also effectively fixed
# Finite differencing step, absolute and relative, and sidedness of
# deriv.
step = self.parinfo(parinfo, 'step', default=0., n=npar)
dstep = self.parinfo(parinfo, 'relstep', default=0., n=npar)
dside = self.parinfo(parinfo, 'mpside', default=0, n=npar)
# Maximum and minimum steps allowed to be taken in one iteration
maxstep = self.parinfo(parinfo, 'mpmaxstep', default=0., n=npar)
minstep = self.parinfo(parinfo, 'mpminstep', default=0., n=npar)
qmin = minstep != 0
qmin[:] = False # Remove minstep for now!!
qmax = maxstep != 0
if numpy.any(qmin & qmax & (maxstep < minstep)):
self.errmsg = 'ERROR: MPMINSTEP is greater than MPMAXSTEP'
return
wh = (numpy.nonzero((qmin != 0.) | (qmax != 0.)))[0]
qminmax = len(wh > 0)
# Finish up the free parameters
ifree = (numpy.nonzero(pfixed != 1))[0]
nfree = len(ifree)
if nfree == 0:
self.errmsg = 'ERROR: no free parameters'
return
# Compose only VARYING parameters
self.params = xall.copy()
# self.params is the set of parameters to be
# returned
x = self.params[ifree] # x is the set of free parameters
# LIMITED parameters ?
limited = self.parinfo(parinfo, 'limited', default=[0, 0], n=npar)
limits = self.parinfo(parinfo, 'limits', default=[0., 0.], n=npar)
if (limited is not None) and (limits is not None):
# Error checking on limits in parinfo
if numpy.any((limited[:, 0] & (xall < limits[:, 0])) |
(limited[:, 1] & (xall > limits[:, 1]))):
self.errmsg = 'ERROR: parameters are not within PARINFO limits'
return
if numpy.any((limited[:, 0] & limited[:, 1]) &
(limits[:, 0] >= limits[:, 1]) &
(pfixed == 0)):
self.errmsg = 'ERROR: PARINFO parameter limits are not consistent'
return
# Transfer structure values to local variables
qulim = (limited[:, 1])[ifree]
ulim = (limits[:, 1])[ifree]
qllim = (limited[:, 0])[ifree]
llim = (limits[:, 0])[ifree]
if numpy.any((qulim != 0.) | (qllim != 0.)):
qanylim = 1
else:
qanylim = 0
else:
# Fill in local variables with dummy values
qulim = numpy.zeros(nfree)
ulim = x * 0.
qllim = qulim
llim = x * 0.
qanylim = 0
n = len(x)
# Check input parameters for errors
if (n < 0) or (ftol <= 0) or (xtol <= 0) or (gtol <= 0) \
or (maxiter < 0) or (factor <= 0):
self.errmsg = 'ERROR: input keywords are inconsistent'
return
if rescale != 0:
self.errmsg = 'ERROR: DIAG parameter scales are inconsistent'
if len(diag) < n:
return
if numpy.any(diag <= 0):
return
self.errmsg = ''
[self.status, fvec] = self.call(fcn, self.params, functkw)
if self.status < 0:
self.errmsg = 'ERROR: first call to "' + str(fcn) + '" failed'
return
# If the returned fvec has an itemsize of more than four bytes, assume
# that we have double precision
# It is important that the machar is determined by the precision of
# the returned value, not by the precision of the input array
if numpy.array([fvec]).dtype.itemsize > 4:
self.machar = machar(double=1)
self.blas_enorm = mpfit.blas_enorm64
else:
self.machar = machar(double=0)
self.blas_enorm = mpfit.blas_enorm32
machep = self.machar.machep
m = len(fvec)
if m < n:
self.errmsg = 'ERROR: number of parameters must not exceed data'
return
self.dof = m - nfree
self.fnorm = self.enorm(fvec)
# Initialize Levenberg-Marquardt parameter and iteration counter
par = 0.
self.niter = 1
qtf = x * 0.
self.status = 0
# Beginning of the outer loop
while(1):
# If requested, call fcn to enable printing of iterates
self.params[ifree] = x
if self.qanytied:
self.params = self.tie(self.params, ptied)
if (nprint > 0) and (iterfunct is not None):
if ((self.niter - 1) % nprint) == 0:
#mperr = 0
xnew0 = self.params.copy()
dof = numpy.max([len(fvec) - len(x), 0])
status = iterfunct(
fcn, self.params, self.niter, self.fnorm ** 2,
functkw=functkw, parinfo=parinfo, quiet=quiet,
dof=dof, **iterkw)
if status is not None:
self.status = status
# Check for user termination
if self.status < 0:
self.errmsg = 'WARNING: premature termination by ' + \
str(iterfunct)
return
# If parameters were changed (grrr..) then re-tie
if numpy.max(numpy.abs(xnew0 - self.params)) > 0:
if self.qanytied:
self.params = self.tie(self.params, ptied)
x = self.params[ifree]
# Calculate the jacobian matrix
self.status = 2
catch_msg = 'calling MPFIT_FDJAC2'
fjac = self.fdjac2(fcn, x, fvec, step, qulim, ulim, dside,
epsfcn=epsfcn,
autoderivative=autoderivative, dstep=dstep,
functkw=functkw, ifree=ifree, xall=self.params)
if fjac is None:
self.errmsg = 'WARNING: premature termination by FDJAC2'
return
# Determine if any of the parameters are pegged at the limits
if qanylim:
catch_msg = 'zeroing derivatives of pegged parameters'
whlpeg = (numpy.nonzero(qllim & (x == llim)))[0]
nlpeg = len(whlpeg)
whupeg = (numpy.nonzero(qulim & (x == ulim)))[0]
nupeg = len(whupeg)
# See if any "pegged" values should keep their derivatives
if nlpeg > 0:
# Total derivative of sum wrt lower pegged parameters
for i in range(nlpeg):
sum0 = sum(fvec * fjac[:, whlpeg[i]])
if sum0 > 0:
fjac[:, whlpeg[i]] = 0
if nupeg > 0:
# Total derivative of sum wrt upper pegged parameters
for i in range(nupeg):
sum0 = sum(fvec * fjac[:, whupeg[i]])
if sum0 < 0:
fjac[:, whupeg[i]] = 0
# Compute the QR factorization of the jacobian
[fjac, ipvt, wa1, wa2] = self.qrfac(fjac, pivot=1)
# On the first iteration if "diag" is unspecified, scale
# according to the norms of the columns of the initial jacobian
catch_msg = 'rescaling diagonal elements'
if self.niter == 1:
if (rescale == 0) or (len(diag) < n):
diag = wa2.copy()
diag[diag == 0] = 1.
# On the first iteration, calculate the norm of the scaled x
# and initialize the step bound delta
wa3 = diag * x
xnorm = self.enorm(wa3)
delta = factor*xnorm
if delta == 0.:
delta = factor
# Form (q transpose)*fvec and store the first n components in qtf
catch_msg = 'forming (q transpose)*fvec'
wa4 = fvec.copy()
for j in range(n):
lj = ipvt[j]
temp3 = fjac[j, lj]
if temp3 != 0:
fj = fjac[j:, lj]
wj = wa4[j:]
# *** optimization wa4(j:*)
wa4[j:] = wj - fj * sum(fj*wj) / temp3
fjac[j, lj] = wa1[j]
qtf[j] = wa4[j]
# From this point on, only the square matrix, consisting of the
# triangle of R, is needed.
fjac = fjac[0:n, 0:n]
fjac.shape = [n, n]
temp = fjac.copy()
for i in range(n):
temp[:, i] = fjac[:, ipvt[i]]
fjac = temp.copy()
# Check for overflow. This should be a cheap test here since FJAC
# has been reduced to a (small) square matrix, and the test is
# O(N^2).
# wh = where(finite(fjac) EQ 0, ct)
# if ct GT 0 then goto, FAIL_OVERFLOW
# Compute the norm of the scaled gradient
catch_msg = 'computing the scaled gradient'
gnorm = 0.
if self.fnorm != 0:
for j in range(n):
l = ipvt[j]
if wa2[l] != 0:
sum0 = sum(fjac[0:j+1, j]*qtf[0:j+1])/self.fnorm
gnorm = numpy.max([gnorm, numpy.abs(sum0/wa2[l])])
# Test for convergence of the gradient norm
if gnorm <= gtol:
self.status = 4
break
if maxiter == 0:
self.status = 5
break
# Rescale if necessary
if rescale == 0:
diag = numpy.choose(diag > wa2, (wa2, diag))
# Beginning of the inner loop
while(1):
# Determine the levenberg-marquardt parameter
catch_msg = 'calculating LM parameter (MPFIT_)'
[fjac, par, wa1, wa2] = self.lmpar(fjac, ipvt, diag, qtf,
delta, wa1, wa2, par=par)
# Store the direction p and x+p. Calculate the norm of p
wa1 = -wa1
if (qanylim == 0) and (qminmax == 0):
# No parameter limits, so just move to new position WA2
alpha = 1.
wa2 = x + wa1
else:
# Respect the limits. If a step were to go out of bounds, then
# we should take a step in the same direction but shorter distance.
# The step should take us right to the limit in that case.
alpha = 1.
if qanylim:
# Do not allow any steps out of bounds
catch_msg = 'checking for a step out of bounds'
if nlpeg > 0:
wa1[whlpeg] = numpy.clip(wa1[
whlpeg], 0., numpy.max(wa1))
if nupeg > 0:
wa1[whupeg] = numpy.clip(wa1[
whupeg], numpy.min(wa1), 0.)
dwa1 = numpy.abs(wa1) > machep
whl = (numpy.nonzero(((
dwa1 != 0.) & qllim) & ((x + wa1) < llim)))[0]
if len(whl) > 0:
t = ((llim[whl] - x[whl]) /
wa1[whl])
alpha = numpy.min([alpha, numpy.min(t)])
whu = (numpy.nonzero(((
dwa1 != 0.) & qulim) & ((x + wa1) > ulim)))[0]
if len(whu) > 0:
t = ((ulim[whu] - x[whu]) /
wa1[whu])
alpha = numpy.min([alpha, numpy.min(t)])
# Obey any max step values.
if qminmax:
nwa1 = wa1 * alpha
whmax = (numpy.nonzero((
qmax != 0.) & (maxstep > 0)))[0]
if len(whmax) > 0:
mrat = numpy.max(numpy.abs(nwa1[whmax]) /
numpy.abs(maxstep[ifree[whmax]]))
if mrat > 1:
alpha = alpha / mrat
# Scale the resulting vector
wa1 = wa1 * alpha
wa2 = x + wa1
# Adjust the final output values. If the step put us exactly
# on a boundary, make sure it is exact.
sgnu = (ulim >= 0) * 2. - 1.
sgnl = (llim >= 0) * 2. - 1.
# Handles case of
# ... nonzero *LIM ... ...zero * LIM
ulim1 = ulim * (1 - sgnu * machep) - (ulim == 0) * machep
llim1 = llim * (1 + sgnl * machep) + (llim == 0) * machep
wh = (numpy.nonzero((qulim != 0) & (wa2 >= ulim1)))[0]
if len(wh) > 0:
wa2[wh] = ulim[wh]
wh = (numpy.nonzero((qllim != 0.) & (wa2 <= llim1)))[0]
if len(wh) > 0:
wa2[wh] = llim[wh]
# endelse
wa3 = diag * wa1
pnorm = self.enorm(wa3)
# On the first iteration, adjust the initial step bound
if self.niter == 1:
delta = numpy.min([delta, pnorm])
self.params[ifree] = wa2
# Evaluate the function at x+p and calculate its norm
#mperr = 0
catch_msg = 'calling ' + str(fcn)
[self.status, wa4] = self.call(fcn, self.params, functkw)
if self.status < 0:
self.errmsg = 'WARNING: premature termination by "' + fcn + '"'
return
fnorm1 = self.enorm(wa4)
# Compute the scaled actual reduction
catch_msg = 'computing convergence criteria'
actred = -1.
if (0.1 * fnorm1) < self.fnorm:
actred = - (fnorm1 / self.fnorm) ** 2 + 1.
# Compute the scaled predicted reduction and the scaled directional
# derivative
for j in range(n):
wa3[j] = 0
wa3[0:j+1] = wa3[0:j+1] + fjac[0:j+1, j]*wa1[ipvt[j]]
# Remember, alpha is the fraction of the full LM step actually
# taken
temp1 = self.enorm(alpha*wa3)/self.fnorm
temp2 = (numpy.sqrt(alpha*par)*pnorm)/self.fnorm
prered = temp1*temp1 + (temp2*temp2)/0.5
dirder = -(temp1*temp1 + temp2*temp2)
# Compute the ratio of the actual to the predicted reduction.
ratio = 0.
if prered != 0:
ratio = actred/prered
# Update the step bound
if ratio <= 0.25:
if actred >= 0:
temp = .5
else:
temp = .5*dirder/(dirder + .5*actred)
if ((0.1*fnorm1) >= self.fnorm) or (temp < 0.1):
temp = 0.1
delta = temp*numpy.min([delta, pnorm/0.1])
par = par/temp
else:
if (par == 0) or (ratio >= 0.75):
delta = pnorm/.5
par = .5*par
# Test for successful iteration
if ratio >= 0.0001:
# Successful iteration. Update x, fvec, and their norms
x = wa2
wa2 = diag * x
fvec = wa4
xnorm = self.enorm(wa2)
self.fnorm = fnorm1
self.niter = self.niter + 1
# Tests for convergence
if (numpy.abs(actred) <= ftol) and (prered <= ftol) \
and (0.5 * ratio <= 1):
self.status = 1
if delta <= xtol*xnorm:
self.status = 2
if (numpy.abs(actred) <= ftol) and (prered <= ftol) \
and (0.5 * ratio <= 1) and (self.status == 2):
self.status = 3
if self.status != 0:
break
# Tests for termination and stringent tolerances
if self.niter >= maxiter:
self.status = 5
if (numpy.abs(actred) <= machep) and (prered <= machep) \
and (0.5 * ratio <= 1):
self.status = 6
if delta <= machep * xnorm:
self.status = 7
if gnorm <= machep:
self.status = 8
if self.status != 0:
break
# End of inner loop. Repeat if iteration unsuccessful
if ratio >= 0.0001:
break
# Check for over/underflow
if ~numpy.all(numpy.isfinite(wa1) & numpy.isfinite(wa2) &
numpy.isfinite(x)) or ~numpy.isfinite(ratio):
#errmsg = ('''ERROR: parameter or function value(s) have become
# 'infinite; check model function for over- 'and underflow''')
self.status = -16
break
# wh = where(finite(wa1) EQ 0 OR finite(wa2) EQ 0 OR finite(x) EQ 0, ct)
# if ct GT 0 OR finite(ratio) EQ 0 then begin
if self.status != 0:
break
# End of outer loop.
catch_msg = 'in the termination phase'
# Termination, either normal or user imposed.
if len(self.params) == 0:
return
if nfree == 0:
self.params = xall.copy()
else:
self.params[ifree] = x
if (nprint > 0) and (self.status > 0):
catch_msg = 'calling ' + str(fcn)
[status, fvec] = self.call(fcn, self.params, functkw)
#catch_msg = 'in the termination phase'
self.fnorm = self.enorm(fvec)
if (self.fnorm is not None) and (fnorm1 is not None):
self.fnorm = numpy.max([self.fnorm, fnorm1])
self.fnorm = self.fnorm ** 2.
self.covar = None
self.perror = None
# (very carefully) set the covariance matrix COVAR
if (self.status > 0) and (nocovar == 0) and (n is not None) \
and (fjac is not None) and (ipvt is not None):
sz = fjac.shape
if (n > 0) and (sz[0] >= n) and (sz[1] >= n) \
and (len(ipvt) >= n):
#catch_msg = 'computing the covariance matrix'
cv = self.calc_covar(fjac[0:n, 0:n], ipvt[0:n])
cv.shape = [n, n]
nn = len(xall)
# Fill in actual covariance matrix, accounting for fixed
# parameters.
self.covar = numpy.zeros([nn, nn], dtype=float)
for i in range(n):
self.covar[ifree, ifree[i]] = cv[:, i]
# Compute errors in parameters
#catch_msg = 'computing parameter errors'
self.perror = numpy.zeros(nn, dtype=float)
d = numpy.diagonal(self.covar)
wh = (numpy.nonzero(d >= 0))[0]
if len(wh) > 0:
self.perror[wh] = numpy.sqrt(d[wh])
return
def __str__(self):
return {'params': self.params,
'niter': self.niter,
'params': self.params,
'covar': self.covar,
'perror': self.perror,
'status': self.status,
'debug': self.debug,
'errmsg': self.errmsg,
'nfev': self.nfev,
'damp': self.damp
#,'machar':self.machar
}.__str__()
# Default procedure to be called every iteration. It simply prints
# the parameter values.
def defiter(self, fcn, x, iter, fnorm=None, functkw=None,
quiet=0, iterstop=None, parinfo=None,
format=None, pformat='%.10g', dof=1):
if self.debug:
print ('Entering defiter...')
if quiet:
return
if fnorm is None:
[status, fvec] = self.call(fcn, x, functkw)
fnorm = self.enorm(fvec)**2
# Determine which parameters to print
nprint = len(x)
print ("Iter ", ('%6i' % iter), " CHI-SQUARE = ", (
'%.10g' % fnorm), " DOF = ", ('%i' % dof))
for i in range(nprint):
if (parinfo is not None) and ('parname' in parinfo[i]):
p = ' ' + parinfo[i]['parname'] + ' = '
else:
p = ' P' + str(i) + ' = '
if (parinfo is not None) and ('mpprint' in parinfo[i]):
iprint = parinfo[i]['mpprint']
else:
iprint = 1
if iprint:
print (p + (pformat % x[i]) + ' ')
return 0
# DO_ITERSTOP:
# if keyword_set(iterstop) then begin
# k = get_kbrd(0)
# if k EQ string(byte(7)) then begin
# message, 'WARNING: minimization not complete', /info
# print, 'Do you want to terminate this procedure? (y/n)', $
# format='(A,$)'
# k = ''
# read, k
# if strupcase(strmid(k,0,1)) EQ 'Y' then begin
# message, 'WARNING: Procedure is terminating.', /info
# mperr = -1
# endif
# endif
# endif
# Procedure to parse the parameter values in PARINFO, which is a list of
# dictionaries
def parinfo(self, parinfo=None, key='a', default=None, n=0):
if self.debug:
print ('Entering parinfo...')
if (n == 0) and (parinfo is not None):
n = len(parinfo)
if n == 0:
values = default
return values
values = []
for i in range(n):
if (parinfo is not None) and (key in parinfo[i]):
values.append(parinfo[i][key])
else:
values.append(default)
# Convert to numeric arrays if possible
test = default
if isinstance(default, list):
test = default[0]
if isinstance(test, int):
values = numpy.asarray(values, int)
elif isinstance(test, float):
values = numpy.asarray(values, float)
return values
# Call user function or procedure, with _EXTRA or not, with
# derivatives or not.
def call(self, fcn, x, functkw, fjac=None):
if self.debug:
print ('Entering call...')
if self.qanytied:
x = self.tie(x, self.ptied)
self.nfev = self.nfev + 1
if fjac is None:
[status, f] = fcn(x, fjac=fjac, **functkw)
if self.damp > 0:
# Apply the damping if requested. This replaces the residuals
# with their hyperbolic tangent. Thus residuals larger than
# DAMP are essentially clipped.
f = numpy.tanh(f/self.damp)
return [status, f]
else:
return fcn(x, fjac=fjac, **functkw)
def enorm(self, vec):
ans = self.blas_enorm(vec)
return ans
def fdjac2(
self, fcn, x, fvec, step=None, ulimited=None, ulimit=None, dside=None,
epsfcn=None, autoderivative=1,
functkw=None, xall=None, ifree=None, dstep=None):
if self.debug:
print ('Entering fdjac2...')
machep = self.machar.machep
if epsfcn is None:
epsfcn = machep
if xall is None:
xall = x
if ifree is None:
ifree = numpy.arange(len(xall))
if step is None:
step = x * 0.
nall = len(xall)
eps = numpy.sqrt(numpy.max([epsfcn, machep]))
m = len(fvec)
n = len(x)
# Compute analytical derivative if requested
if autoderivative == 0:
#mperr = 0
fjac = numpy.zeros(nall, dtype=float)
fjac[ifree] = 1.0 # Specify which parameters need derivatives
[status, fp] = self.call(fcn, xall, functkw, fjac=fjac)
if len(fjac) != m * nall:
print ('ERROR: Derivative matrix was not computed properly.')
return None
# This definition is consistent with CURVEFIT
# Sign error found (thanks Jesus Fernandez
# <fernande@irm.chu-caen.fr>)
fjac.shape = [m, nall]
fjac = -fjac
# Select only the free parameters
if len(ifree) < nall:
fjac = fjac[:, ifree]
fjac.shape = [m, n]
return fjac
fjac = numpy.zeros([m, n], dtype=float)
h = eps * numpy.abs(x)
# if STEP is given, use that
# STEP includes the fixed parameters
if step is not None:
stepi = step[ifree]
wh = (numpy.nonzero(stepi > 0))[0]
if len(wh) > 0:
h[wh] = stepi[wh]
# if relative step is given, use that
# DSTEP includes the fixed parameters
if len(dstep) > 0:
dstepi = dstep[ifree]
wh = (numpy.nonzero(dstepi > 0))[0]
if len(wh) > 0:
h[wh] = numpy.abs(dstepi[wh]*x[wh])
# In case any of the step values are zero
h[h == 0] = eps
# Reverse the sign of the step if we are up against the parameter
# limit, or if the user requested it.
# DSIDE includes the fixed parameters (ULIMITED/ULIMIT have only
# varying ones)
mask = dside[ifree] == -1
if len(ulimited) > 0 and len(ulimit) > 0:
mask = (mask | ((ulimited != 0) & (x > ulimit-h)))
wh = (numpy.nonzero(mask))[0]
if len(wh) > 0:
h[wh] = - h[wh]
# Loop through parameters, computing the derivative for each
for j in range(n):
xp = xall.copy()
xp[ifree[j]] = xp[ifree[j]] + h[j]
[status, fp] = self.call(fcn, xp, functkw)
if status < 0:
return None
if numpy.abs(dside[ifree[j]]) <= 1:
# COMPUTE THE ONE-SIDED DERIVATIVE
# Note optimization fjac(0:*,j)
fjac[0:, j] = (fp-fvec)/h[j]
else:
# COMPUTE THE TWO-SIDED DERIVATIVE
xp[ifree[j]] = xall[ifree[j]] - h[j]
#mperr = 0
[status, fm] = self.call(fcn, xp, functkw)
if status < 0:
return None
# Note optimization fjac(0:*,j)
fjac[0:, j] = (fp-fm)/(2*h[j])
return fjac
# Original FORTRAN documentation
# **********
#
# subroutine qrfac
#
# this subroutine uses householder transformations with column
# pivoting (optional) to compute a qr factorization of the
# m by n matrix a. that is, qrfac determines an orthogonal
# matrix q, a permutation matrix p, and an upper trapezoidal
# matrix r with diagonal elements of nonincreasing magnitude,
# such that a*p = q*r. the householder transformation for
# column k, k = 1,2,...,min(m,n), is of the form
#
# t
# i - (1/u(k))*u*u
#
# where u has zeros in the first k-1 positions. the form of
# this transformation and the method of pivoting first
# appeared in the corresponding linpack subroutine.
#
# the subroutine statement is
#
# subroutine qrfac(m,n,a,lda,pivot,ipvt,lipvt,rdiag,acnorm,wa)
#
# where
#
# m is a positive integer input variable set to the number
# of rows of a.
#
# n is a positive integer input variable set to the number
# of columns of a.
#
# a is an m by n array. on input a contains the matrix for
# which the qr factorization is to be computed. on output
# the strict upper trapezoidal part of a contains the strict
# upper trapezoidal part of r, and the lower trapezoidal
# part of a contains a factored form of q (the non-trivial
# elements of the u vectors described above).
#
# lda is a positive integer input variable not less than m
# which specifies the leading dimension of the array a.
#
# pivot is a logical input variable. if pivot is set true,
# then column pivoting is enforced. if pivot is set false,
# then no column pivoting is done.
#
# ipvt is an integer output array of length lipvt. ipvt
# defines the permutation matrix p such that a*p = q*r.
# column j of p is column ipvt(j) of the identity matrix.
# if pivot is false, ipvt is not referenced.
#
# lipvt is a positive integer input variable. if pivot is false,
# then lipvt may be as small as 1. if pivot is true, then
# lipvt must be at least n.
#
# rdiag is an output array of length n which contains the
# diagonal elements of r.
#
# acnorm is an output array of length n which contains the
# norms of the corresponding columns of the input matrix a.
# if this information is not needed, then acnorm can coincide
# with rdiag.
#
# wa is a work array of length n. if pivot is false, then wa
# can coincide with rdiag.
#
# subprograms called
#
# minpack-supplied ... dpmpar,enorm
#
# fortran-supplied ... dmax1,dsqrt,min0
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
#
# PIVOTING / PERMUTING:
#
# Upon return, A(*,*) is in standard parameter order, A(*,IPVT) is in
# permuted order.
#
# RDIAG is in permuted order.
# ACNORM is in standard parameter order.
#
#
# NOTE: in IDL the factors appear slightly differently than described
# above. The matrix A is still m x n where m >= n.
#
# The "upper" triangular matrix R is actually stored in the strict
# lower left triangle of A under the standard notation of IDL.
#
# The reflectors that generate Q are in the upper trapezoid of A upon
# output.
#
# EXAMPLE: decompose the matrix [[9.,2.,6.],[4.,8.,7.]]
# aa = [[9.,2.,6.],[4.,8.,7.]]
# mpfit_qrfac, aa, aapvt, rdiag, aanorm
# IDL> print, aa
# 1.81818* 0.181818* 0.545455*
# -8.54545+ 1.90160* 0.432573*
# IDL> print, rdiag
# -11.0000+ -7.48166+
#
# The components marked with a * are the components of the
# reflectors, and those marked with a + are components of R.
#
# To reconstruct Q and R we proceed as follows. First R.
# r = fltarr(m, n)
# for i = 0, n-1 do r(0:i,i) = aa(0:i,i) # fill in lower diag
# r(lindgen(n)*(m+1)) = rdiag
#
# Next, Q, which are composed from the reflectors. Each reflector v
# is taken from the upper trapezoid of aa, and converted to a matrix
# via (I - 2 vT . v / (v . vT)).
#
# hh = ident # identity matrix
# for i = 0, n-1 do begin
# v = aa(*,i) & if i GT 0 then v(0:i-1) = 0 # extract reflector
# hh = hh # (ident - 2*(v # v)/total(v * v)) # generate matrix
# endfor
#
# Test the result:
# IDL> print, hh # transpose(r)
# 9.00000 4.00000
# 2.00000 8.00000
# 6.00000 7.00000
#
# Note that it is usually never necessary to form the Q matrix
# explicitly, and MPFIT does not.
    def qrfac(self, a, pivot=0):
        """QR-factorize the m-by-n matrix ``a`` with optional column
        pivoting (MINPACK QRFAC; see the block comment above for the
        storage convention).

        ``a`` is modified IN PLACE: on return it holds the Householder
        reflectors and part of R.  Returns ``[a, ipvt, rdiag, acnorm]``
        where ``ipvt`` is the pivot permutation, ``rdiag`` the diagonal of
        R (in permuted order) and ``acnorm`` the original column norms
        (in standard order).
        """
        if self.debug:
            print ('Entering qrfac...')
        machep = self.machar.machep
        sz = a.shape
        m = sz[0]
        n = sz[1]
        # Compute the initial column norms and initialize arrays
        acnorm = numpy.zeros(n, dtype=float)
        for j in range(n):
            acnorm[j] = self.enorm(a[:, j])
        rdiag = acnorm.copy()
        wa = rdiag.copy()
        ipvt = numpy.arange(n)
        # Reduce a to r with householder transformations
        minmn = numpy.min([m, n])
        for j in range(minmn):
            if pivot != 0:
                # Bring the column of largest norm into the pivot position
                rmax = numpy.max(rdiag[j:])
                kmax = (numpy.nonzero(rdiag[j:] == rmax))[0]
                ct = len(kmax)
                kmax = kmax + j
                if ct > 0:
                    kmax = kmax[0]
                    # Exchange rows via the pivot only. Avoid actually exchanging
                    # the rows, in case there is lots of memory transfer. The
                    # exchange occurs later, within the body of MPFIT, after the
                    # extraneous columns of the matrix have been shed.
                    if kmax != j:
                        temp = ipvt[j]
                        ipvt[j] = ipvt[kmax]
                        ipvt[kmax] = temp
                        rdiag[kmax] = rdiag[j]
                        wa[kmax] = wa[j]
            # Compute the householder transformation to reduce the jth
            # column of A to a multiple of the jth unit vector
            lj = ipvt[j]
            ajj = a[j:, lj]
            ajnorm = self.enorm(ajj)
            if ajnorm == 0:
                # Column already zero below the diagonal: nothing to reduce
                break
            if a[j, lj] < 0:
                # Sign chosen to avoid cancellation in ajj[0] + 1 below
                ajnorm = -ajnorm
            ajj = ajj / ajnorm
            ajj[0] = ajj[0] + 1
            # *** Note optimization a(j:*,j)
            a[j:, lj] = ajj
            # Apply the transformation to the remaining columns
            # and update the norms
            # NOTE to SELF: tried to optimize this by removing the loop,
            # but it actually got slower. Reverted to "for" loop to keep
            # it simple.
            if j+1 < n:
                for k in range(j+1, n):
                    lk = ipvt[k]
                    ajk = a[j:, lk]
                    # *** Note optimization a(j:*,lk)
                    # (corrected 20 Jul 2000)
                    if a[j, lj] != 0:
                        a[j:, lk] = ajk - ajj * sum(ajk*ajj)/a[j, lj]
                        if (pivot != 0) and (rdiag[k] != 0):
                            # Downdate the running column-norm estimate...
                            temp = a[j, lk]/rdiag[k]
                            rdiag[k] = rdiag[k] * numpy.sqrt(
                                numpy.max([(1.-temp**2), 0.]))
                            temp = rdiag[k]/wa[k]
                            # ...and recompute it exactly when cancellation
                            # has degraded it too far.
                            if (0.05*temp*temp) <= machep:
                                rdiag[k] = self.enorm(a[j+1:, lk])
                                wa[k] = rdiag[k]
            rdiag[j] = -ajnorm
        return [a, ipvt, rdiag, acnorm]
# Original FORTRAN documentation
# **********
#
# subroutine qrsolv
#
# given an m by n matrix a, an n by n diagonal matrix d,
# and an m-vector b, the problem is to determine an x which
# solves the system
#
# a*x = b , d*x = 0 ,
#
# in the least squares sense.
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then qrsolv expects
# the full upper triangle of r, the permutation matrix p,
# and the first n components of (q transpose)*b. the system
# a*x = b, d*x = 0, is then equivalent to
#
# t t
# r*z = q *b , p *d*p*z = 0 ,
#
# where x = p*z. if this system does not have full rank,
# then a least squares solution is obtained. on output qrsolv
# also provides an upper triangular matrix s such that
#
# t t t
# p *(a *a + d*d)*p = s *s .
#
# s is computed within qrsolv and may be of separate interest.
#
# the subroutine statement is
#
# subroutine qrsolv(n,r,ldr,ipvt,diag,qtb,x,sdiag,wa)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle
# must contain the full upper triangle of the matrix r.
# on output the full upper triangle is unaltered, and the
# strict lower triangle contains the strict upper triangle
# (transposed) of the upper triangular matrix s.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# diag is an input array of length n which must contain the
# diagonal elements of the matrix d.
#
# qtb is an input array of length n which must contain the first
# n elements of the vector (q transpose)*b.
#
# x is an output array of length n which contains the least
# squares solution of the system a*x = b, d*x = 0.
#
# sdiag is an output array of length n which contains the
# diagonal elements of the upper triangular matrix s.
#
# wa is a work array of length n.
#
# subprograms called
#
# fortran-supplied ... dabs,dsqrt
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
    def qrsolv(self, r, ipvt, diag, qtb, sdiag):
        """Solve a*x = b, d*x = 0 in the least-squares sense given the
        pivoted QR factorization of a (MINPACK QRSOLV).

        ``r`` holds the upper triangle of R on input, ``ipvt`` the
        permutation, ``diag`` the diagonal of D, and ``qtb`` the first n
        elements of (q^T)*b.  Returns ``(r, x, sdiag)`` where ``x`` solves
        the system and ``sdiag`` holds the diagonal of the triangular
        factor S.  ``r`` and ``sdiag`` are modified IN PLACE; the strict
        lower triangle of ``r`` receives S^T.
        """
        if self.debug:
            print ('Entering qrsolv...')
        sz = r.shape
        #m = sz[0]
        n = sz[1]
        # copy r and (q transpose)*b to preserve input and initialize s.
        # in particular, save the diagonal elements of r in x.
        for j in range(n):
            r[j:n, j] = r[j, j:n]
        x = numpy.diagonal(r).copy()
        wa = qtb.copy()
        # Eliminate the diagonal matrix d using a givens rotation
        for j in range(n):
            l = ipvt[j]
            if diag[l] == 0:
                break
            sdiag[j:] = 0
            sdiag[j] = diag[l]
            # The transformations to eliminate the row of d modify only a
            # single element of (q transpose)*b beyond the first n, which
            # is initially zero.
            qtbpj = 0.
            for k in range(j, n):
                if sdiag[k] == 0:
                    break
                # Choose the rotation so the larger-magnitude entry drives
                # the trig terms (avoids overflow in the ratio).
                if numpy.abs(r[k, k]) < numpy.abs(sdiag[k]):
                    cotan = r[k, k]/sdiag[k]
                    sine = 0.5/numpy.sqrt(.25 + .25*cotan*cotan)
                    cosine = sine*cotan
                else:
                    tang = sdiag[k]/r[k, k]
                    cosine = 0.5/numpy.sqrt(.25 + .25*tang*tang)
                    sine = cosine*tang
                # Compute the modified diagonal element of r and the
                # modified element of ((q transpose)*b,0).
                r[k, k] = cosine*r[k, k] + sine*sdiag[k]
                temp = cosine*wa[k] + sine*qtbpj
                qtbpj = -sine*wa[k] + cosine*qtbpj
                wa[k] = temp
                # Accumulate the transformation in the row of s
                if n > k+1:
                    temp = cosine*r[k+1:n, k] + sine*sdiag[k+1:n]
                    sdiag[k+1:n] = -sine*r[k+1:n, k] + cosine*sdiag[k+1:n]
                    r[k+1:n, k] = temp
            # Store the diagonal of s and restore the saved diagonal of r
            sdiag[j] = r[j, j]
            r[j, j] = x[j]
        # Solve the triangular system for z. If the system is singular
        # then obtain a least squares solution
        nsing = n
        wh = (numpy.nonzero(sdiag == 0))[0]
        if len(wh) > 0:
            nsing = wh[0]
            wa[nsing:] = 0
        if nsing >= 1:
            wa[nsing-1] = wa[nsing-1]/sdiag[nsing-1]  # Degenerate case
            # *** Reverse loop *** (back-substitution through s)
            for j in range(nsing-2, -1, -1):
                sum0 = sum(r[j+1:nsing, j]*wa[j+1:nsing])
                wa[j] = (wa[j]-sum0)/sdiag[j]
        # Permute the components of z back to components of x
        x[ipvt] = wa
        return (r, x, sdiag)
# Original FORTRAN documentation
#
# subroutine lmpar
#
# given an m by n matrix a, an n by n nonsingular diagonal
# matrix d, an m-vector b, and a positive number delta,
# the problem is to determine a value for the parameter
# par such that if x solves the system
#
# a*x = b , sqrt(par)*d*x = 0 ,
#
# in the least squares sense, and dxnorm is the euclidean
# norm of d*x, then either par is zero and
#
# (dxnorm-delta) .le. 0.1*delta ,
#
# or par is positive and
#
# abs(dxnorm-delta) .le. 0.1*delta .
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# qr factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then lmpar expects
# the full upper triangle of r, the permutation matrix p,
# and the first n components of (q transpose)*b. on output
# lmpar also provides an upper triangular matrix s such that
#
# t t t
# p *(a *a + par*d*d)*p = s *s .
#
# s is employed within lmpar and may be of separate interest.
#
# only a few iterations are generally needed for convergence
# of the algorithm. if, however, the limit of 10 iterations
# is reached, then the output par will contain the best
# value obtained so far.
#
# the subroutine statement is
#
# subroutine lmpar(n,r,ldr,ipvt,diag,qtb,delta,par,x,sdiag,
# wa1,wa2)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle
# must contain the full upper triangle of the matrix r.
# on output the full upper triangle is unaltered, and the
# strict lower triangle contains the strict upper triangle
# (transposed) of the upper triangular matrix s.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# diag is an input array of length n which must contain the
# diagonal elements of the matrix d.
#
# qtb is an input array of length n which must contain the first
# n elements of the vector (q transpose)*b.
#
# delta is a positive input variable which specifies an upper
# bound on the euclidean norm of d*x.
#
# par is a nonnegative variable. on input par contains an
# initial estimate of the levenberg-marquardt parameter.
# on output par contains the final estimate.
#
# x is an output array of length n which contains the least
# squares solution of the system a*x = b, sqrt(par)*d*x = 0,
# for the output par.
#
# sdiag is an output array of length n which contains the
# diagonal elements of the upper triangular matrix s.
#
# wa1 and wa2 are work arrays of length n.
#
# subprograms called
#
# minpack-supplied ... dpmpar,enorm,qrsolv
#
# fortran-supplied ... dabs,dmax1,dmin1,dsqrt
#
# argonne national laboratory. minpack project. march 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
    def lmpar(self, r, ipvt, diag, qtb, delta, x, sdiag, par=None):
        """Determine the Levenberg-Marquardt parameter (MINPACK LMPAR).

        Given the pivoted QR factorization of the Jacobian (``r``,
        ``ipvt``), the scaling ``diag``, (q^T)*b in ``qtb`` and the
        trust-region radius ``delta``, find ``par`` such that the scaled
        step norm ||D*x|| lies within 10% of ``delta``.  At most 10
        iterations are taken.  Returns ``[r, par, x, sdiag]``; ``r``,
        ``x`` and ``sdiag`` are modified in place (via qrsolv).
        """
        if self.debug:
            print ('Entering lmpar...')
        dwarf = self.machar.minnum
        machep = self.machar.machep
        sz = r.shape
        #m = sz[0]
        n = sz[1]
        # Compute and store in x the gauss-newton direction. If the
        # jacobian is rank-deficient, obtain a least-squares solution
        nsing = n
        wa1 = qtb.copy()
        # Diagonal entries below rthresh are treated as numerically zero
        rthresh = numpy.max(numpy.abs(numpy.diagonal(r))) * machep
        wh = (numpy.nonzero(numpy.abs(numpy.diagonal(r)) < rthresh))[0]
        if len(wh) > 0:
            nsing = wh[0]
            wa1[wh[0]:] = 0
        if nsing >= 1:
            # *** Reverse loop *** (back-substitution through R)
            for j in range(nsing-1, -1, -1):
                wa1[j] = wa1[j]/r[j, j]
                if j-1 >= 0:
                    wa1[0:j] = wa1[0:j] - r[0:j, j]*wa1[j]
        # Note: ipvt here is a permutation array
        x[ipvt] = wa1
        # Initialize the iteration counter. Evaluate the function at the
        # origin, and test for acceptance of the gauss-newton direction
        iter = 0
        wa2 = diag * x
        dxnorm = self.enorm(wa2)
        fp = dxnorm - delta
        if fp <= 0.1*delta:
            # Gauss-Newton step already fits the trust region: par = 0
            return [r, 0., x, sdiag]
        # If the jacobian is not rank deficient, the newton step provides a
        # lower bound, parl, for the zero of the function. Otherwise set
        # this bound to zero.
        parl = 0.
        if nsing >= n:
            wa1 = diag[ipvt] * wa2[ipvt] / dxnorm
            wa1[0] = wa1[0] / r[0, 0]  # Degenerate case
            for j in range(1, n):  # Note "1" here, not zero
                sum0 = sum(r[0:j, j]*wa1[0:j])
                wa1[j] = (wa1[j] - sum0)/r[j, j]
            temp = self.enorm(wa1)
            parl = ((fp/delta)/temp)/temp
        # Calculate an upper bound, paru, for the zero of the function
        for j in range(n):
            sum0 = sum(r[0:j+1, j]*qtb[0:j+1])
            wa1[j] = sum0/diag[ipvt[j]]
        gnorm = self.enorm(wa1)
        paru = gnorm/delta
        if paru == 0:
            paru = dwarf/numpy.min([delta, 0.1])
        # If the input par lies outside of the interval (parl,paru), set
        # par to the closer endpoint
        par = numpy.max([par, parl])
        par = numpy.min([par, paru])
        if par == 0:
            par = gnorm/dxnorm
        # Beginning of an iteration
        while(1):
            iter = iter + 1
            # Evaluate the function at the current value of par
            if par == 0:
                par = numpy.max([dwarf, paru*0.001])
            temp = numpy.sqrt(par)
            wa1 = temp * diag
            [r, x, sdiag] = self.qrsolv(r, ipvt, wa1, qtb, sdiag)
            wa2 = diag*x
            dxnorm = self.enorm(wa2)
            temp = fp
            fp = dxnorm - delta
            # Stop when converged to 10% of delta, stuck at the lower
            # bound while shrinking, or out of iterations.
            if (numpy.abs(fp) <= 0.1*delta) or \
                    ((parl == 0) and (fp <= temp) and (temp < 0)) or \
                    (iter == 10):
                break
            # Compute the newton correction
            wa1 = diag[ipvt] * wa2[ipvt] / dxnorm
            for j in range(n-1):
                wa1[j] = wa1[j]/sdiag[j]
                wa1[j+1:n] = wa1[j+1:n] - r[j+1:n, j]*wa1[j]
            wa1[n-1] = wa1[n-1]/sdiag[n-1]  # Degenerate case
            temp = self.enorm(wa1)
            parc = ((fp/delta)/temp)/temp
            # Depending on the sign of the function, update parl or paru
            if fp > 0:
                parl = numpy.max([parl, par])
            if fp < 0:
                paru = numpy.min([paru, par])
            # Compute an improved estimate for par
            par = numpy.max([parl, par+parc])
            # End of an iteration
        # Termination
        return [r, par, x, sdiag]
# Procedure to tie one parameter to another.
    def tie(self, p, ptied=None):
        """Apply 'tied' parameter constraints to the parameter vector ``p``.

        Each non-empty entry ``ptied[i]`` is a Python expression (in terms
        of ``p``) whose value is assigned to ``p[i]``.  Returns the updated
        ``p``, or None when ``ptied`` is None.
        """
        if self.debug:
            print ('Entering tie...')
        if ptied is None:
            return
        for i in range(len(ptied)):
            if ptied[i] == '':
                continue
            cmd = 'p[' + str(i) + '] = ' + ptied[i]
            # SECURITY NOTE: this exec()s a caller-supplied TIED string --
            # only trusted expressions should ever be placed in PARINFO.
            # The item assignment mutates ``p`` in place, which is why the
            # exec'd statement is visible to the caller.
            exec(cmd)
        return p
# Original FORTRAN documentation
# **********
#
# subroutine covar
#
# given an m by n matrix a, the problem is to determine
# the covariance matrix corresponding to a, defined as
#
# t
# inverse(a *a) .
#
# this subroutine completes the solution of the problem
# if it is provided with the necessary information from the
# qr factorization, with column pivoting, of a. that is, if
# a*p = q*r, where p is a permutation matrix, q has orthogonal
# columns, and r is an upper triangular matrix with diagonal
# elements of nonincreasing magnitude, then covar expects
# the full upper triangle of r and the permutation matrix p.
# the covariance matrix is then computed as
#
# t t
# p*inverse(r *r)*p .
#
# if a is nearly rank deficient, it may be desirable to compute
# the covariance matrix corresponding to the linearly independent
# columns of a. to define the numerical rank of a, covar uses
# the tolerance tol. if l is the largest integer such that
#
# abs(r(l,l)) .gt. tol*abs(r(1,1)) ,
#
# then covar computes the covariance matrix corresponding to
# the first l columns of r. for k greater than l, column
# and row ipvt(k) of the covariance matrix are set to zero.
#
# the subroutine statement is
#
# subroutine covar(n,r,ldr,ipvt,tol,wa)
#
# where
#
# n is a positive integer input variable set to the order of r.
#
# r is an n by n array. on input the full upper triangle must
# contain the full upper triangle of the matrix r. on output
# r contains the square symmetric covariance matrix.
#
# ldr is a positive integer input variable not less than n
# which specifies the leading dimension of the array r.
#
# ipvt is an integer input array of length n which defines the
# permutation matrix p such that a*p = q*r. column j of p
# is column ipvt(j) of the identity matrix.
#
# tol is a nonnegative input variable used to define the
# numerical rank of a in the manner described above.
#
# wa is a work array of length n.
#
# subprograms called
#
# fortran-supplied ... dabs
#
# argonne national laboratory. minpack project. august 1980.
# burton s. garbow, kenneth e. hillstrom, jorge j. more
#
# **********
def calc_covar(self, rr, ipvt=None, tol=1.e-14):
if self.debug:
print ('Entering calc_covar...')
if numpy.rank(rr) != 2:
print ('ERROR: r must be a two-dimensional matrix')
return -1
s = rr.shape
n = s[0]
if s[0] != s[1]:
print ('ERROR: r must be a square matrix')
return -1
if ipvt is None:
ipvt = numpy.arange(n)
r = rr.copy()
r.shape = [n, n]
# For the inverse of r in the full upper triangle of r
l = -1
tolr = tol * numpy.abs(r[0, 0])
for k in range(n):
if numpy.abs(r[k, k]) <= tolr:
break
r[k, k] = 1./r[k, k]
for j in range(k):
temp = r[k, k] * r[j, k]
r[j, k] = 0.
r[0:j+1, k] = r[0:j+1, k] - temp*r[0:j+1, j]
l = k
# Form the full upper triangle of the inverse of (r transpose)*r
# in the full upper triangle of r
if l >= 0:
for k in range(l+1):
for j in range(k):
temp = r[j, k]
r[0:j+1, j] = r[0:j+1, j] + temp*r[0:j+1, k]
temp = r[k, k]
r[0:k+1, k] = temp * r[0:k+1, k]
# For the full lower triangle of the covariance matrix
# in the strict lower triangle or and in wa
wa = numpy.repeat([r[0, 0]], n)
for j in range(n):
jj = ipvt[j]
sing = j > l
for i in range(j+1):
if sing:
r[i, j] = 0.
ii = ipvt[i]
if ii > jj:
r[ii, jj] = r[i, j]
if ii < jj:
r[jj, ii] = r[i, j]
wa[jj] = r[j, j]
# Symmetrize the covariance matrix in r
for j in range(n):
r[0:j+1, j] = r[j, 0:j+1]
r[j, j] = wa[j]
return r
class machar:
    """Machine-precision constants for single- or double-precision floats.

    Mirrors the constants produced by the original MINPACK MACHAR routine,
    derived here from numpy.finfo.
    """

    def __init__(self, double=1):
        # double == 0 selects single precision; anything else, double.
        info = numpy.finfo(numpy.float32 if double == 0 else numpy.float64)
        self.machep = info.eps    # machine epsilon
        self.maxnum = info.max    # largest representable number
        self.minnum = info.tiny   # smallest positive normal number
        self.maxlog = numpy.log(self.maxnum)
        self.minlog = numpy.log(self.minnum)
        # "Dwarf" / "giant" guards used by the norm computations.
        self.rdwarf = numpy.sqrt(self.minnum*1.5) * 10
        self.rgiant = numpy.sqrt(self.maxnum) * 0.1
| philrosenfield/ResolvedStellarPops | utils/mpfit/mpfit.py | Python | bsd-3-clause | 93,267 |
#
# Functions for interacting with the network_types table in the database
#
# Mark Huang <mlhuang@cs.princeton.edu>
# Copyright (C) 2006 The Trustees of Princeton University
#
from PLC.Faults import *
from PLC.Parameter import Parameter
from PLC.Table import Row, Table
class NetworkType(Row):
    """
    Representation of a row in the network_types table. To use,
    instantiate with a dict of values.
    """

    table_name = 'network_types'
    primary_key = 'type'
    join_tables = ['interfaces']
    fields = {
        'type': Parameter(str, "Network type", max = 20),
    }

    def validate_type(self, name):
        """Validate a network type name: non-empty and not already taken."""
        # Reject blank names up front
        if not len(name):
            raise PLCInvalidArgument("Network type must be specified")
        # Reject names that already exist in the table
        if NetworkTypes(self.api, [name]):
            raise PLCInvalidArgument("Network type name already in use")
        return name
class NetworkTypes(Table):
    """
    Representation of the network_types table in the database.
    """

    def __init__(self, api, types = None):
        Table.__init__(self, api, NetworkType)

        # Select all fields; optionally restrict to the requested types.
        query = "SELECT %s FROM network_types" % \
                ", ".join(NetworkType.fields)
        if types:
            quoted = [api.db.quote(t) for t in types]
            query += " WHERE type IN (%s)" % ", ".join(quoted)

        self.selectall(query)
| dreibh/planetlab-lxc-plcapi | PLC/NetworkTypes.py | Python | bsd-3-clause | 1,417 |
import collections
import copy
import functools
import itertools
import json
import time
import warnings
from sentinels import NOTHING
from .filtering import filter_applies, iter_key_candidates
from . import ObjectId, OperationFailure, DuplicateKeyError
from .helpers import basestring, xrange, print_deprecation_warning
try:
from collections import OrderedDict
except ImportError:
from ordereddict import OrderedDict
try:
# Optional requirements for providing Map-Reduce functionality
import execjs
except ImportError:
execjs = None
try:
from bson import (json_util, SON)
except ImportError:
json_utils = SON = None
from six import (
string_types,
text_type,
iteritems,
itervalues,
iterkeys)
from mongomock import helpers
class Collection(object):
    def __init__(self, db, name):
        # ``db`` is the owning (mock) database object, ``name`` the
        # collection name within it.
        super(Collection, self).__init__()
        self.name = name
        self.full_name = "{0}.{1}".format(db.name, name)
        # Stored under the explicitly mangled name (== self.__database).
        self._Collection__database = db
        # _id -> document map; ordered so scans are insertion-ordered.
        self._documents = OrderedDict()
        # Registered unique-index key specs ([(key, direction), ...] each).
        self._uniques = []
def __repr__(self):
return "Collection({0}, '{1}')".format(self._Collection__database, self.name)
    def __getitem__(self, name):
        # Dotted sub-collection access: collection['x'] -> "<name>.x" on the db.
        return self._Collection__database[self.name + '.' + name]
    def __getattr__(self, name):
        # Unknown attributes resolve to sub-collections, mirroring pymongo.
        return self.__getitem__(name)
    def insert(self, data, manipulate=True,
               safe=None, check_keys=True, continue_on_error=False, **kwargs):
        """Insert a document or a list of documents.

        Returns the inserted _id, or a list of _ids for a list input.
        The pymongo-compatible flags (manipulate, safe, check_keys,
        continue_on_error) are accepted for API compatibility but ignored.
        """
        if isinstance(data, list):
            return [self._insert(element) for element in data]
        return self._insert(data)
    def _insert(self, data):
        """Store one document, enforcing _id presence and unique indexes.

        Mutates *data* by adding a generated ObjectId under '_id' when absent
        (matching pymongo behavior). Raises DuplicateKeyError on _id or
        unique-index collisions.
        """
        if not all(isinstance(k, string_types) for k in data):
            raise ValueError("Document keys must be strings")
        if '_id' not in data:
            data['_id'] = ObjectId()
        object_id = data['_id']
        if object_id in self._documents:
            raise DuplicateKeyError("Duplicate Key Error", 11000)
        # Reject the insert if it collides with any registered unique index.
        for unique in self._uniques:
            find_kwargs = {}
            for key, direction in unique:
                if key in data:
                    find_kwargs[key] = data[key]
            answer = self.find(spec=find_kwargs)
            if answer.count() > 0:
                raise DuplicateKeyError("Duplicate Key Error", 11000)
        # Deep-copy values so later caller-side mutation cannot corrupt storage.
        self._documents[object_id] = self._internalize_dict(data)
        return object_id
def _internalize_dict(self, d):
return dict((k, copy.deepcopy(v)) for k, v in iteritems(d))
    def _has_key(self, doc, key):
        # Simple membership helper (kept for call-site readability).
        return key in doc
def update(self, spec, document, upsert = False, manipulate = False,
safe = False, multi = False, _check_keys = False, **kwargs):
"""Updates document(s) in the collection."""
found = False
updated_existing = False
num_updated = 0
for existing_document in itertools.chain(self._iter_documents(spec), [None]):
# we need was_insert for the setOnInsert update operation
was_insert = False
# the sentinel document means we should do an upsert
if existing_document is None:
if not upsert:
continue
existing_document = self._documents[self._insert(self._discard_operators(spec))]
was_insert = True
else:
updated_existing = True
num_updated += 1
first = True
found = True
subdocument = None
for k, v in iteritems(document):
if k == '$set':
positional = False
for key in iterkeys(v):
if '$' in key:
positional = True
break
if positional:
subdocument = self._update_document_fields_positional(existing_document,v, spec, _set_updater, subdocument)
continue
self._update_document_fields(existing_document, v, _set_updater)
elif k == '$setOnInsert':
if not was_insert:
continue
positional = any('$' in key for key in iterkeys(v))
if positional:
# we use _set_updater
subdocument = self._update_document_fields_positional(existing_document,v, spec, _set_updater, subdocument)
else:
self._update_document_fields(existing_document, v, _set_updater)
elif k == '$unset':
for field, value in iteritems(v):
if self._has_key(existing_document, field):
del existing_document[field]
elif k == '$inc':
positional = False
for key in iterkeys(v):
if '$' in key:
positional = True
break
if positional:
subdocument = self._update_document_fields_positional(existing_document, v, spec, _inc_updater, subdocument)
continue
self._update_document_fields(existing_document, v, _inc_updater)
elif k == '$addToSet':
for field, value in iteritems(v):
container = existing_document.setdefault(field, [])
if value not in container:
container.append(value)
elif k == '$pull':
for field, value in iteritems(v):
nested_field_list = field.rsplit('.')
if len(nested_field_list) == 1:
if field in existing_document:
arr = existing_document[field]
if isinstance(value, dict):
existing_document[field] = [obj for obj in arr if not filter_applies(value, obj)]
else:
existing_document[field] = [obj for obj in arr if not value == obj]
continue
# nested fields includes a positional element
# need to find that element
if '$' in nested_field_list:
if not subdocument:
subdocument = self._get_subdocument(existing_document, spec, nested_field_list)
# value should be a dictionary since we're pulling
pull_results = []
# and the last subdoc should be an array
for obj in subdocument[nested_field_list[-1]]:
if isinstance(obj, dict):
for pull_key, pull_value in iteritems(value):
if obj[pull_key] != pull_value:
pull_results.append(obj)
continue
if obj != value:
pull_results.append(obj)
# cannot write to doc directly as it doesn't save to existing_document
subdocument[nested_field_list[-1]] = pull_results
elif k == '$push':
for field, value in iteritems(v):
nested_field_list = field.rsplit('.')
if len(nested_field_list) == 1:
if field not in existing_document:
existing_document[field] = []
# document should be a list
# append to it
if isinstance(value, dict):
if '$each' in value:
# append the list to the field
existing_document[field] += list(value['$each'])
continue
existing_document[field].append(value)
continue
# nested fields includes a positional element
# need to find that element
elif '$' in nested_field_list:
if not subdocument:
subdocument = self._get_subdocument(existing_document, spec, nested_field_list)
# we're pushing a list
push_results = []
if nested_field_list[-1] in subdocument:
# if the list exists, then use that list
push_results = subdocument[nested_field_list[-1]]
if isinstance(value, dict):
# check to see if we have the format
# { '$each': [] }
if '$each' in value:
push_results += list(value['$each'])
else:
push_results.append(value)
else:
push_results.append(value)
# cannot write to doc directly as it doesn't save to existing_document
subdocument[nested_field_list[-1]] = push_results
# push to array in a nested attribute
else:
# create nested attributes if they do not exist
subdocument = existing_document
for field in nested_field_list[:-1]:
if field not in subdocument:
subdocument[field] = {}
subdocument = subdocument[field]
# we're pushing a list
push_results = []
if nested_field_list[-1] in subdocument:
# if the list exists, then use that list
push_results = subdocument[nested_field_list[-1]]
push_results.append(value)
subdocument[nested_field_list[-1]] = push_results
else:
if first:
# replace entire document
for key in document.keys():
if key.startswith('$'):
# can't mix modifiers with non-modifiers in update
raise ValueError('field names cannot start with $ [{}]'.format(k))
_id = spec.get('_id', existing_document.get('_id', None))
existing_document.clear()
if _id:
existing_document['_id'] = _id
existing_document.update(self._internalize_dict(document))
if existing_document['_id'] != _id:
# id changed, fix index
del self._documents[_id]
self.insert(existing_document)
break
else:
# can't mix modifiers with non-modifiers in update
raise ValueError('Invalid modifier specified: {}'.format(k))
first = False
if not multi:
break
return {
text_type("connectionId"): self._Collection__database.connection._id,
text_type("err"): None,
text_type("ok"): 1.0,
text_type("n"): num_updated,
text_type("updatedExisting"): updated_existing,
}
    def _get_subdocument(self, existing_document, spec, nested_field_list):
        """
        This method retrieves the subdocument of the existing_document.nested_field_list. It uses the spec to filter
        through the items. It will continue to grab nested documents until it can go no further. It will then return the
        subdocument that was last saved. '$' is the positional operator, so we use the $elemMatch in the spec to find
        the right subdocument in the array.
        """
        # current document in view
        doc = existing_document
        # previous document in view
        subdocument = existing_document
        # current spec in view
        subspec = spec
        # walk down the dictionary
        for subfield in nested_field_list:
            if subfield == '$':
                # positional element should have the equivalent elemMatch in the query
                subspec = subspec['$elemMatch']
                for item in doc:
                    # iterate through
                    if filter_applies(subspec, item):
                        # found the matching item
                        # save the parent
                        subdocument = doc
                        # save the item
                        doc = item
                        break
                continue
            subdocument = doc
            doc = doc[subfield]
            # once the spec no longer describes this path, stop narrowing it
            if not subfield in subspec:
                break
            subspec = subspec[subfield]
        return subdocument
def _discard_operators(self, doc):
# TODO: this looks a little too naive...
return dict((k, v) for k, v in iteritems(doc) if not k.startswith("$"))
    def find(self, spec = None, fields = None, filter = None, sort = None, timeout = True, limit = 0, snapshot = False, as_class = None, skip = 0, slave_okay=False):
        """Return a Cursor over documents matching *spec*.

        *filter* is a deprecated alias for *spec*. The timeout, snapshot
        and slave_okay flags are accepted for pymongo API compatibility
        and ignored by the mock.
        """
        if filter is not None:
            print_deprecation_warning('filter', 'spec')
            if spec is None:
                spec = filter
        if as_class is None:
            as_class = dict
        # The result sequence is built lazily by the cursor via this factory,
        # so clone() can restart iteration from scratch.
        return Cursor(self, functools.partial(self._get_dataset, spec, sort, fields, as_class, skip), limit=limit)
    def _get_dataset(self, spec, sort, fields, as_class, skip):
        """Produce the (lazy) result sequence: project fields, sort, then skip."""
        dataset = (self._copy_only_fields(document, fields, as_class) for document in self._iter_documents(spec))
        if sort:
            # Stable sorts applied in reverse key order give multi-key ordering.
            for sortKey, sortDirection in reversed(sort):
                dataset = iter(sorted(dataset, key = lambda x: _resolve_key(sortKey, x), reverse = sortDirection < 0))
        # Skipping past the end is silently ignored, matching MongoDB.
        for i in xrange(skip):
            try:
                unused = next(dataset)
            except StopIteration:
                pass
        return dataset
def _copy_field(self, obj, container):
if isinstance(obj, list):
new = []
for item in obj:
new.append(self._copy_field(item, container))
return new
if isinstance(obj, dict):
new = container()
for key, value in obj.items():
new[key] = self._copy_field(value, container)
return new
else:
return copy.copy(obj)
def _copy_only_fields(self, doc, fields, container):
"""Copy only the specified fields."""
if fields is None:
return self._copy_field(doc, container)
else:
if not fields:
fields = {"_id": 1}
if not isinstance(fields, dict):
fields = helpers._fields_list_to_dict(fields)
#we can pass in something like {"_id":0, "field":1}, so pull the id value out and hang on to it until later
id_value = fields.pop('_id', 1)
#other than the _id field, all fields must be either includes or excludes, this can evaluate to 0
if len(set(list(fields.values()))) > 1:
raise ValueError('You cannot currently mix including and excluding fields.')
#if we have novalues passed in, make a doc_copy based on the id_value
if len(list(fields.values())) == 0:
if id_value == 1:
doc_copy = container()
else:
doc_copy = self._copy_field(doc, container)
#if 1 was passed in as the field values, include those fields
elif list(fields.values())[0] == 1:
doc_copy = container()
for key in fields:
if key in doc:
doc_copy[key] = doc[key]
#otherwise, exclude the fields passed in
else:
doc_copy = self._copy_field(doc, container)
for key in fields:
if key in doc_copy:
del doc_copy[key]
#set the _id value if we requested it, otherwise remove it
if id_value == 0:
if '_id' in doc_copy:
del doc_copy['_id']
else:
if '_id' in doc:
doc_copy['_id'] = doc['_id']
fields['_id'] = id_value #put _id back in fields
return doc_copy
    def _update_document_fields(self, doc, fields, updater):
        """Implements the $set behavior on an existing document"""
        # Keys may be dotted paths; traversal happens in the single-field helper.
        for k, v in iteritems(fields):
            self._update_document_single_field(doc, k, v, updater)
def _update_document_fields_positional(self, doc, fields, spec, updater, subdocument=None):
"""Implements the $set behavior on an existing document"""
for k, v in iteritems(fields):
if '$' in k:
field_name_parts = k.split('.')
if not subdocument:
current_doc = doc
subspec = spec
for part in field_name_parts[:-1]:
if part == '$':
subspec = subspec.get('$elemMatch', subspec)
for item in current_doc:
if filter_applies(subspec, item):
current_doc = item
break
continue
new_spec = {}
for el in subspec:
if el.startswith(part):
if len(el.split(".")) > 1:
new_spec[".".join(el.split(".")[1:])] = subspec[el]
else:
new_spec = subspec[el]
subspec = new_spec
current_doc = current_doc[part]
subdocument = current_doc
updater(subdocument, field_name_parts[-1], v)
continue
# otherwise, we handle it the standard way
self._update_document_single_field(doc, k, v, updater)
return subdocument
    def _update_document_single_field(self, doc, field_name, field_value, updater):
        """Walk a dotted *field_name* inside *doc* and apply *updater* at the leaf.

        List segments may be numeric indexes or '$' (first element). Traversal
        through scalar values is silently skipped, as MongoDB does.
        """
        field_name_parts = field_name.split(".")
        for part in field_name_parts[:-1]:
            if not isinstance(doc, dict) and not isinstance(doc, list):
                return # mongodb skips such cases
            if isinstance(doc, list):
                try:
                    if part == '$':
                        # Positional operator: assumes the first element matched.
                        doc = doc[0]
                    else:
                        doc = doc[int(part)]
                    continue
                except ValueError:
                    pass
            # Intermediate dicts are created on demand.
            # NOTE(review): a non-numeric part applied to a list falls through
            # to setdefault() and raises AttributeError -- confirm intended.
            doc = doc.setdefault(part, {})
        updater(doc, field_name_parts[-1], field_value)
    def _iter_documents(self, filter = None):
        # Lazy scan over all stored documents, yielding those matching *filter*.
        return (document for document in itervalues(self._documents) if filter_applies(filter, document))
def find_one(self, spec_or_id=None, *args, **kwargs):
# Allow calling find_one with a non-dict argument that gets used as
# the id for the query.
if spec_or_id is None:
spec_or_id = {}
if not isinstance(spec_or_id, collections.Mapping):
spec_or_id = {'_id':spec_or_id}
try:
return next(self.find(spec_or_id, *args, **kwargs))
except StopIteration:
return None
def find_and_modify(self, query = {}, update = None, upsert = False, sort = None, **kwargs):
remove = kwargs.get("remove", False)
if kwargs.get("new", False) and remove:
raise OperationFailure("remove and returnNew can't co-exist") # message from mongodb
if remove and update is not None:
raise ValueError("Can't do both update and remove")
old = self.find_one(query, sort=sort)
if not old:
if upsert:
old = {'_id':self.insert(query)}
else:
return None
if remove:
self.remove({"_id": old["_id"]})
else:
self.update({'_id':old['_id']}, update)
if kwargs.get('new', False):
return self.find_one({'_id':old['_id']})
return old
    def save(self, to_save, manipulate = True, safe = False, **kwargs):
        """Insert when the document has no _id, otherwise upsert it by _id.

        Returns the document's _id (mirrors pymongo's legacy save()).
        """
        if not isinstance(to_save, dict):
            raise TypeError("cannot save object of type %s" % type(to_save))
        if "_id" not in to_save:
            return self.insert(to_save)
        else:
            # upsert=True so saving a document with an unknown _id creates it.
            self.update({"_id": to_save["_id"]}, to_save, True,
                        manipulate, safe, _check_keys = True, **kwargs)
            return to_save.get("_id", None)
    def remove(self, spec_or_id = None, search_filter = None):
        """Remove objects matching spec_or_id from the collection."""
        if search_filter is not None:
            print_deprecation_warning('search_filter', 'spec_or_id')
        if spec_or_id is None:
            spec_or_id = search_filter if search_filter else {}
        if not isinstance(spec_or_id, dict):
            # A bare value is treated as the _id to remove.
            spec_or_id = {'_id': spec_or_id}
        to_delete = list(self.find(spec = spec_or_id))
        for doc in to_delete:
            doc_id = doc['_id']
            del self._documents[doc_id]
        # Shape mimics pymongo's legacy write result.
        return {
            "connectionId": self._Collection__database.connection._id,
            "n": len(to_delete),
            "ok": 1.0,
            "err": None,
        }
    def count(self):
        # Total number of stored documents (no filtering).
        return len(self._documents)
def drop(self):
del self._documents
self._documents = {}
def ensure_index(self, key_or_list, cache_for = 300, **kwargs):
if 'unique' in kwargs and kwargs['unique']:
self._uniques.append(helpers._index_list(key_or_list))
    def drop_index(self, index_or_name):
        # Indexes are not modelled beyond uniqueness; nothing to drop.
        pass
    def index_information(self):
        # No index metadata is tracked by the mock.
        return {}
def map_reduce(self, map_func, reduce_func, out, full_response=False, query=None, limit=0):
if execjs is None:
raise NotImplementedError(
"PyExecJS is required in order to run Map-Reduce. "
"Use 'pip install pyexecjs pymongo' to support Map-Reduce mock."
)
if limit == 0:
limit = None
start_time = time.clock()
out_collection = None
reduced_rows = None
full_dict = {'counts': {'input': 0,
'reduce':0,
'emit':0,
'output':0},
'timeMillis': 0,
'ok': 1.0,
'result': None}
map_ctx = execjs.compile("""
function doMap(fnc, docList) {
var mappedDict = {};
function emit(key, val) {
if (key['$oid']) {
mapped_key = '$oid' + key['$oid'];
}
else {
mapped_key = key;
}
if(!mappedDict[mapped_key]) {
mappedDict[mapped_key] = [];
}
mappedDict[mapped_key].push(val);
}
mapper = eval('('+fnc+')');
var mappedList = new Array();
for(var i=0; i<docList.length; i++) {
var thisDoc = eval('('+docList[i]+')');
var mappedVal = (mapper).call(thisDoc);
}
return mappedDict;
}
""")
reduce_ctx = execjs.compile("""
function doReduce(fnc, docList) {
var reducedList = new Array();
reducer = eval('('+fnc+')');
for(var key in docList) {
var reducedVal = {'_id': key,
'value': reducer(key, docList[key])};
reducedList.push(reducedVal);
}
return reducedList;
}
""")
doc_list = [json.dumps(doc, default=json_util.default) for doc in self.find(query)]
mapped_rows = map_ctx.call('doMap', map_func, doc_list)
reduced_rows = reduce_ctx.call('doReduce', reduce_func, mapped_rows)[:limit]
for reduced_row in reduced_rows:
if reduced_row['_id'].startswith('$oid'):
reduced_row['_id'] = ObjectId(reduced_row['_id'][4:])
reduced_rows = sorted(reduced_rows, key=lambda x: x['_id'])
if full_response:
full_dict['counts']['input'] = len(doc_list)
for key in mapped_rows.keys():
emit_count = len(mapped_rows[key])
full_dict['counts']['emit'] += emit_count
if emit_count > 1:
full_dict['counts']['reduce'] += 1
full_dict['counts']['output'] = len(reduced_rows)
if isinstance(out, (str, bytes)):
out_collection = getattr(self._Collection__database, out)
out_collection.drop()
out_collection.insert(reduced_rows)
ret_val = out_collection
full_dict['result'] = out
elif isinstance(out, SON) and out.get('replace') and out.get('db'):
# Must be of the format SON([('replace','results'),('db','outdb')])
out_db = getattr(self._Collection__database._Database__connection, out['db'])
out_collection = getattr(out_db, out['replace'])
out_collection.insert(reduced_rows)
ret_val = out_collection
full_dict['result'] = {'db': out['db'], 'collection': out['replace']}
elif isinstance(out, dict) and out.get('inline'):
ret_val = reduced_rows
full_dict['result'] = reduced_rows
else:
raise TypeError("'out' must be an instance of string, dict or bson.SON")
full_dict['timeMillis'] = int(round((time.clock() - start_time) * 1000))
if full_response:
ret_val = full_dict
return ret_val
    def inline_map_reduce(self, map_func, reduce_func, full_response=False, query=None, limit=0):
        # map_reduce with inline output: the reduced rows are returned directly.
        return self.map_reduce(map_func, reduce_func, {'inline':1}, full_response, query, limit)
    def distinct(self, key):
        # Delegate to the cursor implementation over all documents.
        return self.find().distinct(key)
def group(self, key, condition, initial, reduce, finalize=None):
if execjs is None:
raise NotImplementedError(
"PyExecJS is required in order to use group. "
"Use 'pip install pyexecjs pymongo' to support group mock."
)
reduce_ctx = execjs.compile("""
function doReduce(fnc, docList) {
reducer = eval('('+fnc+')');
for(var i=0, l=docList.length; i<l; i++) {
try {
reducedVal = reducer(docList[i-1], docList[i]);
}
catch (err) {
continue;
}
}
return docList[docList.length - 1];
}
""")
ret_array = []
doc_list_copy = []
ret_array_copy = []
reduced_val = {}
doc_list = [doc for doc in self.find(condition)]
for doc in doc_list:
doc_copy = copy.deepcopy(doc)
for k in doc:
if isinstance(doc[k], ObjectId):
doc_copy[k] = str(doc[k])
if k not in key and k not in reduce:
del doc_copy[k]
for initial_key in initial:
if initial_key in doc.keys():
pass
else:
doc_copy[initial_key] = initial[initial_key]
doc_list_copy.append(doc_copy)
doc_list = doc_list_copy
for k in key:
doc_list = sorted(doc_list, key=lambda x: _resolve_key(k, x))
for k in key:
if not isinstance(k, basestring):
raise TypeError("Keys must be a list of key names, "
"each an instance of %s" % (basestring.__name__,))
for k2, group in itertools.groupby(doc_list, lambda item: item[k]):
group_list = ([x for x in group])
reduced_val = reduce_ctx.call('doReduce', reduce, group_list)
ret_array.append(reduced_val)
for doc in ret_array:
doc_copy = copy.deepcopy(doc)
for k in doc:
if k not in key and k not in initial.keys():
del doc_copy[k]
ret_array_copy.append(doc_copy)
ret_array = ret_array_copy
return ret_array
def aggregate(self, pipeline, **kwargs):
pipeline_operators = ['$project','$match','$redact','$limit','$skip','$unwind','$group','$sort','$geoNear','$out']
group_operators = ['$addToSet', '$first','$last','$max','$min','$avg','$push','$sum']
boolean_operators = ['$and','$or', '$not']
set_operators = ['$setEquals', '$setIntersection', '$setDifference', '$setUnion', '$setIsSubset', '$anyElementTrue', '$allElementsTrue']
compairison_operators = ['$cmp','$eq','$gt','$gte','$lt','$lte','$ne']
aritmetic_operators = ['$add','$divide','$mod','$multiply','$subtract']
string_operators = ['$concat','$strcasecmp','$substr','$toLower','$toUpper']
text_search_operators = ['$meta']
array_operators = ['$size']
projection_operators = ['$map', '$let', '$literal']
date_operators = ['$dayOfYear','$dayOfMonth','$dayOfWeek','$year','$month','$week','$hour','$minute','$second','$millisecond']
conditional_operators = ['$cond', '$ifNull']
out_collection = [doc for doc in self.find()]
grouped_collection = []
for expression in pipeline:
for k, v in iteritems(expression):
if k == '$match':
out_collection = [doc for doc in out_collection if filter_applies(v, doc)]
elif k == '$group':
group_func_keys = expression['$group']['_id'][1:]
for group_key in reversed(group_func_keys):
out_collection = sorted(out_collection, key=lambda x: _resolve_key(group_key, x))
for field, value in iteritems(v):
if field != '_id':
for func, key in iteritems(value):
if func == "$sum" or "$avg":
for group_key in group_func_keys:
for ret_value, group in itertools.groupby(out_collection, lambda item: item[group_key]):
doc_dict = {}
group_list = ([x for x in group])
doc_dict['_id'] = ret_value
current_val = 0
if func == "$sum":
for doc in group_list:
current_val = sum([current_val, doc[field]])
doc_dict[field] = current_val
else:
for doc in group_list:
current_val = sum([current_val, doc[field]])
avg = current_val / len(group_list)
doc_dict[field] = current_val
grouped_collection.append(doc_dict)
else:
if func in group_operators:
raise NotImplementedError(
"Although %s is a valid group operator for the aggregation pipeline, "
"%s is currently not implemented in Mongomock."
)
else:
raise NotImplementedError(
"%s is not a valid group operator for the aggregation pipeline. "
"See http://docs.mongodb.org/manual/meta/aggregation-quick-reference/ "
"for a complete list of valid operators."
)
out_collection = grouped_collection
elif k == '$sort':
sort_array = []
for x, y in v.items():
sort_array.append({x:y})
for sort_pair in reversed(sort_array):
for sortKey, sortDirection in sort_pair.items():
out_collection = sorted(out_collection, key = lambda x: _resolve_key(sortKey, x), reverse = sortDirection < 0)
elif k == '$skip':
out_collection = out_collection[v:]
elif k == '$limit':
out_collection = out_collection[:v]
elif k == '$unwind':
if not isinstance(v, basestring) and v[0] != '$':
raise ValueError("$unwind failed: exception: field path references must be prefixed with a '$' ('%s'"%str(v))
if len(v.split('.')) > 1:
raise NotImplementedError('Mongmock does not currently support nested field paths in the $unwind implementation. ("%s"'%v)
unwound_collection = []
for doc in out_collection:
array_value = doc.get(v[1:])
if array_value in (None, []):
continue
elif not isinstance(array_value, list):
raise TypeError('$unwind must specify an array field, field: "%s", value found: %s'%(str(v),str(array_value)))
for field_item in array_value:
unwound_collection.append(copy.deepcopy(doc))
unwound_collection[-1][v[1:]] = field_item
out_collection = unwound_collection
else:
if k in pipeline_operators:
raise NotImplementedError(
"Although %s is a valid operator for the aggregation pipeline, "
"%s is currently not implemented in Mongomock."
)
else:
raise NotImplementedError(
"%s is not a valid operator for the aggregation pipeline. "
"See http://docs.mongodb.org/manual/meta/aggregation-quick-reference/ "
"for a complete list of valid operators."
)
return {'ok':1.0, 'result':out_collection}
def _resolve_key(key, doc):
    # First candidate value for a (possibly dotted) key, or the NOTHING sentinel.
    return next(iter(iter_key_candidates(key, doc)), NOTHING)
class Cursor(object):
    """Lazy iterator over a result set produced by Collection.find()."""
    def __init__(self, collection, dataset_factory, limit=0):
        super(Cursor, self).__init__()
        self.collection = collection
        # The factory is kept so clone() can restart iteration from scratch.
        self._factory = dataset_factory
        self._dataset = self._factory()
        self._limit = limit if limit != 0 else None #pymongo limit defaults to 0, returning everything
        self._skip = None
    def __iter__(self):
        return self
    def clone(self):
        # NOTE(review): a pending _skip is not propagated to the clone --
        # confirm whether pymongo's clone() preserves skip state.
        return Cursor(self.collection, self._factory, self._limit)
    def __next__(self):
        # Apply any pending skip lazily, on first consumption.
        if self._skip:
            for i in range(self._skip):
                next(self._dataset)
            self._skip = None
        if self._limit is not None and self._limit <= 0:
            raise StopIteration()
        if self._limit is not None:
            self._limit -= 1
        return next(self._dataset)
    # Python 2 iterator protocol alias.
    next = __next__
    def sort(self, key_or_list, direction = None):
        """Sort the remaining results by one key or a list of
        (key, direction) pairs; stable sorts applied last-to-first
        give multi-key ordering. Returns self for chaining."""
        if direction is None:
            direction = 1
        if isinstance(key_or_list, (tuple, list)):
            for sortKey, sortDirection in reversed(key_or_list):
                self._dataset = iter(sorted(self._dataset, key = lambda x: _resolve_key(sortKey, x), reverse = sortDirection < 0))
        else:
            self._dataset = iter(sorted(self._dataset, key = lambda x: _resolve_key(key_or_list, x), reverse = direction < 0))
        return self
    def count(self, with_limit_and_skip=False):
        """Count the remaining results; materializes the dataset and
        then restores it so iteration can continue."""
        arr = [x for x in self._dataset]
        count = len(arr)
        if with_limit_and_skip:
            if self._skip:
                count -= self._skip
            if self._limit and count > self._limit:
                count = self._limit
        self._dataset = iter(arr)
        return count
    def skip(self, count):
        # Recorded now, applied lazily in __next__. Chainable.
        self._skip = count
        return self
    def limit(self, count):
        self._limit = count
        return self
    def batch_size(self, count):
        # Batching is meaningless for an in-memory cursor; API compatibility only.
        return self
    def close(self):
        # Nothing to release for an in-memory cursor.
        pass
    def distinct(self, key):
        """Return the distinct values of *key* across the remaining results.

        List/tuple values contribute their elements; missing keys are skipped.
        """
        if not isinstance(key, basestring):
            raise TypeError('cursor.distinct key must be a string')
        unique = set()
        for x in iter(self._dataset):
            value = _resolve_key(key, x)
            if value == NOTHING: continue
            unique.update(value if isinstance(value, (tuple, list)) else [value])
        return list(unique)
    def __getitem__(self, index):
        # Materializes the dataset to support indexing, then restores it.
        # NOTE(review): _skip/_limit are not taken into account here.
        arr = [x for x in self._dataset]
        count = len(arr)
        self._dataset = iter(arr)
        return arr[index]
def _set_updater(doc, field_name, value):
if isinstance(value, (tuple, list)):
value = copy.deepcopy(value)
if isinstance(doc, dict):
doc[field_name] = value
def _inc_updater(doc, field_name, value):
if isinstance(doc, dict):
doc[field_name] = doc.get(field_name, 0) + value
def _sum_updater(doc, field_name, current, result):
if isinstance(doc, dict):
result = current + doc.get[field_name, 0]
return result
| chartbeat-labs/mongomock | mongomock/collection.py | Python | bsd-3-clause | 39,269 |
from binding import *
from .Value import ValueSymbolTable, Value
from .ADT.StringRef import StringRef
# Declarative binding spec: this class body is consumed by the
# @ValueSymbolTable decorator (imported from .Value) to generate the C++
# llvm::ValueSymbolTable wrapper -- it is not an ordinary Python class.
@ValueSymbolTable
class ValueSymbolTable:
    # The header moved under llvm/IR/ starting with LLVM 3.3.
    if LLVM_VERSION >= (3, 3):
        _include_ = 'llvm/IR/ValueSymbolTable.h'
    else:
        _include_ = 'llvm/ValueSymbolTable.h'
    new = Constructor()
    delete = Destructor()
    # lookup(name) -> Value*; the Python str argument is cast to StringRef.
    lookup = Method(ptr(Value), cast(str, StringRef))
    empty = Method(cast(Bool, bool))
    size = Method(cast(Unsigned, int))
    dump = Method(Void)
| llvmpy/llvmpy | llvmpy/src/ValueSymbolTable.py | Python | bsd-3-clause | 486 |
import logging
import emission.storage.timeseries.abstract_timeseries as esta
import emission.core.wrapper.entry as ecwe
def get_last_entry(user_id, time_query, config_key):
    """Return (data, write_ts) of the user's most recent *config_key* entry
    within *time_query*, or (None, None) when no override exists."""
    timeseries = esta.TimeSeries.get_time_series(user_id)
    # Overrides recorded in this time range. This is non-empty only if there
    # has been an override since the last run, which needs to be saved back
    # into the cache.
    overrides = list(timeseries.find_entries([config_key], time_query))
    logging.debug("Found %d user overrides for user %s" % (len(overrides), user_id))
    if not overrides:
        logging.warning("No user defined overrides for %s, early return" % user_id)
        return (None, None)
    # Entries are sorted by write_ts, so the last one is the most recent.
    last_entry = ecwe.Entry(overrides[-1])
    logging.debug("last entry is %s" % last_entry)
    return (last_entry.data, last_entry.metadata.write_ts)
| yw374cornell/e-mission-server | emission/analysis/configs/config_utils.py | Python | bsd-3-clause | 967 |
# $Filename$
# $Authors$
#
# Last Changed: $Date$ $Committer$ $Revision-Id$
#
# Copyright (c) 2003-2011, German Aerospace Center (DLR)
# All rights reserved.
#
#Redistribution and use in source and binary forms, with or without
#modification, are permitted provided that the following conditions are
#met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of the German Aerospace Center nor the names of
# its contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
#LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
#A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
#OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
#SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
#LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
#DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
#THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
#(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
#OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Implements privileges.
"""
__version__ = "$Revision-Id:$" | DLR-SC/DataFinder | src/datafinder/core/item/privileges/__init__.py | Python | bsd-3-clause | 1,769 |
import torch
from .Criterion import Criterion
from .utils import recursiveResizeAs, recursiveFill, recursiveAdd
class ParallelCriterion(Criterion):
    """Weighted sum of several criterions, each fed its own input element.

    With repeatTarget=True the same target is passed to every criterion;
    otherwise criterion i receives target[i].
    """
    def __init__(self, repeatTarget=False):
        super(ParallelCriterion, self).__init__()
        self.criterions = []
        self.weights = []
        self.gradInput = []
        self.repeatTarget = repeatTarget
    def add(self, criterion, weight=1):
        """Register *criterion* with an optional loss *weight*; chainable."""
        self.criterions.append(criterion)
        self.weights.append(weight)
        return self
    def updateOutput(self, input, target):
        # Total loss = sum_i weight_i * loss_i(input[i], target_i).
        self.output = 0
        for i, criterion in enumerate(self.criterions):
            current_target = target if self.repeatTarget else target[i]
            self.output += self.weights[i] * criterion.updateOutput(input[i], current_target)
        return self.output
    def updateGradInput(self, input, target):
        # Resize gradInput to mirror the (possibly nested) input structure,
        # zero it, then accumulate each criterion's weighted gradient.
        self.gradInput = recursiveResizeAs(self.gradInput, input)[0]
        recursiveFill(self.gradInput, 0)
        for i, criterion in enumerate(self.criterions):
            current_target = target if self.repeatTarget else target[i]
            recursiveAdd(self.gradInput[i], self.weights[i], criterion.updateGradInput(input[i], current_target))
        return self.gradInput
    def type(self, type=None, tensorCache=None):
        # Drop cached gradients before converting tensor types; the parameter
        # intentionally shadows the builtin `type` (legacy nn API signature).
        self.gradInput = []
        return super(ParallelCriterion, self).type(type, tensorCache)
| RPGOne/Skynet | pytorch-master/torch/legacy/nn/ParallelCriterion.py | Python | bsd-3-clause | 1,404 |
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT
from __future__ import unicode_literals
from ..epi import ApplyTOPUP
def test_ApplyTOPUP_inputs():
input_map = dict(args=dict(argstr='%s',
),
datatype=dict(argstr='-d=%s',
),
encoding_file=dict(argstr='--datain=%s',
mandatory=True,
),
environ=dict(nohash=True,
usedefault=True,
),
ignore_exception=dict(nohash=True,
usedefault=True,
),
in_files=dict(argstr='--imain=%s',
mandatory=True,
sep=',',
),
in_index=dict(argstr='--inindex=%s',
sep=',',
),
in_topup_fieldcoef=dict(argstr='--topup=%s',
copyfile=False,
requires=['in_topup_movpar'],
),
in_topup_movpar=dict(copyfile=False,
requires=['in_topup_fieldcoef'],
),
interp=dict(argstr='--interp=%s',
),
method=dict(argstr='--method=%s',
),
out_corrected=dict(argstr='--out=%s',
name_source=['in_files'],
name_template='%s_corrected',
),
output_type=dict(),
terminal_output=dict(deprecated='1.0.0',
nohash=True,
),
)
inputs = ApplyTOPUP.input_spec()
for key, metadata in list(input_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(inputs.traits()[key], metakey) == value
def test_ApplyTOPUP_outputs():
output_map = dict(out_corrected=dict(),
)
outputs = ApplyTOPUP.output_spec()
for key, metadata in list(output_map.items()):
for metakey, value in list(metadata.items()):
assert getattr(outputs.traits()[key], metakey) == value
| mick-d/nipype | nipype/interfaces/fsl/tests/test_auto_ApplyTOPUP.py | Python | bsd-3-clause | 1,586 |
from django.test import TestCase
from manager.models import Page
from datetime import datetime, timedelta
from django.utils import timezone
class PageTestCase(TestCase):
def setUp(self):
now = timezone.now()
Page.objects.create(url="testurl", description="test description")
    def test_regular_page_active(self):
        """Page with no pause or time/date range is active."""
        # Created as "testurl" in setUp but fetched as "/testurl" --
        # presumably the model normalizes URLs with a leading slash on
        # save; TODO confirm against manager.models.Page.
        page = Page.objects.get(url="/testurl")
        self.assertFalse(page.is_paused())
        self.assertTrue(page.is_active())
def test_paused_page_not_active(self):
"""Page that has been paused is not active."""
page = Page.objects.get(url="/testurl")
page.pause_at = timezone.now().replace(hour=12)
current_time = timezone.now().replace(hour=13)
self.assertTrue(page.is_paused(current_time))
self.assertFalse(page.is_active(current_time))
def test_previously_paused_page_active(self):
"""Page that has is not paused but has been in the past is active."""
page = Page.objects.get(url="/testurl")
page.paused_at = timezone.now() - timedelta(hours=48)
self.assertFalse(page.is_paused())
self.assertTrue(page.is_active())
page.paused_at = timezone.now()
morning = timezone.now().replace(hour=6)
self.assertFalse(page.is_paused(morning))
self.assertTrue(page.is_active(morning))
def test_page_active_time_of_day(self):
"""Page has certain times of day it should be visible."""
page = Page.objects.get(url="/testurl")
now = timezone.now().replace(hour=12)
# Default page has no times -> active
self.assertTrue(page.is_active(now))
# Set start time in the future
page.active_time_start = now.replace(hour=13).time()
self.assertFalse(page.is_active(now))
# Set time to be past start time
now = now.replace(hour=14)
self.assertTrue(page.is_active(now))
# Set end time in the future, still active
page.active_time_end = now.replace(hour=15).time()
self.assertTrue(page.is_active(now))
# Set time to be past end-time -> inactive
now = now.replace(hour=16)
self.assertFalse(page.is_active(now))
# Set start time in the future but bigger than end-time
page.active_time_start = now.replace(hour=17).time()
self.assertFalse(page.is_active(now))
# Time bigger than start time in the evening
now = now.replace(hour=19)
self.assertTrue(page.is_active(now))
def test_page_date_range(self):
"""Page has certains dates it should be visible."""
page = Page.objects.get(url="/testurl")
now = timezone.now()
today = now.date()
page.active_date_start = today
self.assertTrue(page.is_active(now))
page.active_date_start = today + timedelta(days=1)
self.assertFalse(page.is_active(now))
page.active_date_start = today - timedelta(days=7)
page.active_date_end = today - timedelta(days=3)
self.assertFalse(page.is_active(now))
def test_page_weekdays(self):
"""Page is active on certain weekdays"""
page = Page.objects.get(url="/testurl")
now = datetime(2014, 4, 28, 16, 53) # Monday
page.active_date_start = now.date()
self.assertTrue(page.is_active(now))
page.monday = False
self.assertFalse(page.is_active(now))
now = now + timedelta(days=1)
self.assertTrue(page.is_active(now))
| olkku/tf-info | manager/tests.py | Python | bsd-3-clause | 3,567 |
from __future__ import absolute_import, unicode_literals
######################
# MEZZANINE SETTINGS #
######################
# The following settings are already defined with default values in
# the ``defaults.py`` module within each of Mezzanine's apps, but are
# common enough to be put here, commented out, for convenient
# overriding. Please consult the settings documentation for a full list
# of settings Mezzanine implements:
# http://mezzanine.jupo.org/docs/configuration.html#default-settings
# Controls the ordering and grouping of the admin menu.
#
# ADMIN_MENU_ORDER = (
# ("Content", ("pages.Page", "blog.BlogPost",
# "generic.ThreadedComment", ("Media Library", "fb_browse"),)),
# ("Site", ("sites.Site", "redirects.Redirect", "conf.Setting")),
# ("Users", ("auth.User", "auth.Group",)),
# )
# A three item sequence, each containing a sequence of template tags
# used to render the admin dashboard.
#
# DASHBOARD_TAGS = (
# ("blog_tags.quick_blog", "mezzanine_tags.app_list"),
# ("comment_tags.recent_comments",),
# ("mezzanine_tags.recent_actions",),
# )
# A sequence of templates used by the ``page_menu`` template tag. Each
# item in the sequence is a three item sequence, containing a unique ID
# for the template, a label for the template, and the template path.
# These templates are then available for selection when editing which
# menus a page should appear in. Note that if a menu template is used
# that doesn't appear in this setting, all pages will appear in it.
# PAGE_MENU_TEMPLATES = (
# (1, "Top navigation bar", "pages/menus/dropdown.html"),
# (2, "Left-hand tree", "pages/menus/tree.html"),
# (3, "Footer", "pages/menus/footer.html"),
# )
# A sequence of fields that will be injected into Mezzanine's (or any
# library's) models. Each item in the sequence is a four item sequence.
# The first two items are the dotted path to the model and its field
# name to be added, and the dotted path to the field class to use for
# the field. The third and fourth items are a sequence of positional
# args and a dictionary of keyword args, to use when creating the
# field instance. When specifying the field class, the path
# ``django.models.db.`` can be omitted for regular Django model fields.
#
# EXTRA_MODEL_FIELDS = (
# (
# # Dotted path to field.
# "mezzanine.blog.models.BlogPost.image",
# # Dotted path to field class.
# "somelib.fields.ImageField",
# # Positional args for field class.
# ("Image",),
# # Keyword args for field class.
# {"blank": True, "upload_to": "blog"},
# ),
# # Example of adding a field to *all* of Mezzanine's content types:
# (
# "mezzanine.pages.models.Page.another_field",
# "IntegerField", # 'django.db.models.' is implied if path is omitted.
# ("Another name",),
# {"blank": True, "default": 1},
# ),
# )
# Setting to turn on featured images for blog posts. Defaults to False.
#
# BLOG_USE_FEATURED_IMAGE = True
# If True, the south application will be automatically added to the
# INSTALLED_APPS setting.
USE_SOUTH = True
########################
# MAIN DJANGO SETTINGS #
########################
# People who get code error notifications.
# In the format (('Full Name', 'email@example.com'),
# ('Full Name', 'anotheremail@example.com'))
ADMINS = (
('administrator', 'administrator@example.com'),
)
MANAGERS = ADMINS
# Hosts/domain names that are valid for this site; required if DEBUG is False
# See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts
ALLOWED_HOSTS = ['127.0.0.1']
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = "Europe/Rome"
# If you set this to True, Django will use timezone-aware datetimes.
USE_TZ = True
# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = "en"
# Supported languages
_ = lambda s: s
LANGUAGES = (
('en', _('English')),
)
# A boolean that turns on/off debug mode. When set to ``True``, stack traces
# are displayed for error pages. Should always be set to ``False`` in
# production. Best set to ``True`` in local_settings.py
DEBUG = True
# Whether a user's session cookie expires when the Web browser is closed.
SESSION_EXPIRE_AT_BROWSER_CLOSE = True
SITE_ID = 1
# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = False
# Tuple of IP addresses, as strings, that:
# * See debug comments, when DEBUG is true
# * Receive x-headers
INTERNAL_IPS = ("127.0.0.1",)
# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
"django.template.loaders.filesystem.Loader",
"django.template.loaders.app_directories.Loader",
)
AUTHENTICATION_BACKENDS = ("mezzanine.core.auth_backends.MezzanineBackend",)
# List of finder classes that know how to find static files in
# various locations.
STATICFILES_FINDERS = (
"django.contrib.staticfiles.finders.FileSystemFinder",
"django.contrib.staticfiles.finders.AppDirectoriesFinder",
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
# The numeric mode to set newly-uploaded files to. The value should be
# a mode you'd pass directly to os.chmod.
FILE_UPLOAD_PERMISSIONS = 0o644
#############
# DATABASES #
#############
DATABASES = {
"default": {
# Add "postgresql_psycopg2", "mysql", "sqlite3" or "oracle".
"ENGINE": "django.db.backends.postgresql_psycopg2",
# DB name or path to database file if using sqlite3.
"NAME": "mezzanine_mailchimper",
# Not used with sqlite3.
"USER": "",
# Not used with sqlite3.
"PASSWORD": "",
# Set to empty string for localhost. Not used with sqlite3.
"HOST": "localhost",
# Set to empty string for default. Not used with sqlite3.
"PORT": "",
"ATOMIC_REQUESTS": True,
}
}
#########
# PATHS #
#########
import os
# Full filesystem path to the project.
PROJECT_ROOT = os.path.dirname(os.path.abspath(__file__))
# Name of the directory for the project.
PROJECT_DIRNAME = PROJECT_ROOT.split(os.sep)[-1]
# Every cache key will get prefixed with this value - here we set it to
# the name of the directory the project is in to try and use something
# project specific.
CACHE_MIDDLEWARE_KEY_PREFIX = PROJECT_DIRNAME
# URL prefix for static files.
# Example: "http://media.lawrence.com/static/"
STATIC_URL = "/static/"
# Absolute path to the directory static files should be collected to.
# Don't put anything in this directory yourself; store your static files
# in apps' "static/" subdirectories and in STATICFILES_DIRS.
# Example: "/home/media/media.lawrence.com/static/"
STATIC_ROOT = os.path.join(PROJECT_ROOT, STATIC_URL.strip("/"))
# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash.
# Examples: "http://media.lawrence.com/media/", "http://example.com/media/"
MEDIA_URL = STATIC_URL + "media/"
# Absolute filesystem path to the directory that will hold user-uploaded files.
# Example: "/home/media/media.lawrence.com/media/"
MEDIA_ROOT = os.path.join(PROJECT_ROOT, *MEDIA_URL.strip("/").split("/"))
# Package/module name to import the root urlpatterns from for the project.
ROOT_URLCONF = "%s.urls" % PROJECT_DIRNAME
# Put strings here, like "/home/html/django_templates"
# or "C:/www/django/templates".
# Always use forward slashes, even on Windows.
# Don't forget to use absolute paths, not relative paths.
TEMPLATE_DIRS = (os.path.join(PROJECT_ROOT, "templates"),)
###########
# LOGGING #
###########
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse',
},
'require_debug_true': {
'()': 'django.utils.log.RequireDebugTrue',
},
},
'handlers': {
'console': {
'level': 'INFO',
'filters': ['require_debug_true'],
'class': 'logging.StreamHandler',
},
'null': {
'class': 'django.utils.log.NullHandler',
},
'mail_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler',
# 'email_backend': 'django.core.mail.backends.console.'
# 'EmailBackend',
}
},
'loggers': {
'django': {
'handlers': ['console'],
},
'django.request': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': True,
},
'django.security': {
'handlers': ['mail_admins'],
'level': 'ERROR',
'propagate': False,
},
'py.warnings': {
'handlers': ['console'],
},
}
}
################
# APPLICATIONS #
################
INSTALLED_APPS = (
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.redirects",
"django.contrib.sessions",
"django.contrib.sites",
"django.contrib.sitemaps",
"django.contrib.staticfiles",
"mailchimper",
"mezzanine.boot",
"mezzanine.conf",
"mezzanine.core",
"mezzanine.generic",
"mezzanine.pages",
"mezzanine.accounts",
# "django_pdb",
"crispy_forms",
# "functional_tests",
)
# List of processors used by RequestContext to populate the context.
# Each one should be a callable that takes the request object as its
# only parameter and returns a dictionary to add to the context.
TEMPLATE_CONTEXT_PROCESSORS = (
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
"django.core.context_processors.debug",
"django.core.context_processors.i18n",
"django.core.context_processors.static",
"django.core.context_processors.media",
"django.core.context_processors.request",
"django.core.context_processors.tz",
"mezzanine.conf.context_processors.settings",
)
# List of middleware classes to use. Order is important; in the request phase,
# these middleware classes will be applied in the order given, and in the
# response phase the middleware will be applied in reverse order.
MIDDLEWARE_CLASSES = (
"mezzanine.core.middleware.UpdateCacheMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.locale.LocaleMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"mezzanine.core.request.CurrentRequestMiddleware",
"mezzanine.core.middleware.RedirectFallbackMiddleware",
"mezzanine.core.middleware.TemplateForDeviceMiddleware",
"mezzanine.core.middleware.TemplateForHostMiddleware",
"mezzanine.core.middleware.AdminLoginInterfaceSelectorMiddleware",
"mezzanine.core.middleware.SitePermissionMiddleware",
# Uncomment the following if using any of the SSL settings:
# "mezzanine.core.middleware.SSLRedirectMiddleware",
"mezzanine.pages.middleware.PageMiddleware",
"mezzanine.core.middleware.FetchFromCacheMiddleware",
# "django_pdb.middleware.PdbMiddleware",
)
# Store these package names here as they may change in the future since
# at the moment we are using custom forks of them.
PACKAGE_NAME_FILEBROWSER = "filebrowser_safe"
PACKAGE_NAME_GRAPPELLI = "grappelli_safe"
#########################
# OPTIONAL APPLICATIONS #
#########################
# These will be added to ``INSTALLED_APPS``, only if available.
OPTIONAL_APPS = (
"debug_toolbar",
"django_extensions",
"compressor",
PACKAGE_NAME_FILEBROWSER,
PACKAGE_NAME_GRAPPELLI,
)
DEBUG_TOOLBAR_CONFIG = {"INTERCEPT_REDIRECTS": False}
###################
# DEPLOY SETTINGS #
###################
# These settings are used by the default fabfile.py provided.
# Check fabfile.py for defaults.
# FABRIC = {
# "SSH_USER": "", # SSH username
# "SSH_PASS": "", # SSH password (consider key-based authentication)
# "SSH_KEY_PATH": "", # Local path to SSH key file, for key-based auth
# "HOSTS": [], # List of hosts to deploy to
# "VIRTUALENV_HOME": "", # Absolute remote path for virtualenvs
# "PROJECT_NAME": "", # Unique identifier for project
# "REQUIREMENTS_PATH": "", # Path to pip requirements, relative to project
# "GUNICORN_PORT": 8000, # Port gunicorn will listen on
# "LOCALE": "en_US.UTF-8", # Should end with ".UTF-8"
# "LIVE_HOSTNAME": "www.example.com", # Host for public site.
# "REPO_URL": "", # Git or Mercurial remote repo URL for the project
# "DB_PASS": "", # Live database password
# "ADMIN_PASS": "", # Live admin user password
# "SECRET_KEY": SECRET_KEY,
# "NEVERCACHE_KEY": NEVERCACHE_KEY,
# }
##################
# LOCAL SETTINGS #
##################
# Allow any settings to be defined in local_settings.py which should be
# ignored in your version control system allowing for settings to be
# defined per machine.
try:
    # Machine-specific overrides; local_settings.py is expected to be kept
    # out of version control.
    from local_settings import *
except ImportError:
    pass
# Make these unique, and don't share it with anybody.
# NOTE(review): these keys are assigned *after* the local_settings import
# above, so any SECRET_KEY/NEVERCACHE_KEY set in local_settings.py is
# silently clobbered here. The "%(...)s" placeholders are presumably
# substituted at project-creation time -- confirm whether local overrides
# should win and, if so, move these two lines above the import.
SECRET_KEY = "%(SECRET_KEY)s"
NEVERCACHE_KEY = "%(NEVERCACHE_KEY)s"
# django-crispy-forms rendering template pack.
CRISPY_TEMPLATE_PACK = 'bootstrap'
# for functional tests
# NOTE(review): grappelli_safe and filebrowser_safe are also listed in
# OPTIONAL_APPS above; if set_dynamic_settings() adds them from there as
# well, INSTALLED_APPS ends up with duplicate entries -- verify this is
# intentional.
INSTALLED_APPS = list(INSTALLED_APPS) + [
    PACKAGE_NAME_GRAPPELLI, PACKAGE_NAME_FILEBROWSER,
    'django.contrib.redirects']
# Select a test runner appropriate for the installed Django version: the
# external "discover_runner" backport is required on Django <= 1.5, while
# Django 1.6+ ships DiscoverRunner in core.
from django import get_version  # retained: was previously imported here
from django import VERSION as DJANGO_VERSION
# Compare the (major, minor) tuple instead of the old
# int(get_version().split('.')[1]) <= 5 test, which looked only at the
# minor version component and would misclassify e.g. Django 2.0
# (minor 0 <= 5) as needing the backport.
if DJANGO_VERSION[:2] <= (1, 5):
    TEST_RUNNER = 'discover_runner.DiscoverRunner'
    # Match files named test_*.py (the discovery pattern Django 1.6+ uses
    # by default).
    TEST_DISCOVER_PATTERN = "test_*.py"
else:
    TEST_RUNNER = 'django.test.runner.DiscoverRunner'
####################
# DYNAMIC SETTINGS #
####################
# set_dynamic_settings() will rewrite globals based on what has been
# defined so far, in order to provide some better defaults where
# applicable. We also allow this settings module to be imported
# without Mezzanine installed, as the case may be when using the
# fabfile, where setting the dynamic settings below isn't strictly
# required.
try:
from mezzanine.utils.conf import set_dynamic_settings
except ImportError:
pass
else:
set_dynamic_settings(globals())
| simodalla/mezzanine_mailchimper | project_template/settings.py | Python | bsd-3-clause | 14,784 |
#!/usr/bin/env python
# Software License Agreement (BSD License)
#
# Copyright (c) 2014-2015, Dataspeed Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of Dataspeed Inc. nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import rospy
import re
# Patterns that excise the pedestal <link>/<joint> elements from a URDF
# document. Group 1 captures everything before the element and group 2
# everything after it; used with re.S so '.' also spans newlines.
# Raw string literals fix the invalid-escape sequences ('\s', '\"', '\/')
# that the original non-raw strings relied on (a DeprecationWarning on
# Python 3.6+); the matched text is unchanged.
match_link = r"(.*)<link[^>]*name\s*=\s*\"pedestal\"[^>]*>.*?[^<]<\/link>(.*)"
match_joint = r"(.*)<joint[^>]*name\s*=\s*\"pedestal_fixed\"[^>]*>.*?[^<]<\/joint>(.*)"
if __name__ == '__main__':
    try:
        # Read the robot URDF from param_src, strip the pedestal link and
        # its fixed joint, and write the result to param_dest.
        rospy.init_node('urdf_remove_pedestal', anonymous=True)
        param_src = rospy.get_param('~param_src', "/robot_description")
        param_dest = rospy.get_param('~param_dest', "/robot_description_mod")
        urdf = rospy.get_param(param_src, "")
        changed = False
        if urdf:
            # Drop the <link name="pedestal"> element, if present.
            obj = re.match(match_link, urdf, re.S)
            if obj:
                urdf = obj.group(1) + obj.group(2)
                changed = True
                rospy.loginfo("Removed link 'pedestal'")
            else:
                rospy.logwarn("Failed to find link 'pedestal'")
            # Drop the <joint name="pedestal_fixed"> element, if present.
            obj = re.match(match_joint, urdf, re.S)
            if obj:
                urdf = obj.group(1) + obj.group(2)
                changed = True
                rospy.loginfo("Removed joint 'pedestal_fixed'")
            else:
                rospy.logwarn("Failed to find joint 'pedestal_fixed'")
            # The (possibly unmodified) URDF is always written to param_dest.
            rospy.set_param(param_dest, urdf)
            if changed:
                rospy.loginfo("Updated parameter '%s'", param_dest)
            else:
                rospy.loginfo("Copied parameter '%s' to '%s'", param_src, param_dest)
        else:
            rospy.logwarn("Parameter '%s' not found", param_src)
    except rospy.ROSInterruptException: pass
| cmsc421/mobility_base_tools | scripts/urdf_remove_pedestal.py | Python | bsd-3-clause | 3,158 |
def extractFinebymetranslationsWordpressCom(item):
	'''
	Parser for 'finebymetranslations.wordpress.com'
	'''
	title = item['title']
	vol, chp, frag, postfix = extractVolChapterFragmentPostfix(title)
	# Skip items that carry no chapter/volume info, and preview posts.
	if (not vol and not chp) or 'preview' in title.lower():
		return None
	# (feed tag, series name, translation type)
	known_tags = (
		('Death Progress Bar', 'Death Progress Bar', 'translated'),
		('PRC',                'PRC',                'translated'),
		('Loiterous',          'Loiterous',          'oel'),
	)
	for feed_tag, series_name, tl_type in known_tags:
		if feed_tag in item['tags']:
			return buildReleaseMessageWithType(
				item, series_name, vol, chp, frag=frag,
				postfix=postfix, tl_type=tl_type)
	# Known site but no recognized series tag.
	return False
import json
import csv
# Directory holding per-team output files.
# NOTE(review): unused in this script -- presumably consumed by a sibling
# script; verify before removing.
teamfolder = "teamdata/out/"


def slim_player(player):
    """Trim a player record in place, keeping only age, eFG%, ORtg/DRtg
    and PER; return (player, PER)."""
    del player['pergame']
    del player['perminute']
    player['perposs'] = {"ORtg": player['perposs']['ORtg'],
                         "DRtg": player['perposs']['DRtg']}
    player['totals'] = {"Age": player['totals']['Age'],
                        "eFG%": player['totals']['eFG%']}
    per = player['advanced']['PER']
    player['advanced'] = {"PER": per}
    return player, per


def slim_team(team):
    """Trim a team record in place, keeping only the stats of interest.

    Players with five or fewer games are dropped; the PERs of the players
    that are kept are summed into team['totalper']. Returns the team.
    """
    team['info'] = {"PTS/G": team['info']['PTS/G'], "FG%": team['info']['FG%']}
    team['opponent'] = {"PTS/G": team['opponent']['PTS/G']}
    team['misc'] = {"Attendance": team['misc']['Attendance'],
                    "Age": team['misc']['Age'],
                    "ORtg": team['misc']['ORtg'],
                    "DRtg": team['misc']['DRtg']}
    kept = []
    total_per = 0
    for player in team['players']:
        # Ignore players with too few games for their stats to be meaningful.
        if player['advanced']['G'] > 5:
            slimmed, per = slim_player(player)
            kept.append(slimmed)
            total_per += per
    team['players'] = kept
    team['totalper'] = total_per
    return team


def main():
    """Read combined.json, trim every team record, write removed.json."""
    with open('combined.json', 'r') as jsonfile:
        data = json.load(jsonfile)
    for yeardata in data:
        # print() calls are written to run under both Python 2 and 3
        # (the original used Python 2 print statements).
        print("")
        print("NEW YEAR " + str(yeardata['year']))
        for team in yeardata['teams']:
            print("Doing team " + team['team'])
            slim_team(team)
    with open('removed.json', 'w') as outjsonfile:
        json.dump(data, outjsonfile, indent=4)


# Guarded so importing this module (e.g. for tests) no longer performs the
# file I/O at import time, as the original flat script did.
if __name__ == '__main__':
    main()
| Tankske/InfoVisNBA | scraper/remove.py | Python | bsd-3-clause | 1,393 |
#!/usr/bin/env python
from distutils.core import setup
# Distribution metadata for the pydouban package.
_METADATA = dict(
    name='pydouban',
    version='1.0.0',
    description='Lightweight Python Douban API Library',
    author='Marvour',
    author_email='marvour@gmail.com',
    license='BSD License',
    url='http://i.shiao.org/a/pydouban',
    packages=['pydouban'],
)

setup(**_METADATA)
| lepture/pydouban | setup.py | Python | bsd-3-clause | 336 |
# -*- coding: UTF-8 -*-
# YaBlog
# (c) Regis FLORET
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the <organization> nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL Regis FLORET BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
| regisf/yablog | blog/templatetags/__init__.py | Python | bsd-3-clause | 1,525 |
from django.contrib.auth.decorators import user_passes_test
from django.contrib.auth.models import Group
def group_required(names, login_url=None):
    """
    Decorator factory: require the logged-in user to belong to at least
    one of the given group(s).

    ``names`` may be a single group name or an iterable of names. A bare
    string is wrapped in a list explicitly because ``str`` is itself
    iterable on Python 3, so the old ``hasattr(names, '__iter__')`` test
    alone would treat it as a sequence of one-character group names.
    """
    if isinstance(names, str) or not hasattr(names, '__iter__'):
        names = [names]
    return user_passes_test(lambda u: u.groups.filter(name__in=names),
                            login_url=login_url)
| solex/django-odesk | django_odesk/auth/decorators.py | Python | bsd-3-clause | 446 |
from unittest import TestCase
from tcontrol.discretization import c2d
from ..transferfunction import tf
from ..model_conversion import *
from ..statespace import StateSpace
import numpy as np
from .tools.test_utility import assert_ss_equal
class TestDiscretization(TestCase):
    """Checks for c2d() continuous-to-discrete conversion against known
    reference discretizations of the plant 1/(s^2 + 1) at T = 1."""
    def setUp(self):
        # Continuous plant, its known ZOH discretization, and an
        # equivalent state-space realization.
        self.s1 = tf([1], [1, 0, 1])
        self.zoh = tf([0.4597, 0.4597], [1, 1.0806, 1], dt=1)
        self.ss = tf2ss(tf([1], [1, 0, 1]))
    def test_c2d_zoh(self):
        discretized = c2d(self.s1, 1, 'zoh')
        worst_error = np.max(np.abs(discretized.num - self.zoh.num))
        self.assertLessEqual(worst_error, 1e-4)
    def test_c2d_foh(self):
        expected = StateSpace([[0.540302, 0.841471], [-0.841471, 0.540302]],
                              [[0.773644], [0.49675]],
                              [[1, 0]], [[0.158529]], dt=1)
        assert_ss_equal(c2d(self.ss, 1, 'foh'), expected)
    def test_c2d_tustin(self):
        discretized = c2d(self.s1, 1, 'tustin')
        worst_error = np.max(np.abs(discretized.num - np.array([0.2, 0.4, 0.2])))
        self.assertLessEqual(worst_error, 1e-4)
    def test_c2d_matched(self):
        discretized = c2d(self.s1, 1, 'matched')
        worst_error = np.max(np.abs(discretized.num - np.array([0.2298, 0.4597, 0.2298])))
        self.assertLessEqual(worst_error, 1e-4)
import torch
import torch.nn as nn
class LayerNorm(nn.Module):
    """Layer normalization over the last dimension with a learnable scale
    (gamma) and shift (beta).

    Note: this variant uses torch.std's default (sample/unbiased) standard
    deviation and adds eps to the std rather than to the variance.
    """
    def __init__(self, features, eps=1e-6):
        super().__init__()
        self.gamma = nn.Parameter(torch.ones(features))
        self.beta = nn.Parameter(torch.zeros(features))
        self.eps = eps

    def forward(self, x):
        # Normalize each feature vector along the last axis, then apply
        # the learned affine transform.
        mu = x.mean(dim=-1, keepdim=True)
        sigma = x.std(dim=-1, keepdim=True)
        normalized = (x - mu) / (sigma + self.eps)
        return self.gamma * normalized + self.beta
| isaachenrion/jets | src/architectures/utils/layer_norm.py | Python | bsd-3-clause | 446 |
"""
WSGI config for uptee project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
# The settings module must be configured before any Django machinery is
# imported below; default to the project's settings when the environment
# does not already specify one.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "uptee.settings")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
# NOTE: this import is deliberately placed after the environ setup above --
# importing it earlier could initialize Django with the wrong settings.
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
| upTee/upTee | uptee/wsgi.py | Python | bsd-3-clause | 1,132 |
#---------------------------------
#Joseph Boyd - joseph.boyd@epfl.ch
#---------------------------------
import os ; import sys ; import pickle
def main():
    """Merge the user pickles in the current directory, de-duplicate them
    by screen_name, and split the result into num_partitions pickle files
    under outputs/.
    """
    num_partitions = 8
    unique_users = {}
    print("Merging...")
    for filename in os.listdir("."):
        # Only consume worker output files, identified by their extension.
        if filename[filename.rfind('.') + 1:] == 'pickle':
            with open(filename, 'rb') as f:
                users = pickle.load(f)
            print(len(users))
            for user in users:
                # Later files win on duplicate screen names.
                unique_users[user['screen_name']] = user
    print("Unique users: %s" % (len(unique_users)))
    print("Partitioning...")
    # list() so slicing works on Python 3 as well; '//' keeps the floor
    # division the original Python 2 '/' performed.
    all_users = list(unique_users.values())
    partition_size = len(all_users) // num_partitions
    for i in range(num_partitions):
        start = i * partition_size
        # The last partition absorbs the remainder: the original fixed-size
        # slicing silently dropped len(all_users) % num_partitions users.
        if i < num_partitions - 1:
            end = (i + 1) * partition_size
        else:
            end = len(all_users)
        with open('outputs/%s.pickle' % (i), 'wb') as f_unique_users:
            pickle.dump(all_users[start:end], f_unique_users)


if __name__ == '__main__':
    main()
| FAB4D/humanitas | data_collection/social_media/twitter/merge.py | Python | bsd-3-clause | 933 |
# -*- coding: utf-8 -*-
"""
Copyright (C) 2015, MuChu Hsu
Contributed by Muchu Hsu (muchu1983@gmail.com)
This file is part of BSD license
<https://opensource.org/licenses/BSD-3-Clause>
"""
import unittest
import logging
from cameo.spiderForPEDAILY import SpiderForPEDAILY
"""
測試 抓取 PEDAILY
"""
class SpiderForPEDAILYTest(unittest.TestCase):
    """Integration tests for SpiderForPEDAILY (drives a real webdriver)."""
    # Setup: create a spider and start its webdriver before each test.
    def setUp(self):
        logging.basicConfig(level=logging.INFO)
        self.spider = SpiderForPEDAILY()
        self.spider.initDriver()
    # Teardown: quit the webdriver after each test.
    def tearDown(self):
        self.spider.quitDriver()
    """
    #測試抓取 index page
    def test_downloadIndexPage(self):
        logging.info("SpiderForPEDAILYTest.test_downloadIndexPage")
        self.spider.downloadIndexPage()
    #測試抓取 category page
    def test_downloadCategoryPage(self):
        logging.info("SpiderForPEDAILYTest.test_downloadCategoryPage")
        self.spider.downloadCategoryPage()
    """
    # NOTE(review): the triple-quoted block above is disabled test code left
    # as a bare string expression; it has no runtime effect.
    # Test crawling the news pages (None crawls without a category filter).
    def test_downloadNewsPage(self):
        logging.info("SpiderForPEDAILYTest.test_downloadNewsPage")
        self.spider.downloadNewsPage(strCategoryName=None)
# Entry point: run the tests; exit=False keeps the interpreter alive after
# the run completes.
if __name__ == "__main__":
    unittest.main(exit=False)
| muchu1983/104_cameo | test/unit/test_spiderForPEDAILY.py | Python | bsd-3-clause | 1,237 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
# Benchmark case (appears generated from a parameter grid): a 32-sample
# daily series with a moving-average trend, 30-step cycle, AR(12)
# component, no transform, zero noise sigma and 20 exogenous variables.
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 30, transform = "None", sigma = 0.0, exog_count = 20, ar_order = 12);
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Add the auto_now_add 'created' timestamp column to Video2013."""
        # Adding field 'Video2013.created'
        # The fixed default only backfills existing rows during this
        # migration (keep_default=False); auto_now_add governs new rows.
        db.add_column('videos_video2013', 'created',
                      self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(2013, 2, 5, 0, 0), auto_now_add=True, blank=True),
                      keep_default=False)
def backwards(self, orm):
# Deleting field 'Video2013.created'
db.delete_column('videos_video2013', 'created')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'videos.award': {
'Meta': {'object_name': 'Award'},
'award_type': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'category': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'preview': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'blank': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'video': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['videos.Video2012']", 'null': 'True', 'blank': 'True'})
},
'videos.video2012': {
'Meta': {'object_name': 'Video2012'},
'bitly_link_db': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'category': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2012, 2, 28, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'judge_mark': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'region': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'shortlink': ('django.db.models.fields.CharField', [], {'max_length': '32', 'blank': 'True'}),
'state': ('django.db.models.fields.CharField', [], {'default': "'unsent'", 'max_length': '10'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'upload_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200'}),
'user_country': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'user_email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'user_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100'}),
'views': ('django.db.models.fields.BigIntegerField', [], {'default': '0'}),
'votes': ('django.db.models.fields.BigIntegerField', [], {'default': '0'})
},
'videos.video2013': {
'Meta': {'object_name': 'Video2013'},
'approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2013, 2, 5, 0, 0)', 'auto_now_add': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'processed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"}),
'user_notified': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'vimeo_id': ('django.db.models.fields.IntegerField', [], {})
}
}
complete_apps = ['videos'] | mozilla/firefox-flicks | flicks/videos/migrations/0020_auto__add_field_video2013_created.py | Python | bsd-3-clause | 7,284 |
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Index
import pandas._testing as tm
@pytest.mark.parametrize(
    "interpolation", ["linear", "lower", "higher", "nearest", "midpoint"]
)
@pytest.mark.parametrize(
    "a_vals,b_vals",
    [
        # Ints
        ([1, 2, 3, 4, 5], [5, 4, 3, 2, 1]),
        ([1, 2, 3, 4], [4, 3, 2, 1]),
        ([1, 2, 3, 4, 5], [4, 3, 2, 1]),
        # Floats
        ([1.0, 2.0, 3.0, 4.0, 5.0], [5.0, 4.0, 3.0, 2.0, 1.0]),
        # Missing data
        ([1.0, np.nan, 3.0, np.nan, 5.0], [5.0, np.nan, 3.0, np.nan, 1.0]),
        ([np.nan, 4.0, np.nan, 2.0, np.nan], [np.nan, 4.0, np.nan, 2.0, np.nan]),
        # Timestamps
        (
            list(pd.date_range("1/1/18", freq="D", periods=5)),
            list(pd.date_range("1/1/18", freq="D", periods=5))[::-1],
        ),
        # All NA
        ([np.nan] * 5, [np.nan] * 5),
    ],
)
@pytest.mark.parametrize("q", [0, 0.25, 0.5, 0.75, 1])
def test_quantile(interpolation, a_vals, b_vals, q):
    """Groupwise quantile matches Series.quantile applied per group."""
    if interpolation == "nearest" and q == 0.5 and b_vals == [4, 3, 2, 1]:
        pytest.skip(
            "Unclear numpy expectation for nearest result with equidistant data"
        )
    # Per-group expectation computed independently via Series.quantile
    a_expected = pd.Series(a_vals).quantile(q, interpolation=interpolation)
    b_expected = pd.Series(b_vals).quantile(q, interpolation=interpolation)
    df = DataFrame(
        {"key": ["a"] * len(a_vals) + ["b"] * len(b_vals), "val": a_vals + b_vals}
    )
    expected = DataFrame(
        [a_expected, b_expected], columns=["val"], index=Index(["a", "b"], name="key")
    )
    result = df.groupby("key").quantile(q, interpolation=interpolation)
    tm.assert_frame_equal(result, expected)
def test_quantile_array():
    """List-valued ``q`` yields a (group, quantile) MultiIndex result."""
    # https://github.com/pandas-dev/pandas/issues/27526
    frame = DataFrame({"A": [0, 1, 2, 3, 4]})
    observed = frame.groupby([0, 0, 1, 1, 1]).quantile([0.25])
    expected_index = pd.MultiIndex.from_product([[0, 1], [0.25]])
    tm.assert_frame_equal(
        observed, DataFrame({"A": [0.25, 2.50]}, index=expected_index)
    )
    frame = DataFrame({"A": [0, 1, 2, 3], "B": [4, 5, 6, 7]})
    observed = frame.groupby([0, 0, 1, 1]).quantile([0.25, 0.75])
    expected_index = pd.MultiIndex.from_product([[0, 1], [0.25, 0.75]])
    expected = DataFrame(
        {"A": [0.25, 0.75, 2.25, 2.75], "B": [4.25, 4.75, 6.25, 6.75]},
        index=expected_index,
    )
    tm.assert_frame_equal(observed, expected)
def test_quantile_array2():
    """Deterministic random frame: list-``q`` quantiles per group of 'A'."""
    # https://github.com/pandas-dev/pandas/pull/28085#issuecomment-524066959
    rng = np.random.RandomState(0)
    frame = DataFrame(rng.randint(0, 5, size=(10, 3)), columns=list("ABC"))
    observed = frame.groupby("A").quantile([0.3, 0.7])
    expected_index = pd.MultiIndex.from_product(
        [[0, 1, 2, 3, 4], [0.3, 0.7]], names=["A", None]
    )
    expected = DataFrame(
        {
            "B": [0.9, 2.1, 2.2, 3.4, 1.6, 2.4, 2.3, 2.7, 0.0, 0.0],
            "C": [1.2, 2.8, 1.8, 3.0, 0.0, 0.0, 1.9, 3.1, 3.0, 3.0],
        },
        index=expected_index,
    )
    tm.assert_frame_equal(observed, expected)
def test_quantile_array_no_sort():
    """With sort=False the group order of first appearance is preserved."""
    frame = DataFrame({"A": [0, 1, 2], "B": [3, 4, 5]})
    grouped = frame.groupby([1, 0, 1], sort=False)
    observed = grouped.quantile([0.25, 0.5, 0.75])
    expected = DataFrame(
        {"A": [0.5, 1.0, 1.5, 1.0, 1.0, 1.0], "B": [3.5, 4.0, 4.5, 4.0, 4.0, 4.0]},
        index=pd.MultiIndex.from_product([[1, 0], [0.25, 0.5, 0.75]]),
    )
    tm.assert_frame_equal(observed, expected)
    # quantile order in ``q`` is also preserved as given
    observed = grouped.quantile([0.75, 0.25])
    expected = DataFrame(
        {"A": [1.5, 0.5, 1.0, 1.0], "B": [4.5, 3.5, 4.0, 4.0]},
        index=pd.MultiIndex.from_product([[1, 0], [0.75, 0.25]]),
    )
    tm.assert_frame_equal(observed, expected)
def test_quantile_array_multiple_levels():
    """Grouping by two keys plus list-``q`` builds a three-level MultiIndex."""
    frame = DataFrame(
        {"A": [0, 1, 2], "B": [3, 4, 5], "c": ["a", "a", "a"], "d": ["a", "a", "b"]}
    )
    observed = frame.groupby(["c", "d"]).quantile([0.25, 0.75])
    expected_index = pd.MultiIndex.from_tuples(
        [("a", "a", 0.25), ("a", "a", 0.75), ("a", "b", 0.25), ("a", "b", 0.75)],
        names=["c", "d", None],
    )
    expected = DataFrame(
        {"A": [0.25, 0.75, 2.0, 2.0], "B": [3.25, 3.75, 5.0, 5.0]},
        index=expected_index,
    )
    tm.assert_frame_equal(observed, expected)
@pytest.mark.parametrize("frame_size", [(2, 3), (100, 10)])
@pytest.mark.parametrize("groupby", [[0], [0, 1]])
@pytest.mark.parametrize("q", [[0.5, 0.6]])
def test_groupby_quantile_with_arraylike_q_and_int_columns(frame_size, groupby, q):
    """Array-like ``q`` works when column labels are plain ints."""
    # GH30289
    nrow, ncol = frame_size
    # every column of a row holds the same value: row_index % 4
    df = DataFrame(np.array([ncol * [_ % 4] for _ in range(nrow)]), columns=range(ncol))
    # expected MultiIndex: one level per groupby key plus the quantile level
    idx_levels = [list(range(min(nrow, 4)))] * len(groupby) + [q]
    idx_codes = [[x for x in range(min(nrow, 4)) for _ in q]] * len(groupby) + [
        list(range(len(q))) * min(nrow, 4)
    ]
    expected_index = pd.MultiIndex(
        levels=idx_levels, codes=idx_codes, names=groupby + [None]
    )
    expected_values = [
        [float(x)] * (ncol - len(groupby)) for x in range(min(nrow, 4)) for _ in q
    ]
    # grouped-by columns are excluded from the result
    expected_columns = [x for x in range(ncol) if x not in groupby]
    expected = DataFrame(
        expected_values, index=expected_index, columns=expected_columns
    )
    result = df.groupby(groupby).quantile(q)
    tm.assert_frame_equal(result, expected)
def test_quantile_raises():
    """quantile on object-dtype values raises TypeError.
    NOTE(review): the match string is pinned to a specific pandas error
    message -- confirm against the targeted pandas version."""
    df = DataFrame([["foo", "a"], ["foo", "b"], ["foo", "c"]], columns=["key", "val"])
    with pytest.raises(TypeError, match="cannot be performed against 'object' dtypes"):
        df.groupby("key").quantile()
def test_quantile_out_of_bounds_q_raises():
    """``q`` outside [0, 1] raises ValueError.
    NOTE(review): match strings are pinned to a specific pandas error
    message -- confirm against the targeted pandas version."""
    # https://github.com/pandas-dev/pandas/issues/27470
    df = DataFrame({"a": [0, 0, 0, 1, 1, 1], "b": range(6)})
    g = df.groupby([0, 0, 0, 1, 1, 1])
    with pytest.raises(ValueError, match="Got '50.0' instead"):
        g.quantile(50)
    with pytest.raises(ValueError, match="Got '-1.0' instead"):
        g.quantile(-1)
def test_quantile_missing_group_values_no_segfaults():
    """Regression: NaN in the grouping key must not crash quantile."""
    # GH 28662
    key_values = np.array([1.0, np.nan, 1.0])
    frame = DataFrame({"key": key_values, "val": range(3)})
    grouped = frame.groupby("key")
    # The original crash was intermittent; repeating makes it deterministic.
    for _ in range(100):
        grouped.quantile()
@pytest.mark.parametrize(
    "key, val, expected_key, expected_val",
    [
        ([1.0, np.nan, 3.0, np.nan], range(4), [1.0, 3.0], [0.0, 2.0]),
        ([1.0, np.nan, 2.0, 2.0], range(4), [1.0, 2.0], [0.0, 2.5]),
        (["a", "b", "b", np.nan], range(4), ["a", "b"], [0, 1.5]),
        ([0], [42], [0], [42.0]),
        ([], [], np.array([], dtype="float64"), np.array([], dtype="float64")),
    ],
)
def test_quantile_missing_group_values_correct_results(
    key, val, expected_key, expected_val
):
    """Rows whose group key is NaN are dropped; remaining groups are correct."""
    # GH 28662, GH 33200, GH 33569
    df = DataFrame({"key": key, "val": val})
    expected = DataFrame(
        expected_val, index=Index(expected_key, name="key"), columns=["val"]
    )
    grp = df.groupby("key")
    # explicit q=0.5 and the default should agree (default is the median)
    result = grp.quantile(0.5)
    tm.assert_frame_equal(result, expected)
    result = grp.quantile()
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "values",
    [
        pd.array([1, 0, None] * 2, dtype="Int64"),
        pd.array([True, False, None] * 2, dtype="boolean"),
    ],
)
@pytest.mark.parametrize("q", [0.5, [0.0, 0.5, 1.0]])
def test_groupby_quantile_nullable_array(values, q):
    """quantile works on nullable (masked) Int64/boolean arrays."""
    # https://github.com/pandas-dev/pandas/issues/33136
    df = DataFrame({"a": ["x"] * 3 + ["y"] * 3, "b": values})
    result = df.groupby("a")["b"].quantile(q)
    # expected index shape depends on whether q is scalar or list-like
    if isinstance(q, list):
        idx = pd.MultiIndex.from_product((["x", "y"], q), names=["a", None])
        true_quantiles = [0.0, 0.5, 1.0]
    else:
        idx = Index(["x", "y"], name="a")
        true_quantiles = [0.5]
    expected = pd.Series(true_quantiles * 2, index=idx, name="b")
    tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("q", [0.5, [0.0, 0.5, 1.0]])
def test_groupby_quantile_skips_invalid_dtype(q):
    """Object-dtype columns are silently dropped from the quantile result.
    NOTE(review): later pandas versions changed this to raise instead of
    dropping -- confirm against the targeted pandas version."""
    df = DataFrame({"a": [1], "b": [2.0], "c": ["x"]})
    result = df.groupby("a").quantile(q)
    expected = df.groupby("a")[["b"]].quantile(q)
    tm.assert_frame_equal(result, expected)
def test_groupby_timedelta_quantile():
    """quantile interpolates timedelta64 values within each group."""
    # GH: 29485
    frame = DataFrame(
        {"value": pd.to_timedelta(np.arange(4), unit="s"), "group": [1, 1, 2, 2]}
    )
    observed = frame.groupby("group").quantile(0.99)
    expected_values = [
        pd.Timedelta("0 days 00:00:00.990000"),
        pd.Timedelta("0 days 00:00:02.990000"),
    ]
    expected = DataFrame(
        {"value": expected_values},
        index=Index([1, 2], name="group"),
    )
    tm.assert_frame_equal(observed, expected)
def test_columns_groupby_quantile():
    """quantile over column groups via groupby(..., axis=1).
    NOTE(review): axis=1 groupby is deprecated/removed in newer pandas --
    confirm against the targeted pandas version."""
    # GH 33795
    df = DataFrame(
        np.arange(12).reshape(3, -1),
        index=list("XYZ"),
        columns=pd.Series(list("ABAB"), name="col"),
    )
    result = df.groupby("col", axis=1).quantile(q=[0.8, 0.2])
    expected = DataFrame(
        [
            [1.6, 0.4, 2.6, 1.4],
            [5.6, 4.4, 6.6, 5.4],
            [9.6, 8.4, 10.6, 9.4],
        ],
        index=list("XYZ"),
        columns=pd.MultiIndex.from_tuples(
            [("A", 0.8), ("A", 0.2), ("B", 0.8), ("B", 0.2)], names=["col", None]
        ),
    )
    tm.assert_frame_equal(result, expected)
| jreback/pandas | pandas/tests/groupby/test_quantile.py | Python | bsd-3-clause | 9,272 |
from base import *
# Production override: debug pages must never be exposed in production.
DEBUG = False
# -*-coding:Utf-8 -*
# Copyright (c) 2013 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant la volonté RelacherGouvernail"""
import re
from secondaires.navigation.equipage.ordres.relacher_gouvernail import \
RelacherGouvernail as OrdreRelacherGouvernail
from secondaires.navigation.equipage.ordres.long_deplacer import LongDeplacer
from secondaires.navigation.equipage.volonte import Volonte
class RelacherGouvernail(Volonte):
    """Volition (crew order) asking the helmsman to release the helm.
    This volition simply asks the sailor currently holding the helm to
    let go of it. As with most volitions, the sailor is then encouraged
    to return to his assigned room.
    """
    # key identifying this volition, plus the short/long command regexes
    cle = "relacher_gouvernail"
    ordre_court = re.compile(r"^rg$", re.I)
    ordre_long = re.compile(r"^relacher\s+gouvernail?$", re.I)
    def choisir_matelots(self, exception=None):
        """Return the sailor best suited to carry out this volition.
        Returns None when the ship has no helm or nobody is holding it."""
        navire = self.navire
        equipage = navire.equipage
        gouvernail = self.navire.gouvernail
        if gouvernail is None or gouvernail.tenu is None:
            return None
        personnage = gouvernail.tenu
        matelot = equipage.get_matelot_depuis_personnage(personnage)
        return matelot
    def executer(self, matelot):
        """Execute the volition: queue the release order for the sailor."""
        if matelot is None:
            self.terminer()
            return
        navire = self.navire
        ordres = []
        # any pending turn ("virer") orders become moot once the helm is free
        matelot.invalider_ordres("virer")
        relacher = OrdreRelacherGouvernail(matelot, navire)
        ordres.append(relacher)
        ordres.append(self.revenir_affectation(matelot))
        self.ajouter_ordres(matelot, ordres)
    def crier_ordres(self, personnage):
        """Have the given character shout the order aboard the ship."""
        msg = "{} s'écrie : relâchez la barre !".format(
                personnage.distinction_audible)
        self.navire.envoyer(msg)
    @classmethod
    def extraire_arguments(cls, navire):
        """Extract the volition's arguments (this one takes none)."""
        return ()
| stormi/tsunami | src/secondaires/navigation/equipage/volontes/relacher_gouvernail.py | Python | bsd-3-clause | 3,566 |
# ----------------------------------------------------------------------------
# Copyright (c) 2011-2015, The American Gut Development Team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from psycopg2 import connect
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
import agr
# table definitions, these are of the form: [(table_name, table_definition)].
# the motivation for this structure is to allow for checking if tables exist
# easily (see schema_is_sane)
tables = [
('biom',
"""create table biom (
sample varchar,
biom json,
biomtxt text,
constraint pk_biom primary key(sample)
)"""),
('metadata',
"""create table metadata (
sample varchar,
category varchar,
value varchar,
constraint pk_metadata primary key (sample, category),
constraint fk_metadata foreign key (sample) references biom(sample)
)"""),
('fastq',
"""create table fastq (
sample varchar,
url varchar,
constraint pk_fastq primary key (sample),
constraint fk_fastq foreign key (sample) references biom(sample),
constraint uc_fastq unique (url)
)"""),
('state',
"""create table state (
biom_sha varchar)""")
]
def database_connectivity(user=agr.db_user, password=agr.db_password,
                          host=agr.db_host, dbname=agr.db_name):
    """Determine if we can connect to the database
    Parameters
    ----------
    user : str
        The database username
    password : str
        The password for the user
    host : str
        The database host
    dbname : str
        The name of the database to connect to
    Returns
    -------
    bool
        True if a connection was made, False otherwise
    """
    try:
        conn = connect(user=user, password=password, host=host, dbname=dbname)
    except Exception:
        # was a bare ``except:``, which would also swallow SystemExit and
        # KeyboardInterrupt; Exception keeps the "any failure means no
        # connectivity" behavior without masking interpreter-level signals
        return False
    else:
        conn.close()
        return True
def database_exists(user=agr.db_user, password=agr.db_password,
                    host=agr.db_host, dbname=agr.db_name):
    """Determine if the database exists
    Parameters
    ----------
    user : str
        The database username
    password : str
        The password for the user
    host : str
        The database host
    dbname : str
        The name of the database to connect to
    Returns
    -------
    bool
        True if the database exists, False otherwise
    """
    # existence is probed the same way as connectivity: attempt a
    # connection to the named database and see whether it succeeds
    try:
        conn = connect(user=user, password=password, host=host, dbname=dbname)
    except Exception:
        # was a bare ``except:``; narrowed to Exception so interpreter
        # signals (SystemExit, KeyboardInterrupt) are not swallowed
        return False
    else:
        conn.close()
        return True
def schema_is_sane():
    """Check whether every expected table name exists in the schema.
    Notes
    -----
    Assumes connectivity and that the database exists; only table names
    are checked, not their structure. Credentials come from the ``agr``
    module (i.e. the environment configuration).
    Returns
    -------
    bool
        True when all expected tables appear to exist.
    """
    conn = connect(user=agr.db_user, password=agr.db_password,
                   host=agr.db_host, dbname=agr.db_name)
    cur = conn.cursor()
    for name, _ in tables:
        cur.execute("""select exists(select *
                       from information_schema.tables
                       where table_name=%s)""", [name])
        row = cur.fetchone()
        if not row[0]:
            return False
    return True
def schema_has_data():
    """Check whether every expected table appears to contain rows.
    Notes
    -----
    Assumes connectivity and that the database exists; only row counts
    are checked, not table structure. Credentials come from the ``agr``
    module (i.e. the environment configuration).
    Returns
    -------
    bool
        True when the schema is sane and no table is empty.
    """
    if not schema_is_sane():
        return False
    conn = connect(user=agr.db_user, password=agr.db_password,
                   host=agr.db_host, dbname=agr.db_name)
    cur = conn.cursor()
    for table_name, _ in tables:
        # table names come from the module-level ``tables`` list, not from
        # user input, so interpolating them into the query is safe here
        cur.execute("select count(1) from %s" % table_name)
        row = cur.fetchone()
        if row[0] == 0:
            return False
    return True
def create_database():
    """Create the database and the schema
    Notes
    -----
    Assumes we have connectivity.
    Database credentials are sourced from the agr module (e.g., the
    environment configuration).
    """
    conn = connect(user=agr.admin_db_user, password=agr.admin_db_password,
                   host=agr.db_host)
    conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
    cur = conn.cursor()
    # Bug fix: the drop statement previously hard-coded the database name
    # 'ag_rest', so with any other configured agr.db_name the old database
    # was never dropped before the create. Drop the configured name instead.
    cur.execute('drop database if exists %s' % agr.db_name)
    cur.execute('create database %s' % agr.db_name)
    cur.close()
    conn.close()
    create_tables()
def create_tables():
    """Create every table defined in the module-level ``tables`` list."""
    conn = connect(user=agr.admin_db_user, password=agr.admin_db_password,
                   host=agr.db_host, dbname=agr.db_name)
    conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
    cur = conn.cursor()
    for _, ddl in tables:
        cur.execute(ddl)
if __name__ == '__main__':
    import sys
    if not database_connectivity():
        sys.stderr.write("Cannot connect to the database\n")
        sys.exit(1)
    # Safety valve: outside a test environment, tables are only created when
    # explicitly forced on the command line.
    if not agr.test_environment:
        # NOTE(review): sys.argv[1] raises IndexError when no argument is
        # supplied -- confirm that is acceptable for this admin script
        if sys.argv[1] == 'FORCE_CREATE_TABLES':
            create_tables()
            sys.exit(0)
        else:
            sys.stderr.write("This does not appear to be a test environment\n")
            sys.exit(1)
    # Test environment: recreate the database unless it already looks complete.
    if database_exists() and schema_is_sane() and schema_has_data():
        sys.exit(0)
    else:
        create_database()
| biocore/american-gut-rest | agr/schema.py | Python | bsd-3-clause | 5,875 |
import os
import tempfile
import shutil
def listar(directorio):
    """Return a list with the full paths of the regular files inside a folder.
    Subdirectories and other non-file entries are skipped.
    """
    return [
        os.path.join(directorio, nombre)
        for nombre in os.listdir(directorio)
        if os.path.isfile(os.path.join(directorio, nombre))
    ]
def crear(prefijo="Gram"):
    """Create a temporary folder and return its path as a string.
    ``prefijo`` sets the prefix used for the folder name; it defaults
    to "Gram".
    """
    return tempfile.mkdtemp(prefix=prefijo)
def eliminar(ruta):
    """Remove a directory tree; ``ruta`` is the path of the directory
    to delete."""
    shutil.rmtree(ruta)
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
# Generate and process one synthetic series: 32 daily points, moving-average
# trend, 7-period cycle, relative-difference transform, no AR terms, no noise,
# 20 exogenous variables.
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 7, transform = "RelativeDifference", sigma = 0.0, exog_count = 20, ar_order = 0);
# -----------------------------------------------------------------------------
# Copyright (c) 2016--, The Plate Mapper Development Team.
#
# Distributed under the terms of the BSD 3-clause License.
#
# The full license is in the file LICENSE, distributed with this software.
# -----------------------------------------------------------------------------
import platemap as pm
class Plate(pm.base.PMObject):
_table = 'plate'
@classmethod
def plates(cls, finalized=False):
"""Returns all plates available in the system
Parameters
----------
finalized: bool, optional
Whether to only grab finalized plates. Default False.
Returns
-------
list of Plate objects
All plates in the system
"""
sql = "SELECT plate_id FROM barcodes.plate"
if finalized:
sql += " WHERE finalized = TRUE"
sql += " ORDER BY created_on DESC"
with pm.sql.TRN:
pm.sql.TRN.add(sql)
return [cls(p) for p in pm.sql.TRN.execute_fetchflatten()]
@classmethod
def create(cls, barcode, name, person, rows, cols):
r"""Creates a new plate object
Parameters
----------
barcode : str
The barcode assigned to the plate
name : str
Identifying name for the plate
person : Person object
The person creating the plate
rows : int
Number of rows on the plate
cols : int
Number of columns in the plate
Returns
-------
Plate object
New plate object
Raises
------
DuplicateError
Plate with given barcode already exists
DeveloperError
Barcode already assigned to something else
"""
plate_sql = """INSERT INTO barcodes.plate
(plate_id, plate, rows, cols, person_id)
VALUES (%s, %s, %s, %s, %s)
"""
barcode_sql = """UPDATE barcodes.barcode
SET assigned_on = NOW()
WHERE barcode = %s
"""
with pm.sql.TRN:
if cls.exists(barcode):
raise pm.exceptions.DuplicateError(barcode, 'plate')
if pm.util.check_barcode_assigned(barcode):
raise pm.exceptions.DeveloperError(
'Barcode %s already assigned!' % barcode)
pm.sql.TRN.add(plate_sql, [barcode, name, rows, cols, person.id])
pm.sql.TRN.add(barcode_sql, [barcode])
pm.sql.TRN.execute()
return cls(barcode)
@staticmethod
def delete(cls, barcode):
r"""Delete a plate from the system
Parameters
----------
barcode : str
The plate barcode
"""
raise NotImplementedError()
@staticmethod
def exists(barcode):
r"""Checks if a plate already exists
Parameters
----------
barcode : str
Barcode for plate
Returns
-------
bool
Whether plate already exists (True) or not (False)
"""
sql = "SELECT EXISTS(SELECT * FROM barcodes.plate WHERE plate_id = %s)"
with pm.sql.TRN:
pm.sql.TRN.add(sql, [barcode])
return pm.sql.TRN.execute_fetchlast()
def _check_finalized(self):
"""Locks down changes to plate if already finalized
Raises
------
EditError
Trying to change values of a finalized plate
"""
if self.finalized:
raise pm.exceptions.EditError(self.id)
def __getitem__(self, pos):
"""
Returns the sample at a given position on the plate
Parameters
----------
pos : tuple of int
The plate well to get sample for
Returns
-------
Sample object or None
Sample at the positon, or None if no sample.
Raises
------
IndexError
Position given is outside of plate
Notes
-----
Passed a tuple, so called as sample = plate[row, col]
"""
sql = """SELECT sample_id
FROM barcodes.plates_samples
WHERE plate_id = %s AND plate_row = %s and plate_col = %s
"""
with pm.sql.TRN:
row, col = pos[0], pos[1]
rows, cols = self.shape
if row < 0 or row >= rows or col < 0 or col >= cols:
raise IndexError('Position %d, %d not on plate' % (row, col))
pm.sql.TRN.add(sql, [self.id, row, col])
sid = pm.sql.TRN.execute_fetchlast()
return None if sid is None else pm.sample.Sample(sid)
def __setitem__(self, pos, value):
"""
Adds the sample at a given position on the plate
Parameters
----------
pos : tuple of int
The plate well to add sample at
value : Sample object or None
The sample to add, or None to remove sample from position
Raises
------
IndexError
Position given is outside of plate
Notes
-----
Passed a tuple, so called as plate[row, col] = Sample()
"""
# Need to get around postgres not having upsert in postgres < 9.5
# So do this slightly hacky workaround
# http://www.the-art-of-web.com/sql/upsert/
upsert_sql = """WITH upsert AS (
UPDATE barcodes.plates_samples
SET sample_id = %s
WHERE plate_id = %s AND plate_row = %s
AND plate_col = %s
RETURNING *)
INSERT INTO barcodes.plates_samples
(sample_id, plate_id, plate_row, plate_col)
SELECT %s, %s, %s, %s WHERE NOT EXISTS (
SELECT * FROM upsert)
"""
delete_sql = """DELETE FROM barcodes.plates_samples
WHERE plate_id = %s AND plate_row = %s
AND plate_col = %s"""
with pm.sql.TRN:
self._check_finalized()
row, col = pos[0], pos[1]
rows, cols = self.shape
if row < 0 or row >= rows or col < 0 or col >= cols:
raise IndexError('Position %d, %d not on plate' % (row, col))
if value is not None:
pm.sql.TRN.add(upsert_sql, [value.id, self.id, row, col,
value.id, self.id, row, col])
else:
pm.sql.TRN.add(delete_sql, [self.id, row, col])
@property
def name(self):
"""Name of the plate
Returns
-------
str
Name of the plate
"""
return self._get_property('plate')
@property
def finalized(self):
"""Finalized status of the plate
Returns
-------
bool
If the plate is finalized (True) or not (False)
"""
return self._get_property('finalized')
@property
def shape(self):
"""Shaple of the plate
Returns
-------
tuple of int
Plate dimensions in the form (rows, cols)
"""
sql = "SELECT rows, cols FROM barcodes.plate WHERE plate_id = %s"
with pm.sql.TRN:
pm.sql.TRN.add(sql, [self.id])
return tuple(pm.sql.TRN.execute_fetchindex()[0])
@property
def samples(self):
"""List of samples in the plate, ordered by row down the plate
Returns
-------
list of Sample objects
Samples on the plate, ordered by row.
Sample at [0, 0], followed by [0, 1], [0, 2], etc.
"""
sql = """SELECT sample_id
FROM barcodes.plates_samples
WHERE plate_id = %s
ORDER BY plate_row, plate_col
"""
with pm.sql.TRN:
pm.sql.TRN.add(sql, [self.id])
return [pm.sample.Sample(s) for s in
pm.sql.TRN.execute_fetchflatten()]
@property
def platemap(self):
"""Samples on the plate, mapped as list of lists
Returns
-------
list of list of Sample objects or None
Samples on the plate, with None if no sample at the position
"""
sql = """SELECT plate_row::varchar || plate_col::varchar, sample_id
FROM barcodes.plates_samples
WHERE plate_id = %s
ORDER BY plate_row, plate_col
"""
with pm.sql.TRN:
rows, cols = self.shape
pm.sql.TRN.add(sql, [self.id])
# Turn the returned rows into a dict keyed to the combined
# rowcol created by the sql query
samples = dict(pm.sql.TRN.execute_fetchindex())
ret = []
# Loop over each sample and add None of no sample in position
for r in range(rows):
ret.append([])
for c in range(cols):
samp = samples.get('%d%d' % (r, c), None)
ret[r].append(pm.sample.Sample(samp)
if samp is not None else None)
return ret
# -------- functions ----------------
def to_html(self):
"""Builds an HTML table representation of the plate
Returns
-------
str
HTML representation of the plate
Notes
-----
The class `plate` is added to the table for css styling.
"""
samples = self.platemap
rows, cols = self.shape
table = ['<table class="plate"><tr><th></th>']
# Add column header
for col in range(1, cols + 1):
table.append('<th>%d</th>' % col)
table.append('</tr>')
for row in range(rows):
table.append('<tr><th>%s</th>' % chr(65 + row))
for col in range(cols):
samp = samples[row][col]
table.append('<td>%s</td>' %
samp.name if samp is not None else '<td></td>')
table.append('</tr>')
table.append('</table>')
return ''.join(table)
def finalize(self):
"""Finalizes plate by flagging it in the DB"""
sql = "UPDATE barcodes.plate SET finalized = 'T' WHERE plate_id = %s"
with pm.sql.TRN:
pm.sql.TRN.add(sql, [self.id])
def revert(self, user):
    """Reverts the plate from finalized to editable

    Parameters
    ----------
    user : User object
        User requesting the revert

    Raises
    ------
    AssignError
        User is not an admin
    """
    # Make sure user is admin; only admins may undo finalization.
    if not user.check_access('Admin'):
        raise pm.exceptions.AssignError('User %s is not admin!' % user)
    sql = "UPDATE barcodes.plate SET finalized = 'F' WHERE plate_id = %s"
    with pm.sql.TRN:
        pm.sql.TRN.add(sql, [self.id])
| squirrelo/plate-mapper | platemap/lib/plate.py | Python | bsd-3-clause | 11,253 |
# -*- coding: utf-8 -*-
#
# ask-undrgz system of questions uses data from underguiz.
# Copyright (c) 2010, Nycholas de Oliveira e Oliveira <nycholas@gmail.com>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# # Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# # Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# # Neither the name of the Nycholas de Oliveira e Oliveira nor the names of
# its contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# Django settings for ask_undrgz project.
import os
# Filesystem path of the directory containing this settings module; used
# below to locate the project-relative templates directory.
ROOT_PATH = os.path.realpath(os.path.dirname(__file__))

DEBUG = False
TEMPLATE_DEBUG = DEBUG

ADMINS = (
    ('Nycholas de Oliveira e Oliveira', 'nycholas@gmail.com'),
)

MANAGERS = ADMINS

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.', # Add 'postgresql_psycopg2', 'postgresql', 'mysql', 'sqlite3' or 'oracle'.
        'NAME': '', # Or path to database file if using sqlite3.
        'USER': '', # Not used with sqlite3.
        'PASSWORD': '', # Not used with sqlite3.
        'HOST': '', # Set to empty string for localhost. Not used with sqlite3.
        'PORT': '', # Set to empty string for default. Not used with sqlite3.
    }
}

# Twitter
# NOTE(review): the 'XXX' values are placeholders; real credentials are
# presumably injected at deploy time -- never commit live secrets here.
TWITTER_USERNAME = 'ask_undrgz'
TWITTER_PASSWORD = 'XXX'
TWITTER_CONSUMER_KEY = 'XXX'
TWITTER_CONSUMER_SECRET = 'XXX'
TWITTER_OAUTH_TOKEN = 'XXX'
TWITTER_OAUTH_TOKEN_SECRET = 'XXX'
TWITTER_CALLBACK = 'http://ask-undrgz.appspot.com/_oauth/twitter/callback/'
if DEBUG:
    # Point the OAuth callback at the local dev server when debugging.
    TWITTER_CALLBACK = 'http://localhost:8080/_oauth/twitter/callback/'

# Identity function used to tag strings for translation below without
# importing Django's i18n machinery at settings-import time.
ugettext = lambda s: s

# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
# although not all choices may be available on all operating systems.
# On Unix systems, a value of None will cause Django to use the same
# timezone as the operating system.
# If running in a Windows environment this must be set to the same as your
# system time zone.
TIME_ZONE = 'America/Chicago'

# Language code for this installation. All choices can be found here:
# http://www.i18nguy.com/unicode/language-identifiers.html
LANGUAGE_CODE = 'en-us'

LANGUAGES = (
    ('en', ugettext('English')),
    ('pt-BR', ugettext('Portuguese Brazil')),
)

SITE_ID = 1

# If you set this to False, Django will make some optimizations so as not
# to load the internationalization machinery.
USE_I18N = True

# If you set this to False, Django will not format dates, numbers and
# calendars according to the current locale
USE_L10N = True

# Absolute path to the directory that holds media.
# Example: "/home/media/media.lawrence.com/"
MEDIA_ROOT = ''

# URL that handles the media served from MEDIA_ROOT. Make sure to use a
# trailing slash if there is a path component (optional in other cases).
# Examples: "http://media.lawrence.com", "http://example.com/media/"
MEDIA_URL = ''

# URL prefix for admin media -- CSS, JavaScript and images. Make sure to use a
# trailing slash.
# Examples: "http://foo.com/media/", "/media/".
ADMIN_MEDIA_PREFIX = '/media/'

# Make this unique, and don't share it with anybody.
# NOTE(review): this key is committed to the repository; rotate it and load
# it from the environment for any real deployment.
SECRET_KEY = 'ti*(j(^fvi!&1cu7#sw7mkhb=dgl5v_$1&v5=wom_l4y!x9j*@'

# List of callables that know how to import templates from various sources.
TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
    # 'django.template.loaders.eggs.Loader',
)

MIDDLEWARE_CLASSES = (
    'django.middleware.common.CommonMiddleware',
    # 'django.contrib.sessions.middleware.SessionMiddleware',
    # 'django.middleware.csrf.CsrfViewMiddleware',
    'django.middleware.locale.LocaleMiddleware',
    # 'django.contrib.auth.middleware.AuthenticationMiddleware',
    # 'django.contrib.messages.middleware.MessageMiddleware',
)

ROOT_URLCONF = 'ask_undrgz.urls'

TEMPLATE_DIRS = (
    # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates".
    # Always use forward slashes, even on Windows.
    # Don't forget to use absolute paths, not relative paths.
    ROOT_PATH + '/templates',
)

INSTALLED_APPS = (
    # 'django.contrib.auth',
    'django.contrib.contenttypes',
    # 'django.contrib.sessions',
    'django.contrib.sites',
    'django.contrib.messages',
    # Uncomment the next line to enable the admin:
    # 'django.contrib.admin',
    'ask_undrgz.question',
)
| nycholas/ask-undrgz | src/ask-undrgz/ask_undrgz/settings.py | Python | bsd-3-clause | 5,653 |
#! /usr/bin/python3
# The number 3797 has an interesting property. Being prime itself, it is possible to continuously remove digits from left to right, and remain prime at each stage: 3797, 797, 97, and 7. Similarly we can work from right to left: 3797, 379, 37, and 3.
# Find the sum of the only eleven primes that are both truncatable from left to right and right to left.
# NOTE: 2, 3, 5, and 7 are not considered to be truncatable primes.
from euler import *
if __name__ == '__main__':
    # The 11 two-sided truncatable primes are presumably all below 8e5
    # (the largest being 739397) -- TODO confirm against PrimesBelow's
    # accepted bound type (a float is passed here).
    primes = PrimesBelow(8e5)
    # Remove terminal 1's and 9's: truncating left-to-right ends on the
    # last digit, which must itself be prime (primes > 5 end in 1/3/7/9).
    maybeTruncatable = [prime for prime in primes if prime % 10 not in (1, 9)]
    # Remove beginning 1's and 9's: truncating right-to-left ends on the
    # first digit, which must itself be prime.
    maybeTruncatable = [prime for prime in maybeTruncatable if str(prime)[0] not in ('1', '9')]
    truncatables = []
    for prime in maybeTruncatable:
        if IsTruncatable(prime, primes):
            truncatables.append(prime)
        # Stop after 11 true truncatables plus the 4 single-digit primes
        # (2, 3, 5, 7), which IsTruncatable presumably also accepts --
        # TODO confirm that assumption holds for single digits.
        if len(truncatables) > 11 + 3:
            break
    # Single-digit primes are excluded from the answer, hence `> 9`.
    print(sum(prime for prime in truncatables if prime > 9))
| jeffseif/projectEuler | src/p037.py | Python | bsd-3-clause | 1,027 |
# -*- coding: utf-8 -*-
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# TEST_UNICODE_LITERALS
from ... import table
from .. import pprint
class MyRow(table.Row):
    # Row subclass whose str() shows the underlying numpy void record,
    # e.g. '(1, 3)', so tests can detect which Row class was used.
    def __str__(self):
        return str(self.as_void())


class MyColumn(table.Column):
    # Marker subclass; behavior unchanged.
    pass


class MyMaskedColumn(table.MaskedColumn):
    # Marker subclass; behavior unchanged.
    pass


class MyTableColumns(table.TableColumns):
    # Marker subclass; behavior unchanged.
    pass


class MyTableFormatter(pprint.TableFormatter):
    # Marker subclass; behavior unchanged.
    pass


class MyTable(table.Table):
    # Table subclass wired to use all of the custom component classes
    # above; Table consults these class attributes when building rows,
    # columns and the formatter.
    Row = MyRow
    Column = MyColumn
    MaskedColumn = MyMaskedColumn
    TableColumns = MyTableColumns
    TableFormatter = MyTableFormatter
def test_simple_subclass():
    # A subclassed Table must hand out the custom component classes.
    t = MyTable([[1, 2], [3, 4]])
    row = t[0]
    assert isinstance(row, MyRow)
    assert isinstance(t['col0'], MyColumn)
    assert isinstance(t.columns, MyTableColumns)
    assert isinstance(t.formatter, MyTableFormatter)

    # Copy-constructing from a MyTable preserves the subclass behavior.
    t2 = MyTable(t)
    row = t2[0]
    assert isinstance(row, MyRow)
    assert str(row) == '(1, 3)'

    # Converting back to a plain Table drops the custom Row class.
    t3 = table.Table(t)
    row = t3[0]
    assert not isinstance(row, MyRow)
    assert str(row) != '(1, 3)'

    # Masked tables use the custom MaskedColumn instead of Column.
    t = MyTable([[1, 2], [3, 4]], masked=True)
    row = t[0]
    assert isinstance(row, MyRow)
    assert str(row) == '(1, 3)'
    assert isinstance(t['col0'], MyMaskedColumn)
    assert isinstance(t.formatter, MyTableFormatter)
class ParamsRow(table.Row):
    """
    Row class that allows access to an arbitrary dict of parameters
    stored as a dict object in the ``params`` column.
    """

    def __getitem__(self, item):
        # Real columns win; anything else is looked up inside the
        # per-row 'params' dict.
        if item in self.colnames:
            return super(ParamsRow, self).__getitem__(item)
        return super(ParamsRow, self).__getitem__('params')[item]

    def keys(self):
        # Regular column names first (minus 'params'), then the params
        # keys sorted by their original spelling and lower-cased.
        regular = [name for name in self.colnames if name != 'params']
        extra = [key.lower() for key in sorted(self['params'])]
        return regular + extra

    def values(self):
        # Values in the same order keys() reports them.
        return [self[name] for name in self.keys()]
class ParamsTable(table.Table):
    # Table whose rows expose the extra 'params' dict entries as keys.
    Row = ParamsRow
def test_params_table():
    # 'params' is an object column holding one dict per row.
    t = ParamsTable(names=['a', 'b', 'params'], dtype=['i', 'f', 'O'])
    t.add_row((1, 2.0, {'x': 1.5, 'y': 2.5}))
    t.add_row((2, 3.0, {'z': 'hello', 'id': 123123}))
    assert t['params'][0] == {'x': 1.5, 'y': 2.5}
    assert t[0]['params'] == {'x': 1.5, 'y': 2.5}
    # Dict entries are reachable directly through the row.
    assert t[0]['y'] == 2.5
    assert t[1]['id'] == 123123
    # keys()/values() merge real columns with the per-row params keys.
    assert list(t[1].keys()) == ['a', 'b', 'id', 'z']
    assert list(t[1].values()) == [2, 3.0, 123123, 'hello']
| AustereCuriosity/astropy | astropy/table/tests/test_subclass.py | Python | bsd-3-clause | 2,488 |
import itertools as it
import warnings
import numpy as np
from numpy.testing import assert_equal, assert_raises
from skimage.segmentation import slic
def test_color_2d():
    """SLIC on a noisy RGB image with four colored quadrants."""
    rng = np.random.RandomState(0)
    img = np.zeros((20, 21, 3))
    # Three quadrants lit along different channels; the fourth stays black.
    img[:10, :10, 0] = 1
    img[10:, :10, 1] = 1
    img[10:, 10:, 2] = 1
    # Perturb slightly, then clamp back into [0, 1].
    img += 0.01 * rng.normal(size=img.shape)
    img = np.clip(img, 0, 1)
    with warnings.catch_warnings():
        warnings.simplefilter("ignore")
        seg = slic(img, n_segments=4, sigma=0)
    # we expect 4 segments, one per quadrant, labelled in row-major order
    assert_equal(len(np.unique(seg)), 4)
    assert_equal(seg.shape, img.shape[:-1])
    assert_equal(seg[:10, :10], 0)
    assert_equal(seg[10:, :10], 2)
    assert_equal(seg[:10, 10:], 1)
    assert_equal(seg[10:, 10:], 3)
def test_gray_2d():
    # Four quadrants at gray levels 0, 0.33, 0.67 and 1.0.
    rnd = np.random.RandomState(0)
    img = np.zeros((20, 21))
    img[:10, :10] = 0.33
    img[10:, :10] = 0.67
    img[10:, 10:] = 1.00
    # Add a little noise, then clamp back into [0, 1].
    img += 0.0033 * rnd.normal(size=img.shape)
    img[img > 1] = 1
    img[img < 0] = 0
    seg = slic(img, sigma=0, n_segments=4, compactness=1,
               multichannel=False, convert2lab=False)
    # One label per quadrant, in row-major quadrant order.
    assert_equal(len(np.unique(seg)), 4)
    assert_equal(seg.shape, img.shape)
    assert_equal(seg[:10, :10], 0)
    assert_equal(seg[10:, :10], 2)
    assert_equal(seg[:10, 10:], 1)
    assert_equal(seg[10:, 10:], 3)
def test_color_3d():
    # 3D volume split into 8 octants, each painted a distinct RGB corner
    # color (all combinations of 0/1 per channel).
    rnd = np.random.RandomState(0)
    img = np.zeros((20, 21, 22, 3))
    slices = []
    for dim_size in img.shape[:-1]:
        midpoint = dim_size // 2
        slices.append((slice(None, midpoint), slice(midpoint, None)))
    # Cartesian product of the half-slices enumerates all 8 octants.
    slices = list(it.product(*slices))
    colors = list(it.product(*(([0, 1],) * 3)))
    for s, c in zip(slices, colors):
        img[s] = c
    # Add noise and clamp back into [0, 1].
    img += 0.01 * rnd.normal(size=img.shape)
    img[img > 1] = 1
    img[img < 0] = 0
    seg = slic(img, sigma=0, n_segments=8)
    # One label per octant, in enumeration order.
    assert_equal(len(np.unique(seg)), 8)
    for s, c in zip(slices, range(8)):
        assert_equal(seg[s], c)
def test_gray_3d():
    # 3D volume split into 8 octants at evenly spaced gray shades 0..1.
    rnd = np.random.RandomState(0)
    img = np.zeros((20, 21, 22))
    slices = []
    for dim_size in img.shape:
        midpoint = dim_size // 2
        slices.append((slice(None, midpoint), slice(midpoint, None)))
    slices = list(it.product(*slices))
    # 8 shades: 0, 1/7, 2/7, ... 1 (upper bound nudged to include 1.0).
    shades = np.arange(0, 1.000001, 1.0/7)
    for s, sh in zip(slices, shades):
        img[s] = sh
    # Add noise and clamp back into [0, 1].
    img += 0.001 * rnd.normal(size=img.shape)
    img[img > 1] = 1
    img[img < 0] = 0
    seg = slic(img, sigma=0, n_segments=8, compactness=1,
               multichannel=False, convert2lab=False)
    # One label per octant, in enumeration order.
    assert_equal(len(np.unique(seg)), 8)
    for s, c in zip(slices, range(8)):
        assert_equal(seg[s], c)
def test_list_sigma():
    """A per-axis sigma list must be accepted and respected by slic."""
    rnd = np.random.RandomState(0)
    # FIX: np.float / np.int were deprecated aliases of the builtins and
    # were removed in NumPy 1.24; use the builtin types directly.
    img = np.array([[1, 1, 1, 0, 0, 0],
                    [0, 0, 0, 1, 1, 1]], float)
    img += 0.1 * rnd.normal(size=img.shape)
    # Huge smoothing across rows merges the two rows' patterns, so the
    # segmentation follows the columns only.
    result_sigma = np.array([[0, 0, 0, 1, 1, 1],
                             [0, 0, 0, 1, 1, 1]], int)
    seg_sigma = slic(img, n_segments=2, sigma=[1, 50, 1], multichannel=False)
    assert_equal(seg_sigma, result_sigma)
def test_spacing():
    """Anisotropic `spacing` must change which axis segments split along."""
    rnd = np.random.RandomState(0)
    # FIX: np.float / np.int were deprecated aliases of the builtins and
    # were removed in NumPy 1.24; use the builtin types directly.
    img = np.array([[1, 1, 1, 0, 0],
                    [1, 1, 0, 0, 0]], float)
    # Without spacing the split follows the intensity pattern (columns);
    # with a huge row spacing the two rows become separate segments.
    result_non_spaced = np.array([[0, 0, 0, 1, 1],
                                  [0, 0, 1, 1, 1]], int)
    result_spaced = np.array([[0, 0, 0, 0, 0],
                              [1, 1, 1, 1, 1]], int)
    img += 0.1 * rnd.normal(size=img.shape)
    seg_non_spaced = slic(img, n_segments=2, sigma=0, multichannel=False,
                          compactness=1.0)
    seg_spaced = slic(img, n_segments=2, sigma=0, spacing=[1, 500, 1],
                      compactness=1.0, multichannel=False)
    assert_equal(seg_non_spaced, result_non_spaced)
    assert_equal(seg_spaced, result_spaced)
def test_invalid_lab_conversion():
    """Requesting a Lab conversion on non-RGB data must raise ValueError."""
    # FIX: np.float was a deprecated alias of the builtin and was removed
    # in NumPy 1.24; use the builtin type directly.
    img = np.array([[1, 1, 1, 0, 0],
                    [1, 1, 0, 0, 0]], float)
    assert_raises(ValueError, slic, img, multichannel=True, convert2lab=True)
if __name__ == '__main__':
    # NOTE(review): numpy.testing.run_module_suite is deprecated and has
    # been removed in recent NumPy releases; running this module through
    # pytest directly is the modern replacement -- confirm the project's
    # pinned NumPy still ships it.
    from numpy import testing
    testing.run_module_suite()
| almarklein/scikit-image | skimage/segmentation/tests/test_slic.py | Python | bsd-3-clause | 4,145 |
# -*- coding: utf-8 -*-
"""
Implement PyPiXmlRpc Service.
See: http://wiki.python.org/moin/PyPiXmlRpc
"""
import logging
from pyramid_xmlrpc import XMLRPCView
from pyshop.models import DBSession, Package, Release, ReleaseFile
from pyshop.helpers import pypi
log = logging.getLogger(__name__)
# XXX not tested.
class PyPI(XMLRPCView):
    """XML-RPC endpoint implementing the PyPiXmlRpc API for this index."""

    def list_packages(self):
        """
        Retrieve a list of the package names registered with the package index.
        Returns a list of name strings.
        """
        session = DBSession()
        names = [p.name for p in Package.all(session, order_by=Package.name)]
        return names

    def package_releases(self, package_name, show_hidden=False):
        """
        Retrieve a list of the releases registered for the given package_name.
        Returns a list with all version strings if show_hidden is True or
        only the non-hidden ones otherwise.
        """
        # NOTE: show_hidden is currently ignored; every release is returned.
        session = DBSession()
        package = Package.by_name(session, package_name)
        if package is None:
            # FIX: unknown packages used to raise AttributeError through
            # the XML-RPC layer; an empty list matches the API contract.
            return []
        return [rel.version for rel in package.sorted_releases]

    def package_roles(self, package_name):
        """
        Retrieve a list of users and their attributes roles for a given
        package_name. Role is either 'Maintainer' or 'Owner'.
        """
        session = DBSession()
        package = Package.by_name(session, package_name)
        if package is None:
            # FIX: same guard as package_releases for unknown packages.
            return []
        owners = [('Owner', o.name) for o in package.owners]
        maintainers = [('Maintainer', o.name) for o in package.maintainers]
        return owners + maintainers

    def user_packages(self, user):
        """
        Retrieve a list of [role_name, package_name] for a given username.
        Role is either 'Maintainer' or 'Owner'.
        """
        session = DBSession()
        owned = Package.by_owner(session, user)
        maintained = Package.by_maintainer(session, user)
        owned = [('Owner', p.name) for p in owned]
        maintained = [('Maintainer', p.name) for p in maintained]
        return owned + maintained

    def release_downloads(self, package_name, version):
        """
        Retrieve a list of files and download count for a given package and
        release version. Returns an empty list when nothing matches.
        """
        session = DBSession()
        release_files = ReleaseFile.by_release(session, package_name, version)
        if not release_files:
            # FIX: previously fell through and returned None (unmarshalable
            # over XML-RPC without allow_none); always return a list.
            return []
        return [(f.release.package.name, f.filename)
                for f in release_files]

    def release_urls(self, package_name, version):
        """
        Retrieve a list of download URLs for the given package release.
        Returns a list of dicts with the following keys:
            url
            packagetype ('sdist', 'bdist', etc)
            filename
            size
            md5_digest
            downloads
            has_sig
            python_version (required version, or 'source', or 'any')
            comment_text
        """
        session = DBSession()
        # Normalize a possible None into an empty iterable.
        release_files = ReleaseFile.by_release(session, package_name,
                                               version) or []
        return [{'url': f.url,
                 'packagetype': f.package_type,
                 'filename': f.filename,
                 'size': f.size,
                 'md5_digest': f.md5_digest,
                 'downloads': f.downloads,
                 'has_sig': f.has_sig,
                 'comment_text': f.comment_text,
                 'python_version': f.python_version
                 }
                for f in release_files]

    def release_data(self, package_name, version):
        """
        Retrieve metadata describing a specific package release.
        Returns a dict with keys for:
            name
            version
            stable_version
            author
            author_email
            maintainer
            maintainer_email
            home_page
            license
            summary
            description
            keywords
            platform
            download_url
            classifiers (list of classifier strings)
            bugtrack_url
            docs_url (URL of the packages.python.org docs
                      if they've been supplied)
        If the release does not exist, an empty dictionary is returned.
        """
        session = DBSession()
        release = Release.by_version(session, package_name, version)
        if release is None:
            # FIX: the docstring promises an empty dict for a missing
            # release, but the method used to return None implicitly.
            return {}
        result = {'name': release.package.name,
                  'version': release.version,
                  'stable_version': '',
                  'author': release.author.name,
                  'author_email': release.author.email,
                  'home_page': release.home_page,
                  'license': release.license,
                  'summary': release.summary,
                  'description': release.description,
                  'keywords': release.keywords,
                  'platform': release.platform,
                  'download_url': release.download_url,
                  'classifiers': [c.name for c in release.classifiers],
                  'bugtrack_url': release.bugtrack_url,
                  'docs_url': release.docs_url,
                  }
        if release.maintainer:
            result.update({'maintainer': release.maintainer.name,
                           'maintainer_email': release.maintainer.email,
                           })
        # Replace None values with '' so everything marshals over XML-RPC.
        return dict([(key, val or '') for key, val in result.items()])

    def search(self, spec, operator='and'):
        """
        Search the package database using the indicated search spec.
        The spec may include any of the keywords described in the above list
        (except 'stable_version' and 'classifiers'),
        for example: {'description': 'spam'} will search description fields.
        Within the spec, a field's value can be a string or a list of strings
        (the values within the list are combined with an OR),
        for example: {'name': ['foo', 'bar']}.
        Valid keys for the spec dict are listed here. Invalid keys are ignored:
            name
            version
            author
            author_email
            maintainer
            maintainer_email
            home_page
            license
            summary
            description
            keywords
            platform
            download_url
        Arguments for different fields are combined using either "and"
        (the default) or "or".
        Example: search({'name': 'foo', 'description': 'bar'}, 'or').
        The results are returned as a list of dicts
            {'name': package name,
             'version': package release version,
             'summary': package release summary}
        """
        api = pypi.proxy
        rv = []
        # Search the upstream proxy one key at a time.
        # NOTE(review): `True` is passed where the upstream expects the
        # operator string -- confirm the proxy tolerates this.
        for k, v in spec.items():
            rv += api.search({k: v}, True)
        # Search the local index.
        session = DBSession()
        release = Release.search(session, spec, operator)
        rv += [{'name': r.package.name,
                'version': r.version,
                'summary': r.summary,
                # hack https://mail.python.org/pipermail/catalog-sig/2012-October/004633.html
                '_pypi_ordering': '',
                } for r in release]
        return rv

    def browse(self, classifiers):
        """
        Retrieve a list of (name, version) pairs of all releases classified
        with all of the given classifiers. 'classifiers' must be a list of
        Trove classifier strings.

        changelog(since)
        Retrieve a list of four-tuples (name, version, timestamp, action)
        since the given timestamp. All timestamps are UTC values.
        The argument is a UTC integer seconds since the epoch.
        """
        session = DBSession()
        release = Release.by_classifiers(session, classifiers)
        rv = [(r.package.name, r.version) for r in release]
        return rv
| last-g/pyshop | pyshop/views/xmlrpc.py | Python | bsd-3-clause | 8,468 |
# -*-coding:Utf-8 -*
# Copyright (c) 2012 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier définissant la classe Pion, détaillée plus bas."""
from abstraits.obase import BaseObj
class Pion(BaseObj):

    """Class representing a pawn (a game piece on the board)."""

    def __init__(self, couleur, numero):
        """Pawn constructor.

        couleur -- the pawn's color
        numero  -- the pawn's number within that color
        """
        BaseObj.__init__(self)
        self.couleur = couleur
        self.numero = numero
        self._construire()

    def __getnewargs__(self):
        # Default constructor arguments used when unpickling.
        return ("", 0)
| stormi/tsunami | src/secondaires/jeux/plateaux/oie/pion.py | Python | bsd-3-clause | 1,992 |
# -*- coding: utf-8 -*-
from loading import load_plugins, register_plugin
from plugz import PluginTypeBase
from plugintypes import StandardPluginType
__author__ = 'Matti Gruener'
__email__ = 'matti@mistermatti.com'
__version__ = '0.1.5'

# FIX: Python honors ``__all__`` (lowercase) for ``from plugz import *``,
# and it must contain the public *names* as strings. The previous
# ``__ALL__`` spelling had no effect and held the objects themselves.
__all__ = ['load_plugins', 'register_plugin', 'StandardPluginType',
           'PluginTypeBase']
| mistermatti/plugz | plugz/__init__.py | Python | bsd-3-clause | 317 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (c) 2013-2014, Martín Gaitán
# Copyright (c) 2012-2013, Alexander Jung-Loddenkemper
# This file is part of Waliki (http://waliki.nqnwebs.com/)
# License: BSD (https://github.com/mgaitan/waliki/blob/master/LICENSE)
#===============================================================================
# DOCS
#===============================================================================
"""All supported markups
"""
#===============================================================================
# IMPORTS
#===============================================================================
import re
import docutils.core
import docutils.io
import markdown
import textwrap
from rst2html5 import HTML5Writer
import wiki
#===============================================================================
# MARKUP BASE
#===============================================================================
class Markup(object):
    """ Base markup class."""
    NAME = 'Text'            # human-readable markup name
    META_LINE = '%s: %s\n'   # template serializing one metadata key/value
    EXTENSION = '.txt'       # file extension for pages in this markup
    HOWTO = """ """          # editor help snippet, rendered by howto()

    def __init__(self, raw_content):
        self.raw_content = raw_content

    @classmethod
    def render_meta(cls, key, value):
        # Serialize one metadata entry using this markup's META_LINE.
        return cls.META_LINE % (key, value)

    def process(self):
        """
        return (html, body, meta) where HTML is the rendered output
        body is the editable content (text), and meta is
        a dictionary with at least ['title', 'tags'] keys
        """
        raise NotImplementedError("override in a subclass")

    @classmethod
    def howto(cls):
        # Render the class's HOWTO snippet through its own pipeline and
        # return only the HTML part of the (html, body, meta) triple.
        return cls(textwrap.dedent(cls.HOWTO)).process()[0]
#===============================================================================
# MARKDOWN
#===============================================================================
class Markdown(Markup):
    NAME = 'markdown'
    META_LINE = '%s: %s\n'
    EXTENSION = '.md'
    HOWTO = """
    This editor is [markdown][] featured.

        * I am
        * a
        * list

    Turns into:

    * I am
    * a
    * list

    `**bold** and *italics*` turn into **bold** and *italics*. Very easy!

    Create links with `[Wiki](http://github.com/alexex/wiki)`.
    They turn into [Wiki][http://github.com/alexex/wiki].

    Headers are as follows:

        # Level 1
        ## Level 2
        ### Level 3

    [markdown]: http://daringfireball.net/projects/markdown/
    """

    def process(self):
        # Processes Markdown text to HTML, returns original markdown text,
        # and adds meta
        md = markdown.Markdown(['codehilite', 'fenced_code', 'meta'])
        html = md.convert(self.raw_content)
        # NOTE(review): assumes raw_content always has a blank line
        # separating the meta block from the body; content without '\n\n'
        # raises ValueError here -- confirm upstream guarantees it.
        meta_lines, body = self.raw_content.split('\n\n', 1)
        # Metadata parsed by the 'meta' extension during convert().
        meta = md.Meta
        return html, body, meta
#===============================================================================
# RESTRUCTURED TEXT
#===============================================================================
class RestructuredText(Markup):
    NAME = 'restructuredtext'
    META_LINE = '.. %s: %s\n'
    IMAGE_LINE = '.. image:: %(url)s'
    LINK_LINE = '`%(filename)s <%(url)s>`_'
    EXTENSION = '.rst'
    HOWTO = """
    This editor is `reStructuredText`_ featured::

        * I am
        * a
        * list

    Turns into:

    * I am
    * a
    * list

    ``**bold** and *italics*`` turn into **bold** and *italics*. Very easy!

    Create links with ```Wiki <http://github.com/alexex/wiki>`_``.
    They turn into `Wiki <https://github.com/alexex/wiki>`_.

    Headers are just any underline (and, optionally, overline).
    For example::

        Level 1
        *******

        Level 2
        -------

        Level 3
        +++++++

    .. _reStructuredText: http://docutils.sourceforge.net/rst.html
    """

    def process(self):
        # Render raw reStructuredText; returns (html, body, meta).
        settings = {'initial_header_level': 2,
                    'record_dependencies': True,
                    'stylesheet_path': None,
                    'link_stylesheet': True,
                    'syntax_highlight': 'short',
                    }
        html = self._rst2html(self.raw_content,
                              settings_overrides=settings)
        # Convert unknown links to internal wiki links.
        # Examples:
        #   Something_ will link to '/something'
        #   `something great`_ to '/something_great'
        #   `another thing <thing>`_ '/thing'
        refs = re.findall(r'Unknown target name: "(.*)"', html)
        if refs:
            # Append autogenerated targets and render a second time so the
            # previously-unresolved references become wiki links.
            content = self.raw_content + self.get_autolinks(refs)
            html = self._rst2html(content, settings_overrides=settings)
        meta_lines, body = self.raw_content.split('\n\n', 1)
        meta = self._parse_meta(meta_lines.split('\n'))
        return html, body, meta

    def get_autolinks(self, refs):
        # Build ``.. _name: /slug`` target lines for each unresolved
        # reference name, slugified via wiki.urlify.
        autolinks = '\n'.join(['.. _%s: /%s' % (ref, wiki.urlify(ref, False))
                               for ref in refs])
        return '\n\n' + autolinks

    def _rst2html(self, source, source_path=None,
                  source_class=docutils.io.StringInput,
                  destination_path=None, reader=None, reader_name='standalone',
                  parser=None, parser_name='restructuredtext', writer=None,
                  writer_name=None, settings=None, settings_spec=None,
                  settings_overrides=None, config_section=None,
                  enable_exit_status=None):
        # Thin wrapper around docutils' programmatic publisher returning
        # only the rendered body fragment (not a full HTML document).
        if not writer:
            writer = HTML5Writer()
        # Taken from Nikola
        # http://bit.ly/14CmQyh
        output, pub = docutils.core.publish_programmatically(
            source=source, source_path=source_path, source_class=source_class,
            destination_class=docutils.io.StringOutput,
            destination=None, destination_path=destination_path,
            reader=reader, reader_name=reader_name,
            parser=parser, parser_name=parser_name,
            writer=writer, writer_name=writer_name,
            settings=settings, settings_spec=settings_spec,
            settings_overrides=settings_overrides,
            config_section=config_section,
            enable_exit_status=enable_exit_status)
        return pub.writer.parts['body']

    def _parse_meta(self, lines):
        """ Parse Meta-Data. Taken from Python-Markdown"""
        # Matches '.. key: value' lines; repeated keys accumulate values.
        META_RE = re.compile(r'^\.\.\s(?P<key>.*?): (?P<value>.*)')
        meta = {}
        key = None
        for line in lines:
            if line.strip() == '':
                continue
            m1 = META_RE.match(line)
            if m1:
                key = m1.group('key').lower().strip()
                value = m1.group('value').strip()
                try:
                    meta[key].append(value)
                except KeyError:
                    meta[key] = [value]
        return meta
#===============================================================================
# MAIN
#===============================================================================
if __name__ == "__main__":
    # Running the module directly just prints its docstring.
    print(__doc__)
| mgaitan/waliki_flask | waliki/markup.py | Python | bsd-3-clause | 7,181 |
from setuptools import setup, find_packages
setup(
    name='zeit.content.gallery',
    version='2.9.2.dev0',
    author='gocept, Zeit Online',
    author_email='zon-backend@zeit.de',
    url='http://www.zeit.de/',
    # NOTE(review): description says "Portraitbox" but this package is the
    # gallery content type -- looks like a copy-paste from a sibling
    # package; confirm and correct the intended description.
    description="vivi Content-Type Portraitbox",
    packages=find_packages('src'),
    package_dir={'': 'src'},
    include_package_data=True,
    zip_safe=False,
    license='BSD',
    namespace_packages=['zeit', 'zeit.content'],
    install_requires=[
        'cssselect',
        'Pillow',
        'gocept.form',
        'setuptools',
        'zeit.cms >= 3.0.dev0',
        'zeit.connector>=2.4.0.dev0',
        'zeit.imp>=0.15.0.dev0',
        'zeit.content.image',
        'zeit.push>=1.21.0.dev0',
        'zeit.wysiwyg',
        'zope.app.appsetup',
        'zope.app.testing',
        'zope.component',
        'zope.formlib',
        'zope.interface',
        'zope.publisher',
        'zope.security',
        'zope.testing',
    ],
    entry_points={
        'fanstatic.libraries': [
            'zeit_content_gallery=zeit.content.gallery.browser.resources:lib',
        ],
    },
)
| ZeitOnline/zeit.content.gallery | setup.py | Python | bsd-3-clause | 1,113 |
# Generated by Django 2.2.1 on 2019-09-19 11:28
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds the Google-Sheets-sync configuration columns to system_settings.

    dependencies = [
        ('dojo', '0021_cve_index'),
    ]

    operations = [
        migrations.AddField(
            model_name='system_settings',
            name='credentials',
            field=models.CharField(max_length=3000, blank=True),
        ),
        migrations.AddField(
            model_name='system_settings',
            name='column_widths',
            field=models.CharField(max_length=1500, blank=True),
        ),
        migrations.AddField(
            model_name='system_settings',
            name='drive_folder_ID',
            field=models.CharField(max_length=100, blank=True),
        ),
        migrations.AddField(
            model_name='system_settings',
            name='enable_google_sheets',
            # NOTE(review): a nullable BooleanField with default=False is
            # unusual -- confirm NULL is a meaningful third state here.
            field=models.BooleanField(null=True, blank=True, default=False),
        ),
    ]
| rackerlabs/django-DefectDojo | dojo/db_migrations/0022_google_sheet_sync_additions.py | Python | bsd-3-clause | 963 |
# vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4
#
# Copyright (c) 2014, Arista Networks, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# Neither the name of Arista Networks nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL ARISTA NETWORKS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
# BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
# IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
''' The setup script is the center of all activity in building,
distributing, and installing modules using the Distutils. The
main purpose of the setup script is to describe your module
distribution to the Distutils, so that the various commands
that operate on your modules do the right thing.
'''
import os
from glob import glob
from setuptools import setup, find_packages
from rphm import __version__, __author__
def find_modules(pkg):
    ''' Find the modules that belong in this package. '''
    # The package directory itself plus every nested sub-directory,
    # in os.walk (top-down) order.
    found = [pkg]
    for parent, subdirs, _files in os.walk(pkg):
        found.extend(os.path.join(parent, name) for name in subdirs)
    return found
# Install under the active virtualenv when one is set, system root otherwise.
INSTALL_ROOT = os.getenv('VIRTUAL_ENV', '')
CONF_PATH = INSTALL_ROOT + '/persist/sys'

INSTALL_REQUIREMENTS = [
    'jsonrpclib'
]

TEST_REQUIREMENTS = [
    'mock'
]

# FIX: read the long description up front so the file handle is closed
# promptly instead of leaking from an inline open() inside setup().
with open('README.md') as readme:
    LONG_DESCRIPTION = readme.read()

setup(
    name='rphm',
    version=__version__,
    description='EOS extension to generate SNMP traps based on counter thresholds',
    long_description=LONG_DESCRIPTION,
    author=__author__,
    author_email='eosplus-dev@arista.com',
    url='http://eos.arista.com',
    license='BSD-3',
    install_requires=INSTALL_REQUIREMENTS,
    tests_require=TEST_REQUIREMENTS,
    packages=find_modules('rphm'),
    scripts=glob('bin/*'),
    data_files=[
        (CONF_PATH, ['conf/rphm.conf'])
    ]
)
| arista-eosext/rphm | setup.py | Python | bsd-3-clause | 3,002 |
import os
import random
import time
import hashlib
import warnings
from tempfile import mkdtemp
from shutil import rmtree
from six.moves.urllib.parse import urlparse
from six import BytesIO
from twisted.trial import unittest
from twisted.internet import defer
from scrapy.pipelines.files import FilesPipeline, FSFilesStore, S3FilesStore, GCSFilesStore
from scrapy.item import Item, Field
from scrapy.http import Request, Response
from scrapy.settings import Settings
from scrapy.utils.python import to_bytes
from scrapy.utils.test import assert_aws_environ, get_s3_content_and_delete
from scrapy.utils.test import assert_gcs_environ, get_gcs_content_and_delete
from scrapy.utils.boto import is_botocore
from tests import mock
def _mocked_download_func(request, info):
response = request.meta.get('response')
return response() if callable(response) else response
class FilesPipelineTestCase(unittest.TestCase):
    """End-to-end tests for FilesPipeline backed by a temporary FSFilesStore."""
    def setUp(self):
        self.tempdir = mkdtemp()
        self.pipeline = FilesPipeline.from_settings(Settings({'FILES_STORE': self.tempdir}))
        # Swap in the canned-response downloader so no network is touched.
        self.pipeline.download_func = _mocked_download_func
        self.pipeline.open_spider(None)
    def tearDown(self):
        rmtree(self.tempdir)
    def test_file_path(self):
        # file_path() should derive a stable hash-based path from the URL,
        # keeping the file extension only when the URL has one.
        file_path = self.pipeline.file_path
        self.assertEqual(file_path(Request("https://dev.mydeco.com/mydeco.pdf")),
                         'full/c9b564df929f4bc635bdd19fde4f3d4847c757c5.pdf')
        self.assertEqual(file_path(Request("http://www.maddiebrown.co.uk///catalogue-items//image_54642_12175_95307.txt")),
                         'full/4ce274dd83db0368bafd7e406f382ae088e39219.txt')
        self.assertEqual(file_path(Request("https://dev.mydeco.com/two/dirs/with%20spaces%2Bsigns.doc")),
                         'full/94ccc495a17b9ac5d40e3eabf3afcb8c2c9b9e1a.doc')
        self.assertEqual(file_path(Request("http://www.dfsonline.co.uk/get_prod_image.php?img=status_0907_mdm.jpg")),
                         'full/4507be485f38b0da8a0be9eb2e1dfab8a19223f2.jpg')
        self.assertEqual(file_path(Request("http://www.dorma.co.uk/images/product_details/2532/")),
                         'full/97ee6f8a46cbbb418ea91502fd24176865cf39b2')
        self.assertEqual(file_path(Request("http://www.dorma.co.uk/images/product_details/2532")),
                         'full/244e0dd7d96a3b7b01f54eded250c9e272577aa1')
        # Supplying response/info must not change the computed path.
        self.assertEqual(file_path(Request("http://www.dorma.co.uk/images/product_details/2532"),
                         response=Response("http://www.dorma.co.uk/images/product_details/2532"),
                         info=object()),
                         'full/244e0dd7d96a3b7b01f54eded250c9e272577aa1')
    def test_fs_store(self):
        # FILES_STORE pointing at a local dir selects the filesystem store.
        assert isinstance(self.pipeline.store, FSFilesStore)
        self.assertEqual(self.pipeline.store.basedir, self.tempdir)
        path = 'some/image/key.jpg'
        fullpath = os.path.join(self.tempdir, 'some', 'image', 'key.jpg')
        self.assertEqual(self.pipeline.store._get_filesystem_path(path), fullpath)
    @defer.inlineCallbacks
    def test_file_not_expired(self):
        # stat_file reporting last_modified == now means the cached copy is
        # fresh: the mocked checksum 'abc' must be reused as-is.
        item_url = "http://example.com/file.pdf"
        item = _create_item_with_files(item_url)
        patchers = [
            mock.patch.object(FilesPipeline, 'inc_stats', return_value=True),
            mock.patch.object(FSFilesStore, 'stat_file', return_value={
                'checksum': 'abc', 'last_modified': time.time()}),
            mock.patch.object(FilesPipeline, 'get_media_requests',
                              return_value=[_prepare_request_object(item_url)])
        ]
        for p in patchers:
            p.start()
        result = yield self.pipeline.process_item(item, None)
        self.assertEqual(result['files'][0]['checksum'], 'abc')
        for p in patchers:
            p.stop()
    @defer.inlineCallbacks
    def test_file_expired(self):
        # last_modified set twice as old as the expiry window forces a
        # re-download, so the checksum must differ from the stale 'abc'.
        item_url = "http://example.com/file2.pdf"
        item = _create_item_with_files(item_url)
        patchers = [
            mock.patch.object(FSFilesStore, 'stat_file', return_value={
                'checksum': 'abc',
                'last_modified': time.time() - (self.pipeline.expires * 60 * 60 * 24 * 2)}),
            mock.patch.object(FilesPipeline, 'get_media_requests',
                              return_value=[_prepare_request_object(item_url)]),
            mock.patch.object(FilesPipeline, 'inc_stats', return_value=True)
        ]
        for p in patchers:
            p.start()
        result = yield self.pipeline.process_item(item, None)
        self.assertNotEqual(result['files'][0]['checksum'], 'abc')
        for p in patchers:
            p.stop()
class FilesPipelineTestCaseFields(unittest.TestCase):
    """Checks that the item fields used for URLs and results are configurable."""
    def test_item_fields_default(self):
        # With no extra settings the pipeline reads 'file_urls' and
        # writes 'files'.
        class TestItem(Item):
            name = Field()
            file_urls = Field()
            files = Field()
        # The pipeline must accept both Item subclasses and plain dicts.
        for cls in TestItem, dict:
            url = 'http://www.example.com/files/1.txt'
            item = cls({'name': 'item1', 'file_urls': [url]})
            pipeline = FilesPipeline.from_settings(Settings({'FILES_STORE': 's3://example/files/'}))
            requests = list(pipeline.get_media_requests(item, None))
            self.assertEqual(requests[0].url, url)
            results = [(True, {'url': url})]
            pipeline.item_completed(results, item, None)
            self.assertEqual(item['files'], [results[0][1]])
    def test_item_fields_override_settings(self):
        # FILES_URLS_FIELD / FILES_RESULT_FIELD settings redirect which
        # item fields are read and written.
        class TestItem(Item):
            name = Field()
            files = Field()
            stored_file = Field()
        for cls in TestItem, dict:
            url = 'http://www.example.com/files/1.txt'
            item = cls({'name': 'item1', 'files': [url]})
            pipeline = FilesPipeline.from_settings(Settings({
                'FILES_STORE': 's3://example/files/',
                'FILES_URLS_FIELD': 'files',
                'FILES_RESULT_FIELD': 'stored_file'
            }))
            requests = list(pipeline.get_media_requests(item, None))
            self.assertEqual(requests[0].url, url)
            results = [(True, {'url': url})]
            pipeline.item_completed(results, item, None)
            self.assertEqual(item['stored_file'], [results[0][1]])
class FilesPipelineTestCaseCustomSettings(unittest.TestCase):
    """Verifies precedence between settings keys and class attributes for
    FilesPipeline and its subclasses.
    """
    # Defaults declared on FilesPipeline itself.
    default_cls_settings = {
        "EXPIRES": 90,
        "FILES_URLS_FIELD": "file_urls",
        "FILES_RESULT_FIELD": "files"
    }
    # (class attribute, settings key, instance attribute) triples.
    file_cls_attr_settings_map = {
        ("EXPIRES", "FILES_EXPIRES", "expires"),
        ("FILES_URLS_FIELD", "FILES_URLS_FIELD", "files_urls_field"),
        ("FILES_RESULT_FIELD", "FILES_RESULT_FIELD", "files_result_field")
    }
    def setUp(self):
        self.tempdir = mkdtemp()
    def tearDown(self):
        rmtree(self.tempdir)
    def _generate_fake_settings(self, prefix=None):
        """Build a settings dict with random values, optionally namespaced
        with ``PREFIX_`` (FILES_STORE is never namespaced).
        """
        def random_string():
            # randint bounds are inclusive: 97..122 covers exactly 'a'..'z'.
            # (The previous upper bound of 123 could emit '{'.)
            return "".join([chr(random.randint(97, 122)) for _ in range(10)])
        settings = {
            "FILES_EXPIRES": random.randint(100, 1000),
            "FILES_URLS_FIELD": random_string(),
            "FILES_RESULT_FIELD": random_string(),
            "FILES_STORE": self.tempdir
        }
        if not prefix:
            return settings
        return {prefix.upper() + "_" + k if k != "FILES_STORE" else k: v for k, v in settings.items()}
    def _generate_fake_pipeline(self):
        """Return a FilesPipeline subclass overriding the class attributes."""
        class UserDefinedFilePipeline(FilesPipeline):
            EXPIRES = 1001
            FILES_URLS_FIELD = "alfa"
            FILES_RESULT_FIELD = "beta"
        return UserDefinedFilePipeline
    def test_different_settings_for_different_instances(self):
        """
        If there are different instances with different settings they should keep
        different settings.
        """
        custom_settings = self._generate_fake_settings()
        another_pipeline = FilesPipeline.from_settings(Settings(custom_settings))
        one_pipeline = FilesPipeline(self.tempdir)
        for pipe_attr, settings_attr, pipe_ins_attr in self.file_cls_attr_settings_map:
            default_value = self.default_cls_settings[pipe_attr]
            self.assertEqual(getattr(one_pipeline, pipe_attr), default_value)
            custom_value = custom_settings[settings_attr]
            self.assertNotEqual(default_value, custom_value)
            self.assertEqual(getattr(another_pipeline, pipe_ins_attr), custom_value)
    def test_subclass_attributes_preserved_if_no_settings(self):
        """
        If subclasses override class attributes and there are no special settings those values should be kept.
        """
        pipe_cls = self._generate_fake_pipeline()
        pipe = pipe_cls.from_settings(Settings({"FILES_STORE": self.tempdir}))
        for pipe_attr, settings_attr, pipe_ins_attr in self.file_cls_attr_settings_map:
            custom_value = getattr(pipe, pipe_ins_attr)
            self.assertNotEqual(custom_value, self.default_cls_settings[pipe_attr])
            self.assertEqual(getattr(pipe, pipe_ins_attr), getattr(pipe, pipe_attr))
    def test_subclass_attrs_preserved_custom_settings(self):
        """
        If file settings are defined but they are not defined for subclass
        settings should be preserved.
        """
        pipeline_cls = self._generate_fake_pipeline()
        settings = self._generate_fake_settings()
        pipeline = pipeline_cls.from_settings(Settings(settings))
        for pipe_attr, settings_attr, pipe_ins_attr in self.file_cls_attr_settings_map:
            value = getattr(pipeline, pipe_ins_attr)
            setting_value = settings.get(settings_attr)
            self.assertNotEqual(value, self.default_cls_settings[pipe_attr])
            self.assertEqual(value, setting_value)
    def test_no_custom_settings_for_subclasses(self):
        """
        If there are no settings for subclass and no subclass attributes, pipeline should use
        attributes of base class.
        """
        class UserDefinedFilesPipeline(FilesPipeline):
            pass
        user_pipeline = UserDefinedFilesPipeline.from_settings(Settings({"FILES_STORE": self.tempdir}))
        for pipe_attr, settings_attr, pipe_ins_attr in self.file_cls_attr_settings_map:
            # Values from settings for custom pipeline should be set on pipeline instance.
            custom_value = self.default_cls_settings.get(pipe_attr.upper())
            self.assertEqual(getattr(user_pipeline, pipe_ins_attr), custom_value)
    def test_custom_settings_for_subclasses(self):
        """
        If there are custom settings for subclass and NO class attributes, pipeline should use custom
        settings.
        """
        class UserDefinedFilesPipeline(FilesPipeline):
            pass
        prefix = UserDefinedFilesPipeline.__name__.upper()
        settings = self._generate_fake_settings(prefix=prefix)
        user_pipeline = UserDefinedFilesPipeline.from_settings(Settings(settings))
        for pipe_attr, settings_attr, pipe_inst_attr in self.file_cls_attr_settings_map:
            # Values from settings for custom pipeline should be set on pipeline instance.
            custom_value = settings.get(prefix + "_" + settings_attr)
            self.assertNotEqual(custom_value, self.default_cls_settings[pipe_attr])
            self.assertEqual(getattr(user_pipeline, pipe_inst_attr), custom_value)
    def test_custom_settings_and_class_attrs_for_subclasses(self):
        """
        If there are custom settings for subclass AND class attributes
        setting keys are preferred and override attributes.
        """
        pipeline_cls = self._generate_fake_pipeline()
        prefix = pipeline_cls.__name__.upper()
        settings = self._generate_fake_settings(prefix=prefix)
        user_pipeline = pipeline_cls.from_settings(Settings(settings))
        for pipe_cls_attr, settings_attr, pipe_inst_attr in self.file_cls_attr_settings_map:
            custom_value = settings.get(prefix + "_" + settings_attr)
            self.assertNotEqual(custom_value, self.default_cls_settings[pipe_cls_attr])
            self.assertEqual(getattr(user_pipeline, pipe_inst_attr), custom_value)
    def test_cls_attrs_with_DEFAULT_prefix(self):
        # DEFAULT_-prefixed class attributes are an alternative way to
        # override the default field names.
        class UserDefinedFilesPipeline(FilesPipeline):
            DEFAULT_FILES_RESULT_FIELD = "this"
            DEFAULT_FILES_URLS_FIELD = "that"
        pipeline = UserDefinedFilesPipeline.from_settings(Settings({"FILES_STORE": self.tempdir}))
        self.assertEqual(pipeline.files_result_field, "this")
        self.assertEqual(pipeline.files_urls_field, "that")
    def test_user_defined_subclass_default_key_names(self):
        """Test situation when user defines subclass of FilesPipeline,
        but uses attribute names for default pipeline (without prefixing
        them with pipeline class name).
        """
        settings = self._generate_fake_settings()
        class UserPipe(FilesPipeline):
            pass
        pipeline_cls = UserPipe.from_settings(Settings(settings))
        for pipe_attr, settings_attr, pipe_inst_attr in self.file_cls_attr_settings_map:
            expected_value = settings.get(settings_attr)
            self.assertEqual(getattr(pipeline_cls, pipe_inst_attr),
                             expected_value)
class TestS3FilesStore(unittest.TestCase):
    """Integration test for S3FilesStore; needs AWS credentials and
    S3_TEST_FILE_URI in the environment, otherwise it is skipped.
    """
    @defer.inlineCallbacks
    def test_persist(self):
        assert_aws_environ()
        uri = os.environ.get('S3_TEST_FILE_URI')
        if not uri:
            raise unittest.SkipTest("No S3 URI available for testing")
        data = b"TestS3FilesStore: \xe2\x98\x83"
        buf = BytesIO(data)
        meta = {'foo': 'bar'}
        path = ''
        store = S3FilesStore(uri)
        yield store.persist_file(
            path, buf, info=None, meta=meta,
            headers={'Content-Type': 'image/png'})
        s = yield store.stat_file(path, info=None)
        self.assertIn('last_modified', s)
        self.assertIn('checksum', s)
        self.assertEqual(s['checksum'], '3187896a9657a28163abb31667df64c8')
        u = urlparse(uri)
        content, key = get_s3_content_and_delete(
            u.hostname, u.path[1:], with_key=True)
        self.assertEqual(content, data)
        # Metadata access differs between botocore dicts and old boto keys.
        if is_botocore():
            self.assertEqual(key['Metadata'], {'foo': 'bar'})
            self.assertEqual(
                key['CacheControl'], S3FilesStore.HEADERS['Cache-Control'])
            self.assertEqual(key['ContentType'], 'image/png')
        else:
            self.assertEqual(key.metadata, {'foo': 'bar'})
            self.assertEqual(
                key.cache_control, S3FilesStore.HEADERS['Cache-Control'])
            self.assertEqual(key.content_type, 'image/png')
class TestGCSFilesStore(unittest.TestCase):
    """Integration test for GCSFilesStore; needs GCS credentials and
    GCS_TEST_FILE_URI in the environment, otherwise it is skipped.
    """
    @defer.inlineCallbacks
    def test_persist(self):
        assert_gcs_environ()
        uri = os.environ.get('GCS_TEST_FILE_URI')
        if not uri:
            raise unittest.SkipTest("No GCS URI available for testing")
        data = b"TestGCSFilesStore: \xe2\x98\x83"
        buf = BytesIO(data)
        meta = {'foo': 'bar'}
        path = 'full/filename'
        store = GCSFilesStore(uri)
        # Override the ACL policy so the uploaded blob's ACL can be asserted.
        store.POLICY = 'authenticatedRead'
        expected_policy = {'role': 'READER', 'entity': 'allAuthenticatedUsers'}
        yield store.persist_file(path, buf, info=None, meta=meta, headers=None)
        s = yield store.stat_file(path, info=None)
        self.assertIn('last_modified', s)
        self.assertIn('checksum', s)
        self.assertEqual(s['checksum'], 'zc2oVgXkbQr2EQdSdw3OPA==')
        u = urlparse(uri)
        content, acl, blob = get_gcs_content_and_delete(u.hostname, u.path[1:]+path)
        self.assertEqual(content, data)
        self.assertEqual(blob.metadata, {'foo': 'bar'})
        self.assertEqual(blob.cache_control, GCSFilesStore.CACHE_CONTROL)
        self.assertEqual(blob.content_type, 'application/octet-stream')
        self.assertIn(expected_policy, acl)
class ItemWithFiles(Item):
    # Minimal item using FilesPipeline's default field names.
    file_urls = Field()
    files = Field()
def _create_item_with_files(*files):
    """Return an ItemWithFiles whose 'file_urls' field holds *files*."""
    populated = ItemWithFiles()
    populated['file_urls'] = files
    return populated
def _prepare_request_object(item_url):
    """Build a Request carrying a canned 200 Response in its meta dict,
    for use with ``_mocked_download_func``.
    """
    canned_response = Response(item_url, status=200, body=b'data')
    return Request(item_url, meta={'response': canned_response})
if __name__ == "__main__":
    # Run the test suite when this module is executed directly.
    unittest.main()
| wujuguang/scrapy | tests/test_pipeline_files.py | Python | bsd-3-clause | 16,352 |
#!/usr/bin/python3
#-*- coding:utf-8 -*-
from functools import wraps
def xxx(func):
    """Decorator that scales the wrapped function's argument by 100.

    ``functools.wraps`` preserves ``func``'s metadata and exposes the
    undecorated function as ``wrapper.__wrapped__``.
    """
    @wraps(func)
    def my(n):
        # Propagate the wrapped function's return value instead of
        # silently discarding it (the original body dropped it).
        return func(n * 100)
    return my
@xxx
def abc(n):
    """Print *n*; calls go through the ``xxx`` wrapper, which scales the
    argument by 100 first.  NOTE: the name shadows the stdlib ``abc`` module
    within this script.
    """
    print(n)
if __name__ == '__main__':
    abc(10)  # decorated: prints 1000
    abc.__wrapped__(10)  # bypasses the decorator: prints 10
    xx = abc.__wrapped__
    xx(1234)  # prints 1234
| yuncliu/Learn | python/decorator.py | Python | bsd-3-clause | 299 |
"""Metrics to assess performance on classification task given scores
Functions named as ``*_score`` return a scalar value to maximize: the higher
the better
Function named as ``*_error`` or ``*_loss`` return a scalar value to minimize:
the lower the better
"""
# Authors: Alexandre Gramfort <alexandre.gramfort@inria.fr>
# Mathieu Blondel <mathieu@mblondel.org>
# Olivier Grisel <olivier.grisel@ensta.org>
# Arnaud Joly <a.joly@ulg.ac.be>
# Jochen Wersdorfer <jochen@wersdoerfer.de>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# Joel Nothman <joel.nothman@gmail.com>
# Noel Dawe <noel@dawe.me>
# License: BSD 3 clause
from __future__ import division
import warnings
import numpy as np
from scipy.sparse import csr_matrix
from ..utils import check_consistent_length
from ..utils import column_or_1d, check_array
from ..utils.multiclass import type_of_target
from ..utils.fixes import isclose
from ..utils.stats import rankdata
from .base import _average_binary_score
from .base import UndefinedMetricWarning
def auc(x, y, reorder=False):
    """Compute Area Under the Curve (AUC) using the trapezoidal rule
    This is a general function, given points on a curve.  For computing the
    area under the ROC-curve, see :func:`roc_auc_score`.
    Parameters
    ----------
    x : array, shape = [n]
        x coordinates.
    y : array, shape = [n]
        y coordinates.
    reorder : boolean, optional (default=False)
        If True, assume that the curve is ascending in the case of ties, as for
        an ROC curve. If the curve is non-ascending, the result will be wrong.
    Returns
    -------
    auc : float
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import metrics
    >>> y = np.array([1, 1, 2, 2])
    >>> pred = np.array([0.1, 0.4, 0.35, 0.8])
    >>> fpr, tpr, thresholds = metrics.roc_curve(y, pred, pos_label=2)
    >>> metrics.auc(fpr, tpr)
    0.75
    See also
    --------
    roc_auc_score : Computes the area under the ROC curve
    precision_recall_curve :
        Compute precision-recall pairs for different probability thresholds
    """
    check_consistent_length(x, y)
    x = column_or_1d(x)
    y = column_or_1d(y)
    if x.shape[0] < 2:
        # The trapezoidal rule needs at least two points.
        raise ValueError('At least 2 points are needed to compute'
                         ' area under curve, but x.shape = %s' % x.shape)
    direction = 1
    if reorder:
        # reorder the data points according to the x axis and using y to
        # break ties
        order = np.lexsort((y, x))
        x, y = x[order], y[order]
    else:
        dx = np.diff(x)
        if np.any(dx < 0):
            if np.all(dx <= 0):
                # x is monotonically decreasing: integrate right-to-left and
                # flip the sign of the result back.
                direction = -1
            else:
                raise ValueError("Reordering is not turned on, and "
                                 "the x array is not increasing: %s" % x)
    area = direction * np.trapz(y, x)
    return area
def average_precision_score(y_true, y_score, average="macro",
                            sample_weight=None):
    """Compute average precision (AP) from prediction scores
    This score corresponds to the area under the precision-recall curve.
    Note: this implementation is restricted to the binary classification task
    or multilabel classification task.
    Parameters
    ----------
    y_true : array, shape = [n_samples] or [n_samples, n_classes]
        True binary labels in binary label indicators.
    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.
    average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
        If ``None``, the scores for each class are returned. Otherwise,
        this determines the type of averaging performed on the data:
        ``'micro'``:
            Calculate metrics globally by considering each element of the label
            indicator matrix as a label.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean.  This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label).
        ``'samples'``:
            Calculate metrics for each instance, and find their average.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    Returns
    -------
    average_precision : float
    References
    ----------
    .. [1] `Wikipedia entry for the Average precision
           <http://en.wikipedia.org/wiki/Average_precision>`_
    See also
    --------
    roc_auc_score : Area under the ROC curve
    precision_recall_curve :
        Compute precision-recall pairs for different probability thresholds
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import average_precision_score
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> average_precision_score(y_true, y_scores)  # doctest: +ELLIPSIS
    0.79...
    """
    # AP for a single binary problem: area under the PR curve via the
    # trapezoidal rule.  _average_binary_score applies it per class/sample
    # and combines according to `average`.
    def _binary_average_precision(y_true, y_score, sample_weight=None):
        precision, recall, thresholds = precision_recall_curve(
            y_true, y_score, sample_weight=sample_weight)
        return auc(recall, precision)
    return _average_binary_score(_binary_average_precision, y_true, y_score,
                                 average, sample_weight=sample_weight)
def roc_auc_score(y_true, y_score, average="macro", sample_weight=None):
    """Compute Area Under the Curve (AUC) from prediction scores
    Note: this implementation is restricted to the binary classification task
    or multilabel classification task in label indicator format.
    Parameters
    ----------
    y_true : array, shape = [n_samples] or [n_samples, n_classes]
        True binary labels in binary label indicators.
    y_score : array, shape = [n_samples] or [n_samples, n_classes]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.
    average : string, [None, 'micro', 'macro' (default), 'samples', 'weighted']
        If ``None``, the scores for each class are returned. Otherwise,
        this determines the type of averaging performed on the data:
        ``'micro'``:
            Calculate metrics globally by considering each element of the label
            indicator matrix as a label.
        ``'macro'``:
            Calculate metrics for each label, and find their unweighted
            mean.  This does not take label imbalance into account.
        ``'weighted'``:
            Calculate metrics for each label, and find their average, weighted
            by support (the number of true instances for each label).
        ``'samples'``:
            Calculate metrics for each instance, and find their average.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    Returns
    -------
    auc : float
    References
    ----------
    .. [1] `Wikipedia entry for the Receiver operating characteristic
            <http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
    See also
    --------
    average_precision_score : Area under the precision-recall curve
    roc_curve : Compute Receiver operating characteristic (ROC)
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import roc_auc_score
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> roc_auc_score(y_true, y_scores)
    0.75
    """
    # ROC AUC for a single binary problem; _average_binary_score applies it
    # per class/sample and combines according to `average`.
    def _binary_roc_auc_score(y_true, y_score, sample_weight=None):
        if len(np.unique(y_true)) != 2:
            # With a single class fpr or tpr is degenerate, so AUC is undefined.
            raise ValueError("Only one class present in y_true. ROC AUC score "
                             "is not defined in that case.")
        fpr, tpr, tresholds = roc_curve(y_true, y_score,
                                        sample_weight=sample_weight)
        return auc(fpr, tpr, reorder=True)
    return _average_binary_score(
        _binary_roc_auc_score, y_true, y_score, average,
        sample_weight=sample_weight)
def _binary_clf_curve(y_true, y_score, pos_label=None, sample_weight=None):
    """Calculate true and false positives per binary classification threshold.
    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True targets of binary classification
    y_score : array, shape = [n_samples]
        Estimated probabilities or decision function
    pos_label : int, optional (default=None)
        The label of the positive class
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    Returns
    -------
    fps : array, shape = [n_thresholds]
        A count of false positives, at index i being the number of negative
        samples assigned a score >= thresholds[i]. The total number of
        negative samples is equal to fps[-1] (thus true negatives are given by
        fps[-1] - fps).
    tps : array, shape = [n_thresholds := len(np.unique(y_score))]
        An increasing count of true positives, at index i being the number
        of positive samples assigned a score >= thresholds[i]. The total
        number of positive samples is equal to tps[-1] (thus false negatives
        are given by tps[-1] - tps).
    thresholds : array, shape = [n_thresholds]
        Decreasing score values.
    """
    check_consistent_length(y_true, y_score)
    y_true = column_or_1d(y_true)
    y_score = column_or_1d(y_score)
    if sample_weight is not None:
        sample_weight = column_or_1d(sample_weight)
    # ensure binary classification if pos_label is not specified
    classes = np.unique(y_true)
    if (pos_label is None and
        not (np.all(classes == [0, 1]) or
             np.all(classes == [-1, 1]) or
             np.all(classes == [0]) or
             np.all(classes == [-1]) or
             np.all(classes == [1]))):
        raise ValueError("Data is not binary and pos_label is not specified")
    elif pos_label is None:
        pos_label = 1.
    # make y_true a boolean vector
    y_true = (y_true == pos_label)
    # sort scores and corresponding truth values
    # (mergesort is stable, keeping tied scores in their original order)
    desc_score_indices = np.argsort(y_score, kind="mergesort")[::-1]
    y_score = y_score[desc_score_indices]
    y_true = y_true[desc_score_indices]
    if sample_weight is not None:
        weight = sample_weight[desc_score_indices]
    else:
        # Scalar 1. broadcasts in the weighted cumsum below.
        weight = 1.
    # y_score typically has many tied values. Here we extract
    # the indices associated with the distinct values. We also
    # concatenate a value for the end of the curve.
    # We need to use isclose to avoid spurious repeated thresholds
    # stemming from floating point roundoff errors.
    distinct_value_indices = np.where(np.logical_not(isclose(
        np.diff(y_score), 0)))[0]
    threshold_idxs = np.r_[distinct_value_indices, y_true.size - 1]
    # accumulate the true positives with decreasing threshold
    tps = (y_true * weight).cumsum()[threshold_idxs]
    if sample_weight is not None:
        fps = weight.cumsum()[threshold_idxs] - tps
    else:
        # Unweighted case: total count minus true positives.
        fps = 1 + threshold_idxs - tps
    return fps, tps, y_score[threshold_idxs]
def precision_recall_curve(y_true, probas_pred, pos_label=None,
                           sample_weight=None):
    """Compute precision-recall pairs for different probability thresholds
    Note: this implementation is restricted to the binary classification task.
    The precision is the ratio ``tp / (tp + fp)`` where ``tp`` is the number of
    true positives and ``fp`` the number of false positives. The precision is
    intuitively the ability of the classifier not to label as positive a sample
    that is negative.
    The recall is the ratio ``tp / (tp + fn)`` where ``tp`` is the number of
    true positives and ``fn`` the number of false negatives. The recall is
    intuitively the ability of the classifier to find all the positive samples.
    The last precision and recall values are 1. and 0. respectively and do not
    have a corresponding threshold.  This ensures that the graph starts on the
    x axis.
    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True targets of binary classification in range {-1, 1} or {0, 1}.
    probas_pred : array, shape = [n_samples]
        Estimated probabilities or decision function.
    pos_label : int, optional (default=None)
        The label of the positive class
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    Returns
    -------
    precision : array, shape = [n_thresholds + 1]
        Precision values such that element i is the precision of
        predictions with score >= thresholds[i] and the last element is 1.
    recall : array, shape = [n_thresholds + 1]
        Decreasing recall values such that element i is the recall of
        predictions with score >= thresholds[i] and the last element is 0.
    thresholds : array, shape = [n_thresholds := len(np.unique(probas_pred))]
        Increasing thresholds on the decision function used to compute
        precision and recall.
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import precision_recall_curve
    >>> y_true = np.array([0, 0, 1, 1])
    >>> y_scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> precision, recall, thresholds = precision_recall_curve(
    ...     y_true, y_scores)
    >>> precision  # doctest: +ELLIPSIS
    array([ 0.66...,  0.5       ,  1.        ,  1.        ])
    >>> recall
    array([ 1. ,  0.5,  0.5,  0. ])
    >>> thresholds
    array([ 0.35,  0.4 ,  0.8 ])
    """
    fps, tps, thresholds = _binary_clf_curve(y_true, probas_pred,
                                             pos_label=pos_label,
                                             sample_weight=sample_weight)
    # Every threshold has at least one predicted positive, so tps + fps > 0.
    precision = tps / (tps + fps)
    # tps[-1] is the total number of positives (== tp + fn at any threshold).
    recall = tps / tps[-1]
    # stop when full recall attained
    # and reverse the outputs so recall is decreasing
    last_ind = tps.searchsorted(tps[-1])
    sl = slice(last_ind, None, -1)
    # Append the conventional (precision=1, recall=0) end point.
    return np.r_[precision[sl], 1], np.r_[recall[sl], 0], thresholds[sl]
def roc_curve(y_true, y_score, pos_label=None, sample_weight=None):
    """Compute Receiver operating characteristic (ROC)
    Note: this implementation is restricted to the binary classification task.
    Parameters
    ----------
    y_true : array, shape = [n_samples]
        True binary labels in range {0, 1} or {-1, 1}.  If labels are not
        binary, pos_label should be explicitly given.
    y_score : array, shape = [n_samples]
        Target scores, can either be probability estimates of the positive
        class or confidence values.
    pos_label : int
        Label considered as positive and others are considered negative.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    Returns
    -------
    fpr : array, shape = [>2]
        Increasing false positive rates such that element i is the false
        positive rate of predictions with score >= thresholds[i].
    tpr : array, shape = [>2]
        Increasing true positive rates such that element i is the true
        positive rate of predictions with score >= thresholds[i].
    thresholds : array, shape = [n_thresholds]
        Decreasing thresholds on the decision function used to compute
        fpr and tpr. `thresholds[0]` represents no instances being predicted
        and is arbitrarily set to `max(y_score) + 1`.
    See also
    --------
    roc_auc_score : Compute Area Under the Curve (AUC) from prediction scores
    Notes
    -----
    Since the thresholds are sorted from low to high values, they
    are reversed upon returning them to ensure they correspond to both ``fpr``
    and ``tpr``, which are sorted in reversed order during their calculation.
    References
    ----------
    .. [1] `Wikipedia entry for the Receiver operating characteristic
            <http://en.wikipedia.org/wiki/Receiver_operating_characteristic>`_
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn import metrics
    >>> y = np.array([1, 1, 2, 2])
    >>> scores = np.array([0.1, 0.4, 0.35, 0.8])
    >>> fpr, tpr, thresholds = metrics.roc_curve(y, scores, pos_label=2)
    >>> fpr
    array([ 0. ,  0.5,  0.5,  1. ])
    >>> tpr
    array([ 0.5,  0.5,  1. ,  1. ])
    >>> thresholds
    array([ 0.8 ,  0.4 ,  0.35,  0.1 ])
    """
    fps, tps, thresholds = _binary_clf_curve(
        y_true, y_score, pos_label=pos_label, sample_weight=sample_weight)
    if tps.size == 0 or fps[0] != 0:
        # Add an extra threshold position if necessary
        # so the curve always starts at (fpr=0, tpr=0).
        tps = np.r_[0, tps]
        fps = np.r_[0, fps]
        thresholds = np.r_[thresholds[0] + 1, thresholds]
    if fps[-1] <= 0:
        # fps[-1] is the total number of negatives; rates are undefined
        # without any, so return NaNs and warn rather than divide by zero.
        warnings.warn("No negative samples in y_true, "
                      "false positive value should be meaningless",
                      UndefinedMetricWarning)
        fpr = np.repeat(np.nan, fps.shape)
    else:
        fpr = fps / fps[-1]
    if tps[-1] <= 0:
        # Symmetrically, tps[-1] is the total number of positives.
        warnings.warn("No positive samples in y_true, "
                      "true positive value should be meaningless",
                      UndefinedMetricWarning)
        tpr = np.repeat(np.nan, tps.shape)
    else:
        tpr = tps / tps[-1]
    return fpr, tpr, thresholds
def label_ranking_average_precision_score(y_true, y_score):
    """Compute ranking-based average precision
    Label ranking average precision (LRAP) is the average over each ground
    truth label assigned to each sample, of the ratio of true vs. total
    labels with lower score.
    This metric is used in multilabel ranking problem, where the goal
    is to give better rank to the labels associated to each sample.
    The obtained score is always strictly greater than 0 and
    the best value is 1.
    Parameters
    ----------
    y_true : array or sparse matrix, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.
    y_score : array, shape = [n_samples, n_labels]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.
    Returns
    -------
    score : float
    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import label_ranking_average_precision_score
    >>> y_true = np.array([[1, 0, 0], [0, 0, 1]])
    >>> y_score = np.array([[0.75, 0.5, 1], [1, 0.2, 0.1]])
    >>> label_ranking_average_precision_score(y_true, y_score) \
        # doctest: +ELLIPSIS
    0.416...
    """
    check_consistent_length(y_true, y_score)
    y_true = check_array(y_true, ensure_2d=False)
    y_score = check_array(y_score, ensure_2d=False)
    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")
    # Handle badly formated array and the degenerate case with one label
    y_type = type_of_target(y_true)
    if (y_type != "multilabel-indicator"
            and not (y_type == "binary" and y_true.ndim == 2)):
        raise ValueError("{0} format is not supported".format(y_type))
    # CSR gives per-row access to the indices of the relevant labels.
    y_true = csr_matrix(y_true)
    # Negate so that rankdata (ascending) ranks the highest score first.
    y_score = -y_score
    n_samples, n_labels = y_true.shape
    out = 0.
    for i, (start, stop) in enumerate(zip(y_true.indptr, y_true.indptr[1:])):
        relevant = y_true.indices[start:stop]
        if (relevant.size == 0 or relevant.size == n_labels):
            # If all labels are relevant or unrelevant, the score is also
            # equal to 1. The label ranking has no meaning.
            out += 1.
            continue
        scores_i = y_score[i]
        # rank: position of each relevant label among ALL labels;
        # L: its position among the relevant labels only.
        rank = rankdata(scores_i, 'max')[relevant]
        L = rankdata(scores_i[relevant], 'max')
        out += np.divide(L, rank, dtype=float).mean()
    return out / n_samples
def coverage_error(y_true, y_score, sample_weight=None):
    """Coverage error measure
    Compute how far we need to go through the ranked scores to cover all
    true labels. The best value is equal to the average number
    of labels in ``y_true`` per sample.
    Ties in ``y_scores`` are broken by giving maximal rank that would have
    been assigned to all tied values.
    Parameters
    ----------
    y_true : array, shape = [n_samples, n_labels]
        True binary labels in binary indicator format.
    y_score : array, shape = [n_samples, n_labels]
        Target scores, can either be probability estimates of the positive
        class, confidence values, or binary decisions.
    sample_weight : array-like of shape = [n_samples], optional
        Sample weights.
    Returns
    -------
    coverage_error : float
    References
    ----------
    .. [1] Tsoumakas, G., Katakis, I., & Vlahavas, I. (2010).
           Mining multi-label data. In Data mining and knowledge discovery
           handbook (pp. 667-685). Springer US.
    """
    y_true = check_array(y_true, ensure_2d=False)
    y_score = check_array(y_score, ensure_2d=False)
    check_consistent_length(y_true, y_score, sample_weight)
    y_type = type_of_target(y_true)
    if y_type != "multilabel-indicator":
        raise ValueError("{0} format is not supported".format(y_type))
    if y_true.shape != y_score.shape:
        raise ValueError("y_true and y_score have different shape")
    # Mask out the irrelevant labels, then take each row's minimum score
    # among the relevant ones: every label scoring at least that minimum
    # must be traversed to cover all true labels.
    y_score_mask = np.ma.masked_array(y_score, mask=np.logical_not(y_true))
    y_min_relevant = y_score_mask.min(axis=1).reshape((-1, 1))
    coverage = (y_score >= y_min_relevant).sum(axis=1)
    # Rows with no relevant label have a fully-masked minimum; count them as 0.
    coverage = coverage.filled(0)
    return np.average(coverage, weights=sample_weight)
| mehdidc/scikit-learn | sklearn/metrics/ranking.py | Python | bsd-3-clause | 21,796 |
#
# Module implementing queues
#
# multiprocessing/queues.py
#
# Copyright (c) 2006-2008, R Oudkerk
# Licensed to PSF under a Contributor Agreement.
#
from __future__ import absolute_import
import sys
import os
import threading
import collections
import weakref
import errno
from . import connection
from . import context
from ._ext import _billiard
from .compat import get_errno
from .five import monotonic, Empty, Full
from .util import (
debug, error, info, Finalize, register_after_fork, is_exiting,
)
from .reduction import ForkingPickler
__all__ = ['Queue', 'SimpleQueue', 'JoinableQueue']
class Queue(object):
    '''
    Queue type using a pipe, buffer and thread
    '''
    def __init__(self, maxsize=0, *args, **kwargs):
        # ``ctx`` is effectively keyword-only; Python 2 has no syntax for
        # that, so the matching TypeError is raised by hand.
        try:
            ctx = kwargs['ctx']
        except KeyError:
            raise TypeError('missing 1 required keyword-only argument: ctx')
        if maxsize <= 0:
            # maxsize <= 0 means "unbounded" -- bounded only by the largest
            # value the platform semaphore supports.
            maxsize = _billiard.SemLock.SEM_VALUE_MAX
        self._maxsize = maxsize
        self._reader, self._writer = connection.Pipe(duplex=False)
        self._rlock = ctx.Lock()
        self._opid = os.getpid()
        if sys.platform == 'win32':
            # Writes to a message-oriented win32 pipe are atomic, so no
            # write lock is needed there (see ``_feed``).
            self._wlock = None
        else:
            self._wlock = ctx.Lock()
        self._sem = ctx.BoundedSemaphore(maxsize)
        # For use by concurrent.futures
        self._ignore_epipe = False
        self._after_fork()
        if sys.platform != 'win32':
            register_after_fork(self, Queue._after_fork)
    def __getstate__(self):
        # Only picklable while spawning a child process; the feeder-thread
        # state is rebuilt on the other side via __setstate__/_after_fork.
        context.assert_spawning(self)
        return (self._ignore_epipe, self._maxsize, self._reader, self._writer,
                self._rlock, self._wlock, self._sem, self._opid)
    def __setstate__(self, state):
        (self._ignore_epipe, self._maxsize, self._reader, self._writer,
         self._rlock, self._wlock, self._sem, self._opid) = state
        self._after_fork()
    def _after_fork(self):
        # (Re)create everything that cannot survive a fork or pickling:
        # the buffer, its condition variable and the feeder-thread plumbing.
        debug('Queue._after_fork()')
        self._notempty = threading.Condition(threading.Lock())
        self._buffer = collections.deque()
        self._thread = None
        self._jointhread = None
        self._joincancelled = False
        self._closed = False
        self._close = None
        self._send = self._writer.send
        self._recv = self._reader.recv
        self._send_bytes = self._writer.send_bytes
        self._recv_bytes = self._reader.recv_bytes
        self._poll = self._reader.poll
    def put(self, obj, block=True, timeout=None):
        # NOTE(review): ``assert`` is stripped under ``python -O``; a closed
        # queue would then fail later with a less obvious error.
        assert not self._closed
        if not self._sem.acquire(block, timeout):
            raise Full
        # The feeder thread (started lazily here) moves items from the
        # buffer into the pipe.
        with self._notempty:
            if self._thread is None:
                self._start_thread()
            self._buffer.append(obj)
            self._notempty.notify()
    def get(self, block=True, timeout=None):
        if block and timeout is None:
            # Fast path: block indefinitely on the read lock and the pipe.
            with self._rlock:
                res = self._recv_bytes()
            self._sem.release()
        else:
            if block:
                deadline = monotonic() + timeout
            if not self._rlock.acquire(block, timeout):
                raise Empty
            try:
                if block:
                    # The lock wait above already consumed part of the
                    # timeout budget.
                    timeout = deadline - monotonic()
                    if timeout < 0 or not self._poll(timeout):
                        raise Empty
                elif not self._poll():
                    raise Empty
                res = self._recv_bytes()
                self._sem.release()
            finally:
                self._rlock.release()
        # unserialize the data after having released the lock
        return ForkingPickler.loads(res)
    def qsize(self):
        # Raises NotImplementedError on Mac OSX because
        # of broken sem_getvalue()
        return self._maxsize - self._sem._semlock._get_value()
    def empty(self):
        return not self._poll()
    def full(self):
        return self._sem._semlock._is_zero()
    def get_nowait(self):
        return self.get(False)
    def put_nowait(self, obj):
        return self.put(obj, False)
    def close(self):
        self._closed = True
        self._reader.close()
        if self._close:
            self._close()
    def join_thread(self):
        # Wait for the feeder thread to flush all buffered data to the pipe.
        # Only valid after close().
        debug('Queue.join_thread()')
        assert self._closed
        if self._jointhread:
            self._jointhread()
    def cancel_join_thread(self):
        debug('Queue.cancel_join_thread()')
        self._joincancelled = True
        try:
            self._jointhread.cancel()
        except AttributeError:
            # Feeder thread was never started, so there is nothing to cancel.
            pass
    def _start_thread(self):
        debug('Queue._start_thread()')
        # Start thread which transfers data from buffer to pipe
        self._buffer.clear()
        self._thread = threading.Thread(
            target=Queue._feed,
            args=(self._buffer, self._notempty, self._send_bytes,
                  self._wlock, self._writer.close, self._ignore_epipe),
            name='QueueFeederThread'
        )
        self._thread.daemon = True
        debug('doing self._thread.start()')
        self._thread.start()
        debug('... done self._thread.start()')
        # On process exit we will wait for data to be flushed to pipe.
        #
        # However, if this process created the queue then all
        # processes which use the queue will be descendants of this
        # process. Therefore waiting for the queue to be flushed
        # is pointless once all the child processes have been joined.
        created_by_this_process = (self._opid == os.getpid())
        if not self._joincancelled and not created_by_this_process:
            self._jointhread = Finalize(
                self._thread, Queue._finalize_join,
                [weakref.ref(self._thread)],
                exitpriority=-5
            )
        # Send sentinel to the thread queue object when garbage collected
        self._close = Finalize(
            self, Queue._finalize_close,
            [self._buffer, self._notempty],
            exitpriority=10
        )
    @staticmethod
    def _finalize_join(twr):
        # ``twr`` is a weakref to the feeder thread so the Finalize callback
        # does not itself keep the thread alive.
        debug('joining queue thread')
        thread = twr()
        if thread is not None:
            thread.join()
            debug('... queue thread joined')
        else:
            debug('... queue thread already dead')
    @staticmethod
    def _finalize_close(buffer, notempty):
        debug('telling queue thread to quit')
        with notempty:
            # ``_sentinel`` tells the feeder loop in ``_feed`` to close the
            # pipe and exit.
            buffer.append(_sentinel)
            notempty.notify()
    @staticmethod
    def _feed(buffer, notempty, send_bytes, writelock, close, ignore_epipe):
        debug('starting thread to feed data to pipe')
        # Bind everything to locals: this loop runs for the lifetime of the
        # queue and local lookups are the cheapest.
        nacquire = notempty.acquire
        nrelease = notempty.release
        nwait = notempty.wait
        bpopleft = buffer.popleft
        sentinel = _sentinel
        if sys.platform != 'win32':
            wacquire = writelock.acquire
            wrelease = writelock.release
        else:
            # win32 message-oriented pipe writes are atomic -- no lock.
            wacquire = None
        try:
            while 1:
                # Sleep until put() signals that the buffer is non-empty.
                nacquire()
                try:
                    if not buffer:
                        nwait()
                finally:
                    nrelease()
                try:
                    while 1:
                        obj = bpopleft()
                        if obj is sentinel:
                            debug('feeder thread got sentinel -- exiting')
                            close()
                            return
                        # serialize the data before acquiring the lock
                        obj = ForkingPickler.dumps(obj)
                        if wacquire is None:
                            send_bytes(obj)
                        else:
                            wacquire()
                            try:
                                send_bytes(obj)
                            finally:
                                wrelease()
                except IndexError:
                    # Buffer drained -- go back to waiting on the condition.
                    pass
        except Exception as exc:
            if ignore_epipe and get_errno(exc) == errno.EPIPE:
                return
            # Since this runs in a daemon thread the resources it uses
            # may become unusable while the process is cleaning up.
            # We ignore errors which happen after the process has
            # started to cleanup.
            try:
                if is_exiting():
                    info('error in queue thread: %r', exc, exc_info=True)
                else:
                    if not error('error in queue thread: %r', exc,
                                 exc_info=True):
                        import traceback
                        traceback.print_exc()
            except Exception:
                pass
# Unique marker appended to the buffer to tell the feeder thread to close
# the pipe and exit (see ``Queue._feed`` and ``Queue._finalize_close``).
_sentinel = object()
class JoinableQueue(Queue):
    '''
    A queue type which also supports join() and task_done() methods

    Note that if you do not call task_done() for each finished task then
    eventually the counter's semaphore may overflow causing Bad Things
    to happen.
    '''
    def __init__(self, maxsize=0, *args, **kwargs):
        try:
            ctx = kwargs['ctx']
        except KeyError:
            raise TypeError('missing 1 required keyword argument: ctx')
        Queue.__init__(self, maxsize, ctx=ctx)
        # Counts items put() but not yet task_done()'d; ``_cond`` is
        # notified when the count drops back to zero so join() can return.
        self._unfinished_tasks = ctx.Semaphore(0)
        self._cond = ctx.Condition()
    def __getstate__(self):
        return Queue.__getstate__(self) + (self._cond, self._unfinished_tasks)
    def __setstate__(self, state):
        # The last two entries are this subclass's additions; the rest is
        # handled by Queue.__setstate__.
        Queue.__setstate__(self, state[:-2])
        self._cond, self._unfinished_tasks = state[-2:]
    def put(self, obj, block=True, timeout=None):
        # NOTE(review): ``assert`` is stripped under ``python -O``.
        assert not self._closed
        if not self._sem.acquire(block, timeout):
            raise Full
        # Lock order: _notempty before _cond (kept consistent everywhere).
        with self._notempty:
            with self._cond:
                if self._thread is None:
                    self._start_thread()
                self._buffer.append(obj)
                self._unfinished_tasks.release()
                self._notempty.notify()
    def task_done(self):
        with self._cond:
            # Non-blocking acquire fails when there is nothing outstanding.
            if not self._unfinished_tasks.acquire(False):
                raise ValueError('task_done() called too many times')
            if self._unfinished_tasks._semlock._is_zero():
                # Last outstanding task finished -- wake up all join()ers.
                self._cond.notify_all()
    def join(self):
        with self._cond:
            if not self._unfinished_tasks._semlock._is_zero():
                self._cond.wait()
class _SimpleQueue(object):
'''
Simplified Queue type -- really just a locked pipe
'''
def __init__(self, rnonblock=False, wnonblock=False, ctx=None):
self._reader, self._writer = connection.Pipe(
duplex=False, rnonblock=rnonblock, wnonblock=wnonblock,
)
self._poll = self._reader.poll
self._rlock = self._wlock = None
def empty(self):
return not self._poll()
def __getstate__(self):
context.assert_spawning(self)
return (self._reader, self._writer, self._rlock, self._wlock)
def __setstate__(self, state):
(self._reader, self._writer, self._rlock, self._wlock) = state
def get_payload(self):
return self._reader.recv_bytes()
def send_payload(self, value):
self._writer.send_bytes(value)
def get(self):
# unserialize the data after having released the lock
return ForkingPickler.loads(self.get_payload())
def put(self, obj):
# serialize the data before acquiring the lock
self.send_payload(ForkingPickler.dumps(obj))
class SimpleQueue(_SimpleQueue):
    '''
    Locked version of ``_SimpleQueue`` that is safe to use from
    multiple processes concurrently.
    '''
    def __init__(self, *args, **kwargs):
        # ``ctx`` is effectively keyword-only (Python 2 has no syntax for it).
        try:
            ctx = kwargs['ctx']
        except KeyError:
            raise TypeError('missing required keyword argument: ctx')
        self._reader, self._writer = connection.Pipe(duplex=False)
        # Fix: bind ``_poll`` here.  This __init__ deliberately does not
        # call _SimpleQueue.__init__ (which normally sets it), so without
        # this line empty() raised AttributeError on a fresh SimpleQueue.
        self._poll = self._reader.poll
        self._rlock = ctx.Lock()
        self._wlock = ctx.Lock() if sys.platform != 'win32' else None
    def get_payload(self):
        # Serialize concurrent readers on the read lock.
        with self._rlock:
            return self._reader.recv_bytes()
    def send_payload(self, value):
        if self._wlock is None:
            # writes to a message oriented win32 pipe are atomic
            self._writer.send_bytes(value)
        else:
            with self._wlock:
                self._writer.send_bytes(value)
| flaviogrossi/billiard | billiard/queues.py | Python | bsd-3-clause | 12,304 |
from django.utils.importlib import import_module
def function_from_string(string):
    """Resolve a dotted path such as ``"pkg.module.func"`` to the object.

    Splits on the *last* dot, imports the module part and returns the
    named attribute from it.  The string must contain at least one dot.

    Raises ImportError if the module cannot be imported and AttributeError
    if the attribute does not exist on it.
    """
    module_path, attr_name = string.rsplit(".", 1)
    try:
        # Prefer the stdlib: ``django.utils.importlib`` is a deprecated
        # copy of it and was removed in Django 1.9.
        from importlib import import_module as importer
    except ImportError:
        importer = import_module
    module = importer(module_path)
    return getattr(module, attr_name)
#!/usr/bin/env python
# coding=utf-8
__author__ = u'Ahmed Şeref GÜNEYSU'
import ui
| guneysus/packathon2016 | packathon2016/__init__.py | Python | bsd-3-clause | 85 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Alters IncomingRequest.secret: the CharField becomes nullable and
    # gains a database index (db_index=True).
    dependencies = [
        ('telerivet', '0001_initial'),
    ]
    operations = [
        migrations.AlterField(
            model_name='incomingrequest',
            name='secret',
            field=models.CharField(max_length=255, null=True, db_index=True),
            preserve_default=True,
        ),
    ]
| qedsoftware/commcare-hq | corehq/messaging/smsbackends/telerivet/migrations/0002_add_index_on_webhook_secret.py | Python | bsd-3-clause | 464 |
#!/usr/bin/env python
# Copyright 2018 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unpack_pak
import unittest
class UnpackPakTest(unittest.TestCase):
  """Unit tests for the pak-map parsing helpers in unpack_pak."""

  def testMapFileLine(self):
    # A plain {path, resource-id} entry must parse.
    self.assertTrue(unpack_pak.ParseLine(' {"path.js", IDR_PATH}'))

  def testGzippedMapFileLine(self):
    # Entries may carry a trailing gzip flag; both values must parse.
    self.assertTrue(unpack_pak.ParseLine(' {"path.js", IDR_PATH, false}'))
    self.assertTrue(unpack_pak.ParseLine(' {"path.js", IDR_PATH, true}'))

  def testGetFileAndDirName(self):
    (f, d) = unpack_pak.GetFileAndDirName(
        'out/build/gen/foo/foo.unpak', 'out/build/gen/foo', 'a/b.js')
    # Fixed: assertEquals is a deprecated alias (removed in Python 3.12);
    # use assertEqual instead.
    self.assertEqual('b.js', f)
    self.assertEqual('out/build/gen/foo/foo.unpak/a', d)

  def testGetFileAndDirNameForGeneratedResource(self):
    # A leading @out_folder@ prefix on generated resources is stripped.
    (f, d) = unpack_pak.GetFileAndDirName(
        'out/build/gen/foo/foo.unpak', 'out/build/gen/foo',
        '@out_folder@/out/build/gen/foo/a/b.js')
    self.assertEqual('b.js', f)
    self.assertEqual('out/build/gen/foo/foo.unpak/a', d)
# Allow running the tests directly: ``python unpack_pak_test.py``.
if __name__ == '__main__':
  unittest.main()
| endlessm/chromium-browser | chrome/browser/resources/unpack_pak_test.py | Python | bsd-3-clause | 1,132 |
from owtf.managers.resource import get_resources
from owtf.plugin.helper import plugin_helper
DESCRIPTION = "Plugin to assist manual testing"


def run(PluginInfo):
    """Build the plugin output: a link list of external SSL/TLS resources.

    Looks up the ``ExternalSSL`` resource entries and renders them through
    the plugin helper as an "Online Resources" link list.
    """
    links = get_resources("ExternalSSL")
    return plugin_helper.resource_linklist("Online Resources", links)
| owtf/owtf | owtf/plugins/web/external/Testing_for_SSL-TLS@OWTF-CM-001.py | Python | bsd-3-clause | 305 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# HobsonPy documentation build configuration file, created by
# sphinx-quickstart on Sat Sep 16 18:59:19 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# NOTE: Generated by sphinx-quickstart.  Commented-out settings document the
# available options and their defaults; only the plain assignments below are
# active configuration.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = []
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'HobsonPy'
copyright = '2017, Al Sweigart'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.0.1'
# The full version, including alpha/beta/rc tags.
release = '0.0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages.  See the documentation for
# a list of builtin themes.
# 'default' is a theme name shipped with Sphinx itself.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further.  For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents.  If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar.  Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it.  The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'HobsonPydoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto, manual, or own class]).
latex_documents = [
  ('index', 'HobsonPy.tex', 'HobsonPy Documentation',
   'Al Sweigart', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'hobsonpy', 'HobsonPy Documentation',
     ['Al Sweigart'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
  ('index', 'HobsonPy', 'HobsonPy Documentation',
   'Al Sweigart', 'HobsonPy', 'One line description of project.',
   'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| asweigart/hobson | docs/conf.py | Python | bsd-3-clause | 8,164 |
from __future__ import with_statement
import logging
import warnings
import django
from django.conf import settings
from django.conf.urls.defaults import patterns, url
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned, ValidationError
from django.core.urlresolvers import NoReverseMatch, reverse, resolve, Resolver404, get_script_prefix
from django.db import transaction
from django.db.models.sql.constants import QUERY_TERMS
from django.http import HttpResponse, HttpResponseNotFound, Http404
from django.utils.cache import patch_cache_control, patch_vary_headers
from tastypie.authentication import Authentication
from tastypie.authorization import ReadOnlyAuthorization
from tastypie.bundle import Bundle
from tastypie.cache import NoCache
from tastypie.constants import ALL, ALL_WITH_RELATIONS
from tastypie.exceptions import NotFound, BadRequest, InvalidFilterError, HydrationError, InvalidSortError, ImmediateHttpResponse
from tastypie import fields
from tastypie import http
from tastypie.paginator import Paginator
from tastypie.serializers import Serializer
from tastypie.throttle import BaseThrottle
from tastypie.utils import is_valid_jsonp_callback_value, dict_strip_unicode_keys, trailing_slash
from tastypie.utils.mime import determine_format, build_content_type
from tastypie.validation import Validation
try:
set
except NameError:
from sets import Set as set
# The ``copy`` module became function-friendly in Python 2.5 and
# ``copycompat`` was added in post 1.1.1 Django (r11901)..
try:
from django.utils.copycompat import deepcopy
except ImportError:
from copy import deepcopy
# If ``csrf_exempt`` isn't present, stub it.
try:
from django.views.decorators.csrf import csrf_exempt
except ImportError:
def csrf_exempt(func):
return func
# Django 1.5 has moved this constant up one level.
try:
from django.db.models.constants import LOOKUP_SEP
except ImportError:
from django.db.models.sql.constants import LOOKUP_SEP
class NOT_AVAILABLE:
    """Sentinel class; instances stringify to a fixed 'no data' message."""

    def __str__(self):
        return 'No such data is available.'
class ResourceOptions(object):
    """
    A configuration class for ``Resource``.

    Provides sane defaults and the logic needed to augment these settings with
    the internal ``class Meta`` used on ``Resource`` subclasses.
    """
    # Pluggable behaviour (each can be swapped via ``class Meta``).
    serializer = Serializer()
    authentication = Authentication()
    authorization = ReadOnlyAuthorization()
    cache = NoCache()
    throttle = BaseThrottle()
    validation = Validation()
    paginator_class = Paginator
    # Allowed HTTP verbs; ``None`` for the list/detail variants means
    # "inherit from allowed_methods" (resolved in __new__ below).
    allowed_methods = ['get', 'post', 'put', 'delete', 'patch']
    list_allowed_methods = None
    detail_allowed_methods = None
    # Pagination bounds.
    limit = getattr(settings, 'API_LIMIT_PER_PAGE', 20)
    max_limit = 1000
    # Naming / URL layout.
    api_name = None
    resource_name = None
    urlconf_namespace = None
    default_format = 'application/json'
    filtering = {}
    ordering = []
    # Data source hooks.
    object_class = None
    queryset = None
    fields = []
    excludes = []
    # Payload shape.
    include_resource_uri = True
    include_absolute_url = False
    always_return_data = False
    collection_name = 'objects'
    detail_uri_name = 'pk'
    def __new__(cls, meta=None):
        # Collect every non-underscore attribute off the user's ``Meta``
        # and bake the result into a fresh subclass's class attributes.
        overrides = {}
        # Handle overrides.
        if meta:
            for override_name in dir(meta):
                # No internals please.
                if not override_name.startswith('_'):
                    overrides[override_name] = getattr(meta, override_name)
        allowed_methods = overrides.get('allowed_methods', ['get', 'post', 'put', 'delete', 'patch'])
        # Unset list/detail method lists default to ``allowed_methods``.
        if overrides.get('list_allowed_methods', None) is None:
            overrides['list_allowed_methods'] = allowed_methods
        if overrides.get('detail_allowed_methods', None) is None:
            overrides['detail_allowed_methods'] = allowed_methods
        return object.__new__(type('ResourceOptions', (cls,), overrides))
class DeclarativeMetaclass(type):
    """
    Metaclass for ``Resource``.

    Collects declaratively-defined fields (anything with a
    ``dehydrated_type`` attribute) into ``base_fields``, inheriting fields
    from parent resources, and attaches a ``ResourceOptions`` built from
    the class's ``Meta`` as ``_meta``.
    """
    def __new__(cls, name, bases, attrs):
        attrs['base_fields'] = {}
        declared_fields = {}
        # Inherit any fields from parent(s).
        try:
            parents = [b for b in bases if issubclass(b, Resource)]
            # Simulate the MRO.
            parents.reverse()
            for p in parents:
                parent_fields = getattr(p, 'base_fields', {})
                for field_name, field_object in parent_fields.items():
                    attrs['base_fields'][field_name] = deepcopy(field_object)
        except NameError:
            # ``Resource`` itself is being created -- nothing to inherit yet.
            pass
        for field_name, obj in attrs.items():
            # Look for ``dehydrated_type`` instead of doing ``isinstance``,
            # which can break down if Tastypie is re-namespaced as something
            # else.
            if hasattr(obj, 'dehydrated_type'):
                field = attrs.pop(field_name)
                declared_fields[field_name] = field
        attrs['base_fields'].update(declared_fields)
        attrs['declared_fields'] = declared_fields
        new_class = super(DeclarativeMetaclass, cls).__new__(cls, name, bases, attrs)
        opts = getattr(new_class, 'Meta', None)
        new_class._meta = ResourceOptions(opts)
        if not getattr(new_class._meta, 'resource_name', None):
            # No ``resource_name`` provided. Attempt to auto-name the resource.
            class_name = new_class.__name__
            name_bits = [bit for bit in class_name.split('Resource') if bit]
            resource_name = ''.join(name_bits).lower()
            new_class._meta.resource_name = resource_name
        if getattr(new_class._meta, 'include_resource_uri', True):
            if not 'resource_uri' in new_class.base_fields:
                new_class.base_fields['resource_uri'] = fields.CharField(readonly=True)
        elif 'resource_uri' in new_class.base_fields and not 'resource_uri' in attrs:
            # ``include_resource_uri`` was turned off and the field was only
            # inherited, not declared on this class -- drop it.
            del(new_class.base_fields['resource_uri'])
        for field_name, field_object in new_class.base_fields.items():
            if hasattr(field_object, 'contribute_to_class'):
                field_object.contribute_to_class(new_class, field_name)
        return new_class
class Resource(object):
"""
Handles the data, request dispatch and responding to requests.
Serialization/deserialization is handled "at the edges" (i.e. at the
beginning/end of the request/response cycle) so that everything internally
is Python data structures.
This class tries to be non-model specific, so it can be hooked up to other
data sources, such as search results, files, other data, etc.
"""
__metaclass__ = DeclarativeMetaclass
    def __init__(self, api_name=None):
        # Deep-copy the class-level fields so per-instance mutation cannot
        # leak across resource instances.
        self.fields = deepcopy(self.base_fields)
        if not api_name is None:
            self._meta.api_name = api_name
def __getattr__(self, name):
if name in self.fields:
return self.fields[name]
raise AttributeError(name)
    def wrap_view(self, view):
        """
        Wraps methods so they can be called in a more functional way as well
        as handling exceptions better.

        Note that if ``BadRequest`` or an exception with a ``response`` attr
        are seen, there is special handling to either present a message back
        to the user or return the response traveling with the exception.
        """
        @csrf_exempt
        def wrapper(request, *args, **kwargs):
            try:
                # ``view`` is a method name on this resource, looked up late.
                callback = getattr(self, view)
                response = callback(request, *args, **kwargs)
                # Our response can vary based on a number of factors, use
                # the cache class to determine what we should ``Vary`` on so
                # caches won't return the wrong (cached) version.
                varies = getattr(self._meta.cache, "varies", [])
                if varies:
                    patch_vary_headers(response, varies)
                if self._meta.cache.cacheable(request, response):
                    if self._meta.cache.cache_control():
                        # If the request is cacheable and we have a
                        # ``Cache-Control`` available then patch the header.
                        patch_cache_control(response, **self._meta.cache.cache_control())
                if request.is_ajax() and not response.has_header("Cache-Control"):
                    # IE excessively caches XMLHttpRequests, so we're disabling
                    # the browser cache here.
                    # See http://www.enhanceie.com/ie/bugs.asp for details.
                    patch_cache_control(response, no_cache=True)
                return response
            except (BadRequest, fields.ApiFieldError), e:
                return http.HttpBadRequest(e.args[0])
            except ValidationError, e:
                return http.HttpBadRequest(', '.join(e.messages))
            except Exception, e:
                # Exceptions carrying a prebuilt ``response`` (e.g.
                # ImmediateHttpResponse) short-circuit straight to the client.
                if hasattr(e, 'response'):
                    return e.response
                # A real, non-expected exception.
                # Handle the case where the full traceback is more helpful
                # than the serialized error.
                if settings.DEBUG and getattr(settings, 'TASTYPIE_FULL_DEBUG', False):
                    raise
                # Re-raise the error to get a proper traceback when the error
                # happend during a test case
                if request.META.get('SERVER_NAME') == 'testserver':
                    raise
                # Rather than re-raising, we're going to things similar to
                # what Django does. The difference is returning a serialized
                # error message.
                return self._handle_500(request, e)
        return wrapper
    def _handle_500(self, request, exception):
        """
        Turn an uncaught exception into a serialized error response.

        "Not found"-style exceptions become a 404; everything else a 500.
        In DEBUG mode the real message and traceback are sent to the client,
        otherwise a canned error message is returned.
        """
        import traceback
        import sys
        the_trace = '\n'.join(traceback.format_exception(*(sys.exc_info())))
        response_class = http.HttpApplicationError
        response_code = 500
        NOT_FOUND_EXCEPTIONS = (NotFound, ObjectDoesNotExist, Http404)
        if isinstance(exception, NOT_FOUND_EXCEPTIONS):
            response_class = HttpResponseNotFound
            response_code = 404
        if settings.DEBUG:
            data = {
                "error_message": unicode(exception),
                "traceback": the_trace,
            }
            desired_format = self.determine_format(request)
            serialized = self.serialize(request, data, desired_format)
            return response_class(content=serialized, content_type=build_content_type(desired_format))
        # When DEBUG is False, send an error message to the admins (unless it's
        # a 404, in which case we check the setting).
        send_broken_links = getattr(settings, 'SEND_BROKEN_LINK_EMAILS', False)
        if not response_code == 404 or send_broken_links:
            log = logging.getLogger('django.request.tastypie')
            log.error('Internal Server Error: %s' % request.path, exc_info=sys.exc_info(), extra={'status_code': response_code, 'request':request})
            if django.VERSION < (1, 3, 0):
                # Pre-1.3 Django has no logging-based admin mail handler, so
                # mail the admins by hand.
                from django.core.mail import mail_admins
                subject = 'Error (%s IP): %s' % ((request.META.get('REMOTE_ADDR') in settings.INTERNAL_IPS and 'internal' or 'EXTERNAL'), request.path)
                try:
                    request_repr = repr(request)
                except:
                    request_repr = "Request repr() unavailable"
                message = "%s\n\n%s" % (the_trace, request_repr)
                mail_admins(subject, message, fail_silently=True)
        # Prep the data going out.
        data = {
            "error_message": getattr(settings, 'TASTYPIE_CANNED_ERROR', "Sorry, this request could not be processed. Please try again later."),
        }
        desired_format = self.determine_format(request)
        serialized = self.serialize(request, data, desired_format)
        return response_class(content=serialized, content_type=build_content_type(desired_format))
    def _build_reverse_url(self, name, args=None, kwargs=None):
        """
        A convenience hook for overriding how URLs are built.

        See ``NamespacedModelResource._build_reverse_url`` for an example.
        """
        return reverse(name, args=args, kwargs=kwargs)
    def base_urls(self):
        """
        The standard URLs this ``Resource`` should respond to.
        """
        # Order matters: ``schema`` and ``set`` must appear before the
        # catch-all detail pattern at the end.
        return [
            url(r"^(?P<resource_name>%s)%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('dispatch_list'), name="api_dispatch_list"),
            url(r"^(?P<resource_name>%s)/schema%s$" % (self._meta.resource_name, trailing_slash()), self.wrap_view('get_schema'), name="api_get_schema"),
            url(r"^(?P<resource_name>%s)/set/(?P<%s_list>\w[\w/;-]*)%s$" % (self._meta.resource_name, self._meta.detail_uri_name, trailing_slash()), self.wrap_view('get_multiple'), name="api_get_multiple"),
            url(r"^(?P<resource_name>%s)/(?P<%s>\w[\w/-]*)%s$" % (self._meta.resource_name, self._meta.detail_uri_name, trailing_slash()), self.wrap_view('dispatch_detail'), name="api_dispatch_detail"),
        ]
    def override_urls(self):
        """
        Deprecated. Will be removed by v1.0.0. Please use ``prepend_urls`` instead.
        """
        # Kept for backwards compatibility; the ``urls`` property still
        # honours anything returned from here.
        return []
    def prepend_urls(self):
        """
        A hook for adding your own URLs or matching before the default URLs.
        """
        # Default implementation adds nothing.
        return []
    @property
    def urls(self):
        """
        The endpoints this ``Resource`` responds to.

        Mostly a standard URLconf, this is suitable for either automatic use
        when registered with an ``Api`` class or for including directly in
        a URLconf should you choose to.
        """
        urls = self.prepend_urls()
        # The deprecated ``override_urls`` is still honoured.  Note it is
        # called twice: once as the truthiness test, once to extend.
        if self.override_urls():
            warnings.warn("'override_urls' is a deprecated method & will be removed by v1.0.0. Please rename your method to ``prepend_urls``.")
            urls += self.override_urls()
        urls += self.base_urls()
        urlpatterns = patterns('',
            *urls
        )
        return urlpatterns
    def determine_format(self, request):
        """
        Used to determine the desired format.

        Largely relies on ``tastypie.utils.mime.determine_format`` but here
        as a point of extension.
        """
        return determine_format(request, self._meta.serializer, default_format=self._meta.default_format)
def serialize(self, request, data, format, options=None):
"""
Given a request, data and a desired format, produces a serialized
version suitable for transfer over the wire.
Mostly a hook, this uses the ``Serializer`` from ``Resource._meta``.
"""
options = options or {}
if 'text/javascript' in format:
# get JSONP callback name. default to "callback"
callback = request.GET.get('callback', 'callback')
if not is_valid_jsonp_callback_value(callback):
raise BadRequest('JSONP callback name is invalid.')
options['callback'] = callback
return self._meta.serializer.serialize(data, format, options)
def deserialize(self, request, data, format='application/json'):
    """
    Given a request, data and a format, deserializes the given data.

    It relies on the request properly sending a ``CONTENT_TYPE`` header,
    falling back to the ``format`` argument (``application/json`` by
    default) if not provided.

    Mostly a hook, this uses the ``Serializer`` from ``Resource._meta``.
    """
    # Bug fix: the ``format`` parameter used to be ignored — the
    # fallback was hard-coded to 'application/json'. Honor the caller's
    # explicit format instead; behavior is unchanged for the default.
    deserialized = self._meta.serializer.deserialize(data, format=request.META.get('CONTENT_TYPE', format))
    return deserialized
def alter_list_data_to_serialize(self, request, data):
    """
    Hook to reshape list data (objects plus meta) just before it gets
    serialized and sent to the client. Identity by default.
    """
    return data
def alter_detail_data_to_serialize(self, request, data):
    """
    Hook to reshape a single bundle just before it gets serialized and
    sent to the client. Identity by default.
    """
    return data
def alter_deserialized_list_data(self, request, data):
    """
    Hook to massage deserialized list data from the client before any
    hydration is applied. Identity by default.
    """
    return data
def alter_deserialized_detail_data(self, request, data):
    """
    Hook to massage deserialized detail data from the client before any
    hydration is applied. Identity by default.
    """
    return data
def dispatch_list(self, request, **kwargs):
    """
    View handling GET/POST/PUT/DELETE over the whole collection.

    Relies on ``Resource.dispatch`` for the heavy-lifting.
    """
    return self.dispatch('list', request, **kwargs)
def dispatch_detail(self, request, **kwargs):
    """
    View handling GET/POST/PUT/DELETE on a single resource.

    Relies on ``Resource.dispatch`` for the heavy-lifting.
    """
    return self.dispatch('detail', request, **kwargs)
def dispatch(self, request_type, request, **kwargs):
    """
    Handles the common operations (allowed HTTP method, authentication,
    throttling, method lookup) surrounding most CRUD interactions.

    ``request_type`` is ``'list'`` or ``'detail'``; it selects both the
    per-type allowed-method list and the handler method name.
    """
    # e.g. ``list_allowed_methods`` / ``detail_allowed_methods``.
    allowed_methods = getattr(self._meta, "%s_allowed_methods" % request_type, None)
    # Lets clients that can only issue GET/POST (some proxies, old
    # browsers) tunnel the real verb through a header.
    if 'HTTP_X_HTTP_METHOD_OVERRIDE' in request.META:
        request.method = request.META['HTTP_X_HTTP_METHOD_OVERRIDE']
    request_method = self.method_check(request, allowed=allowed_methods)
    # Resolve a handler such as ``get_list`` or ``put_detail``.
    method = getattr(self, "%s_%s" % (request_method, request_type), None)
    if method is None:
        raise ImmediateHttpResponse(response=http.HttpNotImplemented())
    # Order matters: authenticate first, then authorize, then throttle.
    self.is_authenticated(request)
    self.is_authorized(request)
    self.throttle_check(request)
    # All clear. Process the request.
    request = convert_post_to_put(request)
    response = method(request, **kwargs)
    # Add the throttled request.
    self.log_throttled_access(request)
    # If what comes back isn't a ``HttpResponse``, assume that the
    # request was accepted and that some action occurred. This also
    # prevents Django from freaking out.
    if not isinstance(response, HttpResponse):
        return http.HttpNoContent()
    return response
def remove_api_resource_names(self, url_dict):
    """
    Return a copy of ``url_dict`` without ``api_name``/``resource_name``.

    Useful for converting URLconf matches into something suitable for
    data lookup. For example::

        Model.objects.filter(**self.remove_api_resource_names(matches))
    """
    return {key: value for key, value in url_dict.items() if key not in ('api_name', 'resource_name')}
def method_check(self, request, allowed=None):
    """
    Ensure the request's HTTP method is permitted for this resource.

    ``allowed`` is a list of lowercase HTTP method names, e.g.::

        self.method_check(request, self._meta.allowed_methods)
        self.method_check(request, self._meta.list_allowed_methods)
        self.method_check(request, ['get'])

    Answers ``OPTIONS`` directly with an ``Allow`` header; raises an
    immediate 405 for disallowed methods; otherwise returns the
    lowercased method name.
    """
    allowed = allowed if allowed is not None else []
    request_method = request.method.lower()
    allows = ','.join(m.upper() for m in allowed)
    if request_method == "options":
        response = HttpResponse(allows)
        response['Allow'] = allows
        raise ImmediateHttpResponse(response=response)
    if request_method not in allowed:
        response = http.HttpMethodNotAllowed(allows)
        response['Allow'] = allows
        raise ImmediateHttpResponse(response=response)
    return request_method
def is_authorized(self, request, object=None):
    """
    Check permission to GET/POST/PUT/DELETE this resource.

    When ``object`` is provided, the authorization backend can apply
    additional row-level permission checks. Raises an immediate 401
    unless the backend answers ``True`` (a returned ``HttpResponse``
    is raised as-is).
    """
    auth_result = self._meta.authorization.is_authorized(request, object)
    if isinstance(auth_result, HttpResponse):
        raise ImmediateHttpResponse(response=auth_result)
    if auth_result is not True:
        raise ImmediateHttpResponse(response=http.HttpUnauthorized())
def is_authenticated(self, request):
    """
    Verify the requesting user's identity.

    Mostly a hook around the class assigned to ``authentication`` in
    ``Resource._meta``; raises an immediate 401 unless the backend
    answers ``True`` (a returned ``HttpResponse`` is raised as-is).
    """
    # Authenticate the request as needed.
    auth_result = self._meta.authentication.is_authenticated(request)
    if isinstance(auth_result, HttpResponse):
        raise ImmediateHttpResponse(response=auth_result)
    if auth_result is not True:
        raise ImmediateHttpResponse(response=http.HttpUnauthorized())
def throttle_check(self, request):
    """
    Reject the request with an immediate 429 when the client has
    exceeded its rate limit.

    Mostly a hook around the class assigned to ``throttle`` in
    ``Resource._meta``.
    """
    identifier = self._meta.authentication.get_identifier(request)
    if self._meta.throttle.should_be_throttled(identifier):
        # Throttle limit exceeded.
        raise ImmediateHttpResponse(response=http.HttpTooManyRequests())
def log_throttled_access(self, request):
    """
    Record this request against the client's throttle history.

    Mostly a hook around the class assigned to ``throttle`` in
    ``Resource._meta``.
    """
    identifier = self._meta.authentication.get_identifier(request)
    self._meta.throttle.accessed(identifier, url=request.get_full_path(), request_method=request.method.lower())
def build_bundle(self, obj=None, data=None, request=None):
    """
    Build a ``Bundle`` for use throughout the dehydrate/hydrate cycle.

    When no ``obj`` is supplied, an empty ``Resource._meta.object_class``
    instance is created so that ``bundle.obj`` access never fails.
    """
    if obj is None:
        obj = self._meta.object_class()
    return Bundle(obj=obj, data=data, request=request)
def build_filters(self, filters=None):
    """
    Hook for translating querystring filters into ORM lookups.

    Pass-through here; ``ModelResource`` provides a full working version
    specific to Django's ``Models``.
    """
    return filters
def apply_sorting(self, obj_list, options=None):
    """
    Hook for ordering the objects being returned.

    Pass-through here; ``ModelResource`` provides a full working version
    specific to Django's ``Models``.
    """
    return obj_list
def get_bundle_detail_data(self, bundle):
    """
    Read the ``detail_uri_name`` attribute off ``bundle.obj``
    (usually just ``bundle.obj.pk``).
    """
    return getattr(bundle.obj, self._meta.detail_uri_name)
# URL-related methods.
def detail_uri_kwargs(self, bundle_or_obj):
    """
    Given a ``Bundle`` or an object, return the extra kwargs needed to
    generate a detail URI.

    Must be implemented by subclasses; ``ModelResource`` provides a full
    working version specific to Django's ``Models``.
    """
    raise NotImplementedError()
def resource_uri_kwargs(self, bundle_or_obj=None):
    """
    Build the kwargs needed to reverse a URI for this resource.

    Always includes ``resource_name`` (plus ``api_name`` when the
    resource is registered with an ``Api``); when ``bundle_or_obj`` is
    supplied, the detail kwargs from ``detail_uri_kwargs`` are merged in.
    """
    kwargs = {'resource_name': self._meta.resource_name}
    if self._meta.api_name is not None:
        kwargs['api_name'] = self._meta.api_name
    if bundle_or_obj is not None:
        kwargs.update(self.detail_uri_kwargs(bundle_or_obj))
    return kwargs
def get_resource_uri(self, bundle_or_obj=None, url_name='api_dispatch_list'):
    """
    Generate a URI for this resource.

    Without ``bundle_or_obj`` the list endpoint URI is built; with it,
    the detail endpoint URI. Returns '' when the URL cannot be reversed
    against the URLconf.
    """
    if bundle_or_obj is not None:
        url_name = 'api_dispatch_detail'
    try:
        return self._build_reverse_url(url_name, kwargs=self.resource_uri_kwargs(bundle_or_obj))
    except NoReverseMatch:
        return ''
def get_via_uri(self, uri, request=None):
    """
    Resolve ``uri`` back into an object via ``obj_get``.

    Strips the script prefix, resolves the remainder through the
    URLconf and raises ``NotFound`` for unresolvable URIs. Override
    this method for custom behavior based on other portions of the URI.
    """
    prefix = get_script_prefix()
    chomped_uri = uri
    if prefix and chomped_uri.startswith(prefix):
        # -1 keeps the leading slash (the prefix itself ends with one).
        chomped_uri = chomped_uri[len(prefix) - 1:]
    try:
        view, args, kwargs = resolve(chomped_uri)
    except Resolver404:
        raise NotFound("The URL provided '%s' was not a link to a valid resource." % uri)
    return self.obj_get(request=request, **self.remove_api_resource_names(kwargs))
# Data preparation.
def full_dehydrate(self, bundle):
    """
    Populate ``bundle.data`` from ``bundle.obj`` by dehydrating every
    field on the resource.

    After each field's own dehydrate, an optional ``dehydrate_FOO``
    method may override the value; finally the ``dehydrate`` hook gets
    a chance to massage the whole bundle.
    """
    for field_name, field_object in self.fields.items():
        # A touch leaky, but related fields need the API context to
        # build resource URIs.
        if getattr(field_object, 'dehydrated_type', None) == 'related':
            field_object.api_name = self._meta.api_name
            field_object.resource_name = self._meta.resource_name
        bundle.data[field_name] = field_object.dehydrate(bundle)
        # Optional per-field override hook.
        override = getattr(self, "dehydrate_%s" % field_name, None)
        if override:
            bundle.data[field_name] = override(bundle)
    return self.dehydrate(bundle)
def dehydrate(self, bundle):
    """
    Final hook after all fields/methods have built the dehydrated data.

    Useful for cross-field manipulation or annotating extra data. Must
    return the (possibly modified) bundle.
    """
    return bundle
def full_hydrate(self, bundle):
    """
    Given a populated bundle, distill it and turn it back into
    a full-fledged object instance.
    """
    if bundle.obj is None:
        bundle.obj = self._meta.object_class()
    bundle = self.hydrate(bundle)
    for field_name, field_object in self.fields.items():
        # Readonly fields never write back to the object.
        if field_object.readonly is True:
            continue
        # Check for an optional method to do further hydration.
        method = getattr(self, "hydrate_%s" % field_name, None)
        if method:
            bundle = method(bundle)
        if field_object.attribute:
            value = field_object.hydrate(bundle)
            # NOTE: We only get back a bundle when it is related field.
            if isinstance(value, Bundle) and value.errors.get(field_name):
                # Propagate the related bundle's errors onto this bundle.
                bundle.errors[field_name] = value.errors[field_name]
            if value is not None or field_object.null:
                # We need to avoid populating M2M data here as that will
                # cause things to blow up.
                if not getattr(field_object, 'is_related', False):
                    setattr(bundle.obj, field_object.attribute, value)
                elif not getattr(field_object, 'is_m2m', False):
                    # To-one related field: assign the related object
                    # itself, not the bundle wrapper.
                    if value is not None:
                        setattr(bundle.obj, field_object.attribute, value.obj)
                    elif field_object.blank:
                        # Blank allowed: leave the attribute untouched.
                        continue
                    elif field_object.null:
                        setattr(bundle.obj, field_object.attribute, value)
    return bundle
def hydrate(self, bundle):
    """
    Initial hook before fields/methods build out the hydrated data.

    Useful for cross-field manipulation or annotating extra data. Must
    return the (possibly modified) bundle.
    """
    return bundle
def hydrate_m2m(self, bundle):
    """
    Populate the ManyToMany data on the instance.

    Only the bundle's data is hydrated, leaving ``bundle.obj``
    unmodified; persisting the relations is the caller's job
    (``ModelResource`` provides a working baseline). Optional
    ``hydrate_FOO`` hooks run after the per-field pass.
    """
    if bundle.obj is None:
        raise HydrationError("You must call 'full_hydrate' before attempting to run 'hydrate_m2m' on %r." % self)
    m2m_fields = [(name, field) for name, field in self.fields.items() if getattr(field, 'is_m2m', False)]
    for field_name, field_object in m2m_fields:
        if field_object.attribute:
            bundle.data[field_name] = field_object.hydrate_m2m(bundle)
    for field_name, field_object in m2m_fields:
        hook = getattr(self, "hydrate_%s" % field_name, None)
        if hook:
            hook(bundle)
    return bundle
def build_schema(self):
    """
    Describe this resource's fields and options as a dictionary.

    Used by the ``schema/`` endpoint to describe what will be available.
    """
    field_schemas = {}
    for field_name, field_object in self.fields.items():
        schema = {
            'default': field_object.default,
            'type': field_object.dehydrated_type,
            'nullable': field_object.null,
            'blank': field_object.blank,
            'readonly': field_object.readonly,
            'help_text': field_object.help_text,
            'unique': field_object.unique,
        }
        if field_object.dehydrated_type == 'related':
            schema['related_type'] = 'to_many' if getattr(field_object, 'is_m2m', False) else 'to_one'
        field_schemas[field_name] = schema
    data = {
        'fields': field_schemas,
        'default_format': self._meta.default_format,
        'allowed_list_http_methods': self._meta.list_allowed_methods,
        'allowed_detail_http_methods': self._meta.detail_allowed_methods,
        'default_limit': self._meta.limit,
    }
    if self._meta.ordering:
        data['ordering'] = self._meta.ordering
    if self._meta.filtering:
        data['filtering'] = self._meta.filtering
    return data
def dehydrate_resource_uri(self, bundle):
    """
    Dehydrate the automatically included ``resource_uri`` field.

    Returns '' when no URI can be generated (unimplemented kwargs or
    no reverse match).
    """
    try:
        return self.get_resource_uri(bundle)
    except NotImplementedError:
        return ''
    except NoReverseMatch:
        return ''
def generate_cache_key(self, *args, **kwargs):
    """
    Create a unique-enough cache key from
    api_name/resource_name/args/kwargs.
    """
    smooshed = ["%s=%s" % (key, value) for key, value in kwargs.items()]
    # Use a list plus a ``.join()`` because it's faster than concatenation.
    return "%s:%s:%s:%s" % (self._meta.api_name, self._meta.resource_name, ':'.join(args), ':'.join(smooshed))
# Data access methods.
def get_object_list(self, request):
    """
    Return the list of available objects.

    Must be implemented by subclasses; ``ModelResource`` provides a full
    working version specific to Django's ``Models``.
    """
    raise NotImplementedError()
def apply_authorization_limits(self, request, object_list):
    """
    Let the ``Authorization`` backend further restrict ``object_list``.

    Backends without an ``apply_limits`` method leave the list as-is;
    also a hook for per-``Resource`` customization.
    """
    limiter = getattr(self._meta.authorization, 'apply_limits', None)
    if limiter is not None:
        object_list = limiter(request, object_list)
    return object_list
def can_create(self):
    """Return whether ``post`` appears in any ``allowed_methods`` list."""
    return 'post' in set(self._meta.list_allowed_methods + self._meta.detail_allowed_methods)
def can_update(self):
    """
    Return whether ``put`` appears in any ``allowed_methods`` list.

    Used when hydrating related data.
    """
    return 'put' in set(self._meta.list_allowed_methods + self._meta.detail_allowed_methods)
def can_delete(self):
    """Return whether ``delete`` appears in any ``allowed_methods`` list."""
    return 'delete' in set(self._meta.list_allowed_methods + self._meta.detail_allowed_methods)
def apply_filters(self, request, applicable_filters):
    """
    Hook controlling how filters are applied to the object list.

    Must be implemented by subclasses; ``ModelResource`` provides a full
    working version specific to Django's ``Models``.
    """
    raise NotImplementedError()
def obj_get_list(self, request=None, **kwargs):
    """
    Fetch the list of objects available on the resource.

    Must be implemented by subclasses; ``ModelResource`` provides a full
    working version specific to Django's ``Models``.
    """
    raise NotImplementedError()
def cached_obj_get_list(self, request=None, **kwargs):
    """
    Cache-backed ``obj_get_list``: serve from the resource cache when
    possible, otherwise fetch fresh and store the result.
    """
    cache_key = self.generate_cache_key('list', **kwargs)
    cached = self._meta.cache.get(cache_key)
    if cached is not None:
        return cached
    fresh = self.obj_get_list(request=request, **kwargs)
    self._meta.cache.set(cache_key, fresh)
    return fresh
def obj_get(self, request=None, **kwargs):
    """
    Fetch an individual object on the resource.

    Must be implemented by subclasses (raising ``NotFound`` when the
    object is missing); ``ModelResource`` provides a full working
    version specific to Django's ``Models``.
    """
    raise NotImplementedError()
def cached_obj_get(self, request=None, **kwargs):
    """
    Cache-backed ``obj_get``: serve from the resource cache when
    possible, otherwise fetch fresh and store the result.
    """
    cache_key = self.generate_cache_key('detail', **kwargs)
    cached = self._meta.cache.get(cache_key)
    if cached is not None:
        return cached
    fresh = self.obj_get(request=request, **kwargs)
    self._meta.cache.set(cache_key, fresh)
    return fresh
def obj_create(self, bundle, request=None, **kwargs):
    """
    Create a new object based on the provided data.

    Must be implemented by subclasses; ``ModelResource`` provides a full
    working version specific to Django's ``Models``.
    """
    raise NotImplementedError()
def obj_update(self, bundle, request=None, **kwargs):
    """
    Update an existing object (or create a new one) from the data.

    Must be implemented by subclasses; ``ModelResource`` provides a full
    working version specific to Django's ``Models``.
    """
    raise NotImplementedError()
def obj_delete_list(self, request=None, **kwargs):
    """
    Delete an entire list of objects.

    Must be implemented by subclasses; ``ModelResource`` provides a full
    working version specific to Django's ``Models``.
    """
    raise NotImplementedError()
def obj_delete(self, request=None, **kwargs):
    """
    Delete a single object.

    Must be implemented by subclasses; ``ModelResource`` provides a full
    working version specific to Django's ``Models``.
    """
    raise NotImplementedError()
def create_response(self, request, data, response_class=HttpResponse, **response_kwargs):
    """
    Shortcut for the determine-format/serialize/respond cycle.

    Mostly a useful hook; ``response_kwargs`` are forwarded to the
    response class.
    """
    desired_format = self.determine_format(request)
    body = self.serialize(request, data, desired_format)
    return response_class(content=body, content_type=build_content_type(desired_format), **response_kwargs)
def error_response(self, errors, request):
    """
    Serialize ``errors`` in the client's desired format (falling back to
    the resource default) and raise an immediate 400 Bad Request.
    """
    if request:
        desired_format = self.determine_format(request)
    else:
        desired_format = self._meta.default_format
    body = self.serialize(request, errors, desired_format)
    raise ImmediateHttpResponse(response=http.HttpBadRequest(content=body, content_type=build_content_type(desired_format)))
def is_valid(self, bundle, request=None):
    """
    Run ``Resource._meta.validation`` against the bundle's data.

    On failure the error dict is stored under the resource name in
    ``bundle.errors`` and ``False`` is returned; ``True`` otherwise.
    """
    errors = self._meta.validation.is_valid(bundle, request)
    if not errors:
        return True
    bundle.errors[self._meta.resource_name] = errors
    return False
def rollback(self, bundles):
    """
    Delete all objects pertaining to the given bundles.

    Must be implemented by subclasses, ideally without raising;
    ``ModelResource`` provides a full working version specific to
    Django's ``Models``.
    """
    raise NotImplementedError()
# Views.
def get_list(self, request, **kwargs):
    """
    Return a serialized, paginated list of resources (200 OK).

    Calls ``obj_get_list`` for the data, applies sorting and pagination,
    then dehydrates each object and serializes the page.
    """
    # TODO: Uncached for now. Invalidation that works for everyone may be
    # impossible.
    objects = self.obj_get_list(request=request, **self.remove_api_resource_names(kwargs))
    sorted_objects = self.apply_sorting(objects, options=request.GET)
    paginator = self._meta.paginator_class(request.GET, sorted_objects, resource_uri=self.get_resource_uri(), limit=self._meta.limit, max_limit=self._meta.max_limit, collection_name=self._meta.collection_name)
    page = paginator.page()
    # Dehydrate the bundles in preparation for serialization.
    page[self._meta.collection_name] = [
        self.full_dehydrate(self.build_bundle(obj=obj, request=request))
        for obj in page[self._meta.collection_name]
    ]
    page = self.alter_list_data_to_serialize(request, page)
    return self.create_response(request, page)
def get_detail(self, request, **kwargs):
    """
    Return a single serialized resource (200 OK).

    Uses ``cached_obj_get``; answers 404 when the object is missing and
    300 when the URI matches more than one object.
    """
    try:
        obj = self.cached_obj_get(request=request, **self.remove_api_resource_names(kwargs))
    except ObjectDoesNotExist:
        return http.HttpNotFound()
    except MultipleObjectsReturned:
        return http.HttpMultipleChoices("More than one resource is found at this URI.")
    bundle = self.full_dehydrate(self.build_bundle(obj=obj, request=request))
    bundle = self.alter_detail_data_to_serialize(request, bundle)
    return self.create_response(request, bundle)
def put_list(self, request, **kwargs):
    """
    Replaces a collection of resources with another collection.

    Calls ``delete_list`` to clear out the collection then ``obj_create``
    with the provided data to create the new collection.

    Return ``HttpNoContent`` (204 No Content) if
    ``Meta.always_return_data = False`` (default).

    Return ``HttpAccepted`` (202 Accepted) if
    ``Meta.always_return_data = True``.
    """
    deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))
    deserialized = self.alter_deserialized_list_data(request, deserialized)
    if not self._meta.collection_name in deserialized:
        raise BadRequest("Invalid data sent.")
    # Wipe the existing collection before recreating it from the payload.
    self.obj_delete_list(request=request, **self.remove_api_resource_names(kwargs))
    bundles_seen = []
    for object_data in deserialized[self._meta.collection_name]:
        bundle = self.build_bundle(data=dict_strip_unicode_keys(object_data), request=request)
        # Attempt to be transactional, deleting any previously created
        # objects if validation fails.
        try:
            self.obj_create(bundle, request=request, **self.remove_api_resource_names(kwargs))
            bundles_seen.append(bundle)
        except ImmediateHttpResponse:
            self.rollback(bundles_seen)
            raise
    if not self._meta.always_return_data:
        return http.HttpNoContent()
    else:
        to_be_serialized = {}
        to_be_serialized[self._meta.collection_name] = [self.full_dehydrate(bundle) for bundle in bundles_seen]
        to_be_serialized = self.alter_list_data_to_serialize(request, to_be_serialized)
        return self.create_response(request, to_be_serialized, response_class=http.HttpAccepted)
def put_detail(self, request, **kwargs):
    """
    Either updates an existing resource or creates a new one with the
    provided data.

    Calls ``obj_update`` with the provided data first, but falls back to
    ``obj_create`` if the object does not already exist.

    If a new resource is created, return ``HttpCreated`` (201 Created).
    If ``Meta.always_return_data = True``, there will be a populated body
    of serialized data.

    If an existing resource is modified and
    ``Meta.always_return_data = False`` (default), return ``HttpNoContent``
    (204 No Content).

    If an existing resource is modified and
    ``Meta.always_return_data = True``, return ``HttpAccepted`` (202
    Accepted).
    """
    deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))
    deserialized = self.alter_deserialized_detail_data(request, deserialized)
    bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized), request=request)
    try:
        # Happy path: the object exists and is updated in place.
        updated_bundle = self.obj_update(bundle, request=request, **self.remove_api_resource_names(kwargs))
        if not self._meta.always_return_data:
            return http.HttpNoContent()
        else:
            updated_bundle = self.full_dehydrate(updated_bundle)
            updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
            return self.create_response(request, updated_bundle, response_class=http.HttpAccepted)
    except (NotFound, MultipleObjectsReturned):
        # Fall back to create-via-PUT when no single object matches.
        updated_bundle = self.obj_create(bundle, request=request, **self.remove_api_resource_names(kwargs))
        location = self.get_resource_uri(updated_bundle)
        if not self._meta.always_return_data:
            return http.HttpCreated(location=location)
        else:
            updated_bundle = self.full_dehydrate(updated_bundle)
            updated_bundle = self.alter_detail_data_to_serialize(request, updated_bundle)
            return self.create_response(request, updated_bundle, response_class=http.HttpCreated, location=location)
def post_list(self, request, **kwargs):
    """
    Create a new resource/object from the request body (201 Created).

    Calls ``obj_create`` with the deserialized data; the new resource's
    location is returned in the ``Location`` header, and the serialized
    object is included in the body when ``Meta.always_return_data`` is
    true.
    """
    deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))
    deserialized = self.alter_deserialized_detail_data(request, deserialized)
    bundle = self.build_bundle(data=dict_strip_unicode_keys(deserialized), request=request)
    created = self.obj_create(bundle, request=request, **self.remove_api_resource_names(kwargs))
    location = self.get_resource_uri(created)
    if not self._meta.always_return_data:
        return http.HttpCreated(location=location)
    created = self.full_dehydrate(created)
    created = self.alter_detail_data_to_serialize(request, created)
    return self.create_response(request, created, response_class=http.HttpCreated, location=location)
def post_detail(self, request, **kwargs):
    """
    Create a new subcollection of the resource under a resource.

    Not implemented by default because most data models aren't
    self-referential; answers 501.
    """
    return http.HttpNotImplemented()
def delete_list(self, request, **kwargs):
    """
    Destroy a collection of resources/objects via ``obj_delete_list``.

    Returns 204 No Content on success.
    """
    self.obj_delete_list(request=request, **self.remove_api_resource_names(kwargs))
    return http.HttpNoContent()
def delete_detail(self, request, **kwargs):
    """
    Destroy a single resource/object via ``obj_delete``.

    Returns 204 No Content on success, 404 when the resource did not
    exist.
    """
    try:
        self.obj_delete(request=request, **self.remove_api_resource_names(kwargs))
    except NotFound:
        return http.HttpNotFound()
    return http.HttpNoContent()
def patch_list(self, request, **kwargs):
    """
    Updates a collection in-place.

    The exact behavior of ``PATCH`` to a list resource is still the matter of
    some debate in REST circles, and the ``PATCH`` RFC isn't standard. So the
    behavior this method implements (described below) is something of a
    stab in the dark. It's mostly cribbed from GData, with a smattering
    of ActiveResource-isms and maybe even an original idea or two.

    The ``PATCH`` format is one that's similar to the response returned from
    a ``GET`` on a list resource::

        {
          "objects": [{object}, {object}, ...],
          "deleted_objects": ["URI", "URI", "URI", ...],
        }

    For each object in ``objects``:

    * If the dict does not have a ``resource_uri`` key then the item is
      considered "new" and is handled like a ``POST`` to the resource list.
    * If the dict has a ``resource_uri`` key and the ``resource_uri`` refers
      to an existing resource then the item is an update; it's treated
      like a ``PATCH`` to the corresponding resource detail.
    * If the dict has a ``resource_uri`` but the resource *doesn't* exist,
      then this is considered to be a create-via-``PUT``.

    Each entry in ``deleted_objects`` refers to a resource URI of an existing
    resource to be deleted; each is handled like a ``DELETE`` to the relevant
    resource.

    In any case:

    * If there's a resource URI it *must* refer to a resource of this
      type. It's an error to include a URI of a different resource.
    * ``PATCH`` is all or nothing. If a single sub-operation fails, the
      entire request will fail and all resources will be rolled back.
    * For ``PATCH`` to work, you **must** have ``put`` in your
      :ref:`detail-allowed-methods` setting.
    * To delete objects via ``deleted_objects`` in a ``PATCH`` request you
      **must** have ``delete`` in your :ref:`detail-allowed-methods`
      setting.

    Substitute appropriate names for ``objects`` and
    ``deleted_objects`` if ``Meta.collection_name`` is set to something
    other than ``objects`` (default).
    """
    request = convert_post_to_patch(request)
    deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))
    collection_name = self._meta.collection_name
    deleted_collection_name = 'deleted_%s' % collection_name
    if collection_name not in deserialized:
        raise BadRequest("Invalid data sent: missing '%s'" % collection_name)
    # Updates/creates via PATCH require 'put' to be allowed on details.
    if len(deserialized[collection_name]) and 'put' not in self._meta.detail_allowed_methods:
        raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())
    for data in deserialized[collection_name]:
        # If there's a resource_uri then this is either an
        # update-in-place or a create-via-PUT.
        if "resource_uri" in data:
            uri = data.pop('resource_uri')
            try:
                obj = self.get_via_uri(uri, request=request)
                # The object does exist, so this is an update-in-place.
                bundle = self.build_bundle(obj=obj, request=request)
                bundle = self.full_dehydrate(bundle)
                bundle = self.alter_detail_data_to_serialize(request, bundle)
                self.update_in_place(request, bundle, data)
            except (ObjectDoesNotExist, MultipleObjectsReturned):
                # The object referenced by resource_uri doesn't exist,
                # so this is a create-by-PUT equivalent.
                data = self.alter_deserialized_detail_data(request, data)
                bundle = self.build_bundle(data=dict_strip_unicode_keys(data), request=request)
                self.obj_create(bundle, request=request)
        else:
            # There's no resource URI, so this is a create call just
            # like a POST to the list resource.
            data = self.alter_deserialized_detail_data(request, data)
            bundle = self.build_bundle(data=dict_strip_unicode_keys(data), request=request)
            self.obj_create(bundle, request=request)
    deleted_collection = deserialized.get(deleted_collection_name, [])
    if deleted_collection:
        # Deletions via PATCH require 'delete' to be allowed on details.
        if 'delete' not in self._meta.detail_allowed_methods:
            raise ImmediateHttpResponse(response=http.HttpMethodNotAllowed())
        for uri in deleted_collection:
            obj = self.get_via_uri(uri, request=request)
            self.obj_delete(request=request, _obj=obj)
    return http.HttpAccepted()
    def patch_detail(self, request, **kwargs):
        """
        Updates a resource in-place.
        Calls ``obj_update``.
        If the resource is updated, return ``HttpAccepted`` (202 Accepted).
        If the resource did not exist, return ``HttpNotFound`` (404 Not Found).
        """
        request = convert_post_to_patch(request)
        # We want to be able to validate the update, but we can't just pass
        # the partial data into the validator since all data needs to be
        # present. Instead, we basically simulate a PUT by pulling out the
        # original data and updating it in-place.
        # So first pull out the original object. This is essentially
        # ``get_detail``.
        try:
            obj = self.cached_obj_get(request=request, **self.remove_api_resource_names(kwargs))
        except ObjectDoesNotExist:
            return http.HttpNotFound()
        except MultipleObjectsReturned:
            # Ambiguous lookup: several objects matched this URI's kwargs.
            return http.HttpMultipleChoices("More than one resource is found at this URI.")
        # Dehydrate the existing object so the partial payload can be merged
        # over a complete representation before validation.
        bundle = self.build_bundle(obj=obj, request=request)
        bundle = self.full_dehydrate(bundle)
        bundle = self.alter_detail_data_to_serialize(request, bundle)
        # Now update the bundle in-place.
        deserialized = self.deserialize(request, request.raw_post_data, format=request.META.get('CONTENT_TYPE', 'application/json'))
        self.update_in_place(request, bundle, deserialized)
        if not self._meta.always_return_data:
            return http.HttpAccepted()
        else:
            # Re-dehydrate so the response body reflects the post-update state.
            bundle = self.full_dehydrate(bundle)
            bundle = self.alter_detail_data_to_serialize(request, bundle)
            return self.create_response(request, bundle, response_class=http.HttpAccepted)
    def update_in_place(self, request, original_bundle, new_data):
        """
        Update the object in original_bundle in-place using new_data.

        Merges ``new_data`` over the bundle's existing (complete) data, then
        delegates to ``obj_update`` exactly as a PUT would.
        """
        original_bundle.data.update(**dict_strip_unicode_keys(new_data))
        # Now we've got a bundle with the new data sitting in it and we're
        # we're basically in the same spot as a PUT request. SO the rest of this
        # function is cribbed from put_detail.
        self.alter_deserialized_detail_data(request, original_bundle.data)
        kwargs = {
            self._meta.detail_uri_name: self.get_bundle_detail_data(original_bundle),
            'request': request,
        }
        return self.obj_update(original_bundle, **kwargs)
    def get_schema(self, request, **kwargs):
        """
        Returns a serialized form of the schema of the resource.
        Calls ``build_schema`` to generate the data. This method only responds
        to HTTP GET.
        Should return a HttpResponse (200 OK).
        """
        self.method_check(request, allowed=['get'])
        self.is_authenticated(request)
        self.throttle_check(request)
        # Schema generation touches no objects, so log the hit and respond.
        self.log_throttled_access(request)
        return self.create_response(request, self.build_schema())
    def get_multiple(self, request, **kwargs):
        """
        Returns a serialized list of resources based on the identifiers
        from the URL.
        Calls ``obj_get`` to fetch only the objects requested. This method
        only responds to HTTP GET.
        Should return a HttpResponse (200 OK).
        """
        self.method_check(request, allowed=['get'])
        self.is_authenticated(request)
        self.throttle_check(request)
        # Rip apart the list then iterate.
        # Identifiers arrive semicolon-separated, e.g. /api/v1/obj/set/1;4;9/.
        kwarg_name = '%s_list' % self._meta.detail_uri_name
        obj_identifiers = kwargs.get(kwarg_name, '').split(';')
        objects = []
        not_found = []
        for identifier in obj_identifiers:
            try:
                obj = self.obj_get(request, **{self._meta.detail_uri_name: identifier})
                bundle = self.build_bundle(obj=obj, request=request)
                bundle = self.full_dehydrate(bundle)
                objects.append(bundle)
            except ObjectDoesNotExist:
                # Missing identifiers are reported in the payload, not fatal.
                not_found.append(identifier)
        object_list = {
            self._meta.collection_name: objects,
        }
        if len(not_found):
            object_list['not_found'] = not_found
        self.log_throttled_access(request)
        return self.create_response(request, object_list)
class ModelDeclarativeMetaclass(DeclarativeMetaclass):
    """
    Metaclass for ``ModelResource``.

    Derives ``object_class`` from ``Meta.queryset``, applies the
    ``fields`` whitelist / ``excludes`` blacklist to the introspected
    fields, and wires up the optional ``absolute_url`` field.
    """
    def __new__(cls, name, bases, attrs):
        meta = attrs.get('Meta')
        if meta and hasattr(meta, 'queryset'):
            # Users may supply only a queryset; infer the model from it.
            setattr(meta, 'object_class', meta.queryset.model)
        new_class = super(ModelDeclarativeMetaclass, cls).__new__(cls, name, bases, attrs)
        include_fields = getattr(new_class._meta, 'fields', [])
        excludes = getattr(new_class._meta, 'excludes', [])
        field_names = new_class.base_fields.keys()
        for field_name in field_names:
            if field_name == 'resource_uri':
                continue
            if field_name in new_class.declared_fields:
                continue
            # BUGFIX: use ``pop`` with a default instead of ``del`` so a field
            # that fails the whitelist AND appears in the blacklist isn't
            # deleted twice (the second ``del`` used to raise ``KeyError``).
            if len(include_fields) and not field_name in include_fields:
                new_class.base_fields.pop(field_name, None)
            if len(excludes) and field_name in excludes:
                new_class.base_fields.pop(field_name, None)
        # Add in the new fields.
        new_class.base_fields.update(new_class.get_fields(include_fields, excludes))
        if getattr(new_class._meta, 'include_absolute_url', True):
            if not 'absolute_url' in new_class.base_fields:
                new_class.base_fields['absolute_url'] = fields.CharField(attribute='get_absolute_url', readonly=True)
        elif 'absolute_url' in new_class.base_fields and not 'absolute_url' in attrs:
            del(new_class.base_fields['absolute_url'])
        return new_class
class ModelResource(Resource):
"""
A subclass of ``Resource`` designed to work with Django's ``Models``.
This class will introspect a given ``Model`` and build a field list based
on the fields found on the model (excluding relational fields).
Given that it is aware of Django's ORM, it also handles the CRUD data
operations of the resource.
"""
__metaclass__ = ModelDeclarativeMetaclass
@classmethod
def should_skip_field(cls, field):
"""
Given a Django model field, return if it should be included in the
contributed ApiFields.
"""
# Ignore certain fields (related fields).
if getattr(field, 'rel'):
return True
return False
@classmethod
def api_field_from_django_field(cls, f, default=fields.CharField):
"""
Returns the field type that would likely be associated with each
Django type.
"""
result = default
internal_type = f.get_internal_type()
if internal_type in ('DateField', 'DateTimeField'):
result = fields.DateTimeField
elif internal_type in ('BooleanField', 'NullBooleanField'):
result = fields.BooleanField
elif internal_type in ('FloatField',):
result = fields.FloatField
elif internal_type in ('DecimalField',):
result = fields.DecimalField
elif internal_type in ('IntegerField', 'PositiveIntegerField', 'PositiveSmallIntegerField', 'SmallIntegerField', 'AutoField'):
result = fields.IntegerField
elif internal_type in ('FileField', 'ImageField'):
result = fields.FileField
elif internal_type == 'TimeField':
result = fields.TimeField
# TODO: Perhaps enable these via introspection. The reason they're not enabled
# by default is the very different ``__init__`` they have over
# the other fields.
# elif internal_type == 'ForeignKey':
# result = ForeignKey
# elif internal_type == 'ManyToManyField':
# result = ManyToManyField
return result
    @classmethod
    def get_fields(cls, fields=None, excludes=None):
        """
        Given any explicit fields to include and fields to exclude, add
        additional fields based on the associated model.
        """
        final_fields = {}
        fields = fields or []
        excludes = excludes or []
        if not cls._meta.object_class:
            return final_fields
        for f in cls._meta.object_class._meta.fields:
            # If the field name is already present, skip
            if f.name in cls.base_fields:
                continue
            # If field is not present in explicit field listing, skip
            if fields and f.name not in fields:
                continue
            # If field is in exclude list, skip
            if excludes and f.name in excludes:
                continue
            if cls.should_skip_field(f):
                continue
            api_field_class = cls.api_field_from_django_field(f)
            kwargs = {
                'attribute': f.name,
                'help_text': f.help_text,
            }
            if f.null is True:
                kwargs['null'] = True
            kwargs['unique'] = f.unique
            if not f.null and f.blank is True:
                # Blank-but-not-null fields default to '' so a payload that
                # omits them still validates.
                kwargs['default'] = ''
                kwargs['blank'] = True
            if f.get_internal_type() == 'TextField':
                kwargs['default'] = ''
            if f.has_default():
                # A model-level default wins over the blank-field fallback.
                kwargs['default'] = f.default
            if getattr(f, 'auto_now', False):
                # NOTE(review): the auto_now/auto_now_add *flag itself* is
                # stored as the default — confirm downstream consumers expect
                # this rather than a date value.
                kwargs['default'] = f.auto_now
            if getattr(f, 'auto_now_add', False):
                kwargs['default'] = f.auto_now_add
            final_fields[f.name] = api_field_class(**kwargs)
            final_fields[f.name].instance_name = f.name
        return final_fields
    def check_filtering(self, field_name, filter_type='exact', filter_bits=None):
        """
        Given a field name, a optional filter type and an optional list of
        additional relations, determine if a field can be filtered on.
        If a filter does not meet the needed conditions, it should raise an
        ``InvalidFilterError``.
        If the filter meets the conditions, a list of attribute names (not
        field names) will be returned.
        """
        if filter_bits is None:
            filter_bits = []
        if not field_name in self._meta.filtering:
            raise InvalidFilterError("The '%s' field does not allow filtering." % field_name)
        # Check to see if it's an allowed lookup type.
        if not self._meta.filtering[field_name] in (ALL, ALL_WITH_RELATIONS):
            # Must be an explicit whitelist.
            if not filter_type in self._meta.filtering[field_name]:
                raise InvalidFilterError("'%s' is not an allowed filter on the '%s' field." % (filter_type, field_name))
        if self.fields[field_name].attribute is None:
            raise InvalidFilterError("The '%s' field has no 'attribute' for searching with." % field_name)
        # Check to see if it's a relational lookup and if that's allowed.
        if len(filter_bits):
            if not getattr(self.fields[field_name], 'is_related', False):
                raise InvalidFilterError("The '%s' field does not support relations." % field_name)
            if not self._meta.filtering[field_name] == ALL_WITH_RELATIONS:
                raise InvalidFilterError("Lookups are not allowed more than one level deep on the '%s' field." % field_name)
            # Recursively descend through the remaining lookups in the filter,
            # if any. We should ensure that all along the way, we're allowed
            # to filter on that field by the related resource.
            # Each level of the chain must re-authorize the lookup.
            related_resource = self.fields[field_name].get_related_resource(None)
            return [self.fields[field_name].attribute] + related_resource.check_filtering(filter_bits[0], filter_type, filter_bits[1:])
        return [self.fields[field_name].attribute]
def filter_value_to_python(self, value, field_name, filters, filter_expr,
filter_type):
"""
Turn the string ``value`` into a python object.
"""
# Simple values
true_values_list = ['true', 'True', True]
false_values_list = ['false', 'False', False]
none_values_list = ('nil', 'none', 'None', None)
if value in true_values_list:
value = True
elif value in false_values_list:
value = False
elif value in none_values_list:
value = None
# Split on ',' if not empty string and either an in or range filter.
if filter_type in ('in', 'range') and len(value):
if hasattr(filters, 'getlist'):
value = []
for part in filters.getlist(filter_expr):
value.extend(part.split(','))
else:
value = value.split(',')
return value
    def build_filters(self, filters=None):
        """
        Given a dictionary of filters, create the necessary ORM-level filters.
        Keys should be resource fields, **NOT** model fields.
        Valid values are either a list of Django filter types (i.e.
        ``['startswith', 'exact', 'lte']``), the ``ALL`` constant or the
        ``ALL_WITH_RELATIONS`` constant.
        """
        # At the declarative level:
        # filtering = {
        # 'resource_field_name': ['exact', 'startswith', 'endswith', 'contains'],
        # 'resource_field_name_2': ['exact', 'gt', 'gte', 'lt', 'lte', 'range'],
        # 'resource_field_name_3': ALL,
        # 'resource_field_name_4': ALL_WITH_RELATIONS,
        # ...
        # }
        # Accepts the filters as a dict. None by default, meaning no filters.
        if filters is None:
            filters = {}
        qs_filters = {}
        if getattr(self._meta, 'queryset', None) is not None:
            # Get the possible query terms from the current QuerySet.
            if hasattr(self._meta.queryset.query.query_terms, 'keys'):
                # Django 1.4 & below compatibility.
                query_terms = self._meta.queryset.query.query_terms.keys()
            else:
                # Django 1.5+.
                query_terms = self._meta.queryset.query.query_terms
        else:
            if hasattr(QUERY_TERMS, 'keys'):
                # Django 1.4 & below compatibility.
                query_terms = QUERY_TERMS.keys()
            else:
                # Django 1.5+.
                query_terms = QUERY_TERMS
        for filter_expr, value in filters.items():
            filter_bits = filter_expr.split(LOOKUP_SEP)
            field_name = filter_bits.pop(0)
            filter_type = 'exact'
            if not field_name in self.fields:
                # It's not a field we know about. Move along citizen.
                continue
            # A trailing recognised lookup (e.g. ``__gte``) overrides 'exact'.
            if len(filter_bits) and filter_bits[-1] in query_terms:
                filter_type = filter_bits.pop()
            lookup_bits = self.check_filtering(field_name, filter_type, filter_bits)
            value = self.filter_value_to_python(value, field_name, filters, filter_expr, filter_type)
            db_field_name = LOOKUP_SEP.join(lookup_bits)
            qs_filter = "%s%s%s" % (db_field_name, LOOKUP_SEP, filter_type)
            qs_filters[qs_filter] = value
        return dict_strip_unicode_keys(qs_filters)
    def apply_sorting(self, obj_list, options=None):
        """
        Given a dictionary of options, apply some ORM-level sorting to the
        provided ``QuerySet``.
        Looks for the ``order_by`` key and handles either ascending (just the
        field name) or descending (the field name with a ``-`` in front).
        The field name should be the resource field, **NOT** model field.
        """
        if options is None:
            options = {}
        parameter_name = 'order_by'
        if not 'order_by' in options:
            if not 'sort_by' in options:
                # Nothing to alter the order. Return what we've got.
                return obj_list
            else:
                warnings.warn("'sort_by' is a deprecated parameter. Please use 'order_by' instead.")
                parameter_name = 'sort_by'
        order_by_args = []
        if hasattr(options, 'getlist'):
            # QueryDict: honour repeated order_by parameters.
            order_bits = options.getlist(parameter_name)
        else:
            order_bits = options.get(parameter_name)
        if not isinstance(order_bits, (list, tuple)):
            order_bits = [order_bits]
        for order_by in order_bits:
            order_by_bits = order_by.split(LOOKUP_SEP)
            field_name = order_by_bits[0]
            order = ''
            if order_by_bits[0].startswith('-'):
                # Leading '-' requests descending order.
                field_name = order_by_bits[0][1:]
                order = '-'
            if not field_name in self.fields:
                # It's not a field we know about. Move along citizen.
                raise InvalidSortError("No matching '%s' field for ordering on." % field_name)
            if not field_name in self._meta.ordering:
                raise InvalidSortError("The '%s' field does not allow ordering." % field_name)
            if self.fields[field_name].attribute is None:
                raise InvalidSortError("The '%s' field has no 'attribute' for ordering with." % field_name)
            # Translate the resource field name to its model attribute path.
            order_by_args.append("%s%s" % (order, LOOKUP_SEP.join([self.fields[field_name].attribute] + order_by_bits[1:])))
        return obj_list.order_by(*order_by_args)
    def apply_filters(self, request, applicable_filters):
        """
        An ORM-specific implementation of ``apply_filters``.
        The default simply applies the ``applicable_filters`` as ``**kwargs``,
        but should make it possible to do more advanced things.
        """
        # Hook point: override for custom joins / Q-object filtering.
        return self.get_object_list(request).filter(**applicable_filters)
    def get_object_list(self, request):
        """
        An ORM-specific implementation of ``get_object_list``.
        Returns a queryset that may have been limited by other overrides.
        """
        # Clone so callers never mutate the shared Meta-level queryset/cache.
        return self._meta.queryset._clone()
    def obj_get_list(self, request=None, **kwargs):
        """
        A ORM-specific implementation of ``obj_get_list``.
        Takes an optional ``request`` object, whose ``GET`` dictionary can be
        used to narrow the query.
        """
        filters = {}
        if hasattr(request, 'GET'):
            # Grab a mutable copy.
            filters = request.GET.copy()
        # Update with the provided kwargs.
        filters.update(kwargs)
        applicable_filters = self.build_filters(filters=filters)
        try:
            base_object_list = self.apply_filters(request, applicable_filters)
            return self.apply_authorization_limits(request, base_object_list)
        except ValueError:
            # The ORM rejected a filter value (e.g. non-integer pk).
            raise BadRequest("Invalid resource lookup data provided (mismatched type).")
    def obj_get(self, request=None, **kwargs):
        """
        A ORM-specific implementation of ``obj_get``.
        Takes optional ``kwargs``, which are used to narrow the query to find
        the instance.
        """
        try:
            base_object_list = self.get_object_list(request).filter(**kwargs)
            object_list = self.apply_authorization_limits(request, base_object_list)
            stringified_kwargs = ', '.join(["%s=%s" % (k, v) for k, v in kwargs.items()])
            # Exactly one match is required; len() evaluates the queryset here.
            if len(object_list) <= 0:
                raise self._meta.object_class.DoesNotExist("Couldn't find an instance of '%s' which matched '%s'." % (self._meta.object_class.__name__, stringified_kwargs))
            elif len(object_list) > 1:
                raise MultipleObjectsReturned("More than '%s' matched '%s'." % (self._meta.object_class.__name__, stringified_kwargs))
            return object_list[0]
        except ValueError:
            raise NotFound("Invalid resource lookup data provided (mismatched type).")
    def obj_create(self, bundle, request=None, **kwargs):
        """
        A ORM-specific implementation of ``obj_create``.

        Builds a fresh model instance, hydrates/validates it, then saves the
        object along with its FK and M2M relations.
        """
        bundle.obj = self._meta.object_class()
        # Seed the fresh instance with URI-supplied kwargs (e.g. a parent pk).
        for key, value in kwargs.items():
            setattr(bundle.obj, key, value)
        bundle = self.full_hydrate(bundle)
        self.is_valid(bundle,request)
        if bundle.errors:
            self.error_response(bundle.errors, request)
        # Save FKs just in case.
        self.save_related(bundle)
        # Save parent
        bundle.obj.save()
        # Now pick up the M2M bits.
        m2m_bundle = self.hydrate_m2m(bundle)
        self.save_m2m(m2m_bundle)
        return bundle
    def lookup_kwargs_with_identifiers(self, bundle, kwargs):
        """
        Kwargs here represent uri identifiers Ex: /repos/<user_id>/<repo_name>/
        We need to turn those identifiers into Python objects for generating
        lookup parameters that can find them in the DB
        """
        lookup_kwargs = {}
        bundle.obj = self.get_object_list(bundle.request).model()
        # Override data values, we rely on uri identifiers
        bundle.data.update(kwargs)
        # We're going to manually hydrate, as opposed to calling
        # ``full_hydrate``, to ensure we don't try to flesh out related
        # resources & keep things speedy.
        bundle = self.hydrate(bundle)
        for identifier in kwargs:
            if identifier == self._meta.detail_uri_name:
                # The detail identifier (usually pk) is used verbatim.
                lookup_kwargs[identifier] = kwargs[identifier]
                continue
            field_object = self.fields[identifier]
            # Skip readonly or related fields.
            if field_object.readonly is True or getattr(field_object, 'is_related', False):
                continue
            # Check for an optional method to do further hydration.
            method = getattr(self, "hydrate_%s" % identifier, None)
            if method:
                bundle = method(bundle)
            if field_object.attribute:
                value = field_object.hydrate(bundle)
                lookup_kwargs[identifier] = value
        return lookup_kwargs
def obj_update(self, bundle, request=None, skip_errors=False, **kwargs):
"""
A ORM-specific implementation of ``obj_update``.
"""
if not bundle.obj or not self.get_bundle_detail_data(bundle):
try:
lookup_kwargs = self.lookup_kwargs_with_identifiers(bundle, kwargs)
except:
# if there is trouble hydrating the data, fall back to just
# using kwargs by itself (usually it only contains a "pk" key
# and this will work fine.
lookup_kwargs = kwargs
try:
bundle.obj = self.obj_get(bundle.request, **lookup_kwargs)
except ObjectDoesNotExist:
raise NotFound("A model instance matching the provided arguments could not be found.")
bundle = self.full_hydrate(bundle)
self.is_valid(bundle,request)
if bundle.errors and not skip_errors:
self.error_response(bundle.errors, request)
# Save FKs just in case.
self.save_related(bundle)
# Save the main object.
bundle.obj.save()
# Now pick up the M2M bits.
m2m_bundle = self.hydrate_m2m(bundle)
self.save_m2m(m2m_bundle)
return bundle
    def obj_delete_list(self, request=None, **kwargs):
        """
        A ORM-specific implementation of ``obj_delete_list``.
        Takes optional ``kwargs``, which can be used to narrow the query.
        """
        base_object_list = self.get_object_list(request).filter(**kwargs)
        authed_object_list = self.apply_authorization_limits(request, base_object_list)
        if hasattr(authed_object_list, 'delete'):
            # It's likely a ``QuerySet``. Call ``.delete()`` for efficiency.
            authed_object_list.delete()
        else:
            # Fall back to per-object deletes (e.g. a plain list of models).
            for authed_obj in authed_object_list:
                authed_obj.delete()
    def obj_delete(self, request=None, **kwargs):
        """
        A ORM-specific implementation of ``obj_delete``.
        Takes optional ``kwargs``, which are used to narrow the query to find
        the instance.
        """
        # Callers may hand us a pre-fetched object via the ``_obj`` kwarg.
        obj = kwargs.pop('_obj', None)
        if not hasattr(obj, 'delete'):
            try:
                obj = self.obj_get(request, **kwargs)
            except ObjectDoesNotExist:
                raise NotFound("A model instance matching the provided arguments could not be found.")
        obj.delete()
    @transaction.commit_on_success()
    def patch_list(self, request, **kwargs):
        """
        An ORM-specific implementation of ``patch_list``.
        Necessary because PATCH should be atomic (all-success or all-fail)
        and the only way to do this neatly is at the database level.
        """
        # NOTE(review): ``commit_on_success`` was removed in Django 1.8
        # (``transaction.atomic`` replaces it) — confirm the supported
        # Django range for this codebase.
        return super(ModelResource, self).patch_list(request, **kwargs)
    def rollback(self, bundles):
        """
        A ORM-specific implementation of ``rollback``.
        Given the list of bundles, delete all models pertaining to those
        bundles.
        """
        for bundle in bundles:
            # Only delete objects that were actually persisted (have a pk).
            if bundle.obj and self.get_bundle_detail_data(bundle):
                bundle.obj.delete()
def save_related(self, bundle):
"""
Handles the saving of related non-M2M data.
Calling assigning ``child.parent = parent`` & then calling
``Child.save`` isn't good enough to make sure the ``parent``
is saved.
To get around this, we go through all our related fields &
call ``save`` on them if they have related, non-M2M data.
M2M data is handled by the ``ModelResource.save_m2m`` method.
"""
for field_name, field_object in self.fields.items():
if not getattr(field_object, 'is_related', False):
continue
if getattr(field_object, 'is_m2m', False):
continue
if not field_object.attribute:
continue
if field_object.readonly:
continue
if field_object.blank and not bundle.data.has_key(field_name):
continue
# Get the object.
try:
related_obj = getattr(bundle.obj, field_object.attribute)
except ObjectDoesNotExist:
related_obj = None
# Because sometimes it's ``None`` & that's OK.
if related_obj:
if field_object.related_name:
if not self.get_bundle_detail_data(bundle):
bundle.obj.save()
setattr(related_obj, field_object.related_name, bundle.obj)
related_obj.save()
setattr(bundle.obj, field_object.attribute, related_obj)
    def save_m2m(self, bundle):
        """
        Handles the saving of related M2M data.
        Due to the way Django works, the M2M data must be handled after the
        main instance, which is why this isn't a part of the main ``save`` bits.
        Currently slightly inefficient in that it will clear out the whole
        relation and recreate the related data as needed.
        """
        for field_name, field_object in self.fields.items():
            if not getattr(field_object, 'is_m2m', False):
                continue
            if not field_object.attribute:
                continue
            if field_object.readonly:
                continue
            # Get the manager.
            related_mngr = None
            if isinstance(field_object.attribute, basestring):
                related_mngr = getattr(bundle.obj, field_object.attribute)
            elif callable(field_object.attribute):
                # The attribute may be a callable that yields the manager.
                related_mngr = field_object.attribute(bundle)
            if not related_mngr:
                continue
            if hasattr(related_mngr, 'clear'):
                # Clear it out, just to be safe.
                related_mngr.clear()
            related_objs = []
            for related_bundle in bundle.data[field_name]:
                # Persist each related object before re-linking it.
                related_bundle.obj.save()
                related_objs.append(related_bundle.obj)
            related_mngr.add(*related_objs)
def detail_uri_kwargs(self, bundle_or_obj):
"""
Given a ``Bundle`` or an object (typically a ``Model`` instance),
it returns the extra kwargs needed to generate a detail URI.
By default, it uses the model's ``pk`` in order to create the URI.
"""
kwargs = {}
if isinstance(bundle_or_obj, Bundle):
kwargs[self._meta.detail_uri_name] = getattr(bundle_or_obj.obj, self._meta.detail_uri_name)
else:
kwargs[self._meta.detail_uri_name] = getattr(bundle_or_obj, self._meta.detail_uri_name)
return kwargs
class NamespacedModelResource(ModelResource):
    """
    A ModelResource subclass that respects Django namespaces.
    """
    def _build_reverse_url(self, name, args=None, kwargs=None):
        # Prefix the URL name with the configured namespace so ``reverse``
        # resolves inside namespaced urlconfs.
        namespaced = "%s:%s" % (self._meta.urlconf_namespace, name)
        return reverse(namespaced, args=args, kwargs=kwargs)
# Based off of ``piston.utils.coerce_put_post``. Similarly BSD-licensed.
# And no, the irony is not lost on me.
def convert_post_to_VERB(request, verb):
"""
Force Django to process the VERB.
"""
if request.method == verb:
if hasattr(request, '_post'):
del(request._post)
del(request._files)
try:
request.method = "POST"
request._load_post_and_files()
request.method = verb
except AttributeError:
request.META['REQUEST_METHOD'] = 'POST'
request._load_post_and_files()
request.META['REQUEST_METHOD'] = verb
setattr(request, verb, request.POST)
return request
def convert_post_to_put(request):
    """Coerce a POST-style request into a PUT (see ``convert_post_to_VERB``)."""
    return convert_post_to_VERB(request, verb='PUT')
def convert_post_to_patch(request):
    """Coerce a POST-style request into a PATCH (see ``convert_post_to_VERB``)."""
    return convert_post_to_VERB(request, verb='PATCH')
| VishvajitP/django-tastypie | tastypie/resources.py | Python | bsd-3-clause | 84,667 |
from execnet import Group
from execnet.gateway_bootstrap import fix_pid_for_jython_popen
def test_jython_bootstrap_not_on_remote():
    """Chained (via) popen gateways must come up without the jython pid fix."""
    group = Group()
    try:
        group.makegateway('popen//id=via')
        group.makegateway('popen//via=via')
    finally:
        group.terminate(timeout=1.0)
def test_jython_bootstrap_fix():
    """``fix_pid_for_jython_popen`` restores or ignores the popen pid correctly."""
    group = Group()
    gw = group.makegateway('popen')
    popen = gw._io.popen
    real_pid = popen.pid
    try:
        # nothing happens when calling it on a normal setup
        fix_pid_for_jython_popen(gw)
        assert popen.pid == real_pid
        # if there is no pid for a popen gw, restore
        popen.pid = None
        fix_pid_for_jython_popen(gw)
        assert popen.pid == real_pid
        # if there is no pid for other gw, ignore - they are remote
        gw.spec.popen = False
        popen.pid = None
        fix_pid_for_jython_popen(gw)
        assert popen.pid is None
    finally:
        # restore the real pid so terminate() can reap the subprocess
        popen.pid = real_pid
        group.terminate(timeout=1)
| bryan-lunt/execnet | testing/test_fixes.py | Python | mit | 1,009 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the chitin armor-segment component Tangible from its IFF template."""
    result = Tangible()

    result.template = "object/tangible/component/armor/shared_armor_segment_chitin.iff"
    result.attribute_template_id = -1
    result.stfName("craft_clothing_ingredients_n","armor_segment_chitin")

    #### BEGIN MODIFICATIONS ####
    #### END MODIFICATIONS ####

    # BUGFIX: stray repository metadata was fused onto the return line,
    # making it a syntax error; removed.
    return result
#!/usr/bin/env python
__author__ = 'Rolf Jagerman'
from PySide import QtGui
import os
from authentication import AuthenticationListener, AuthenticationClient
from loadui import loadUi
from config import UI_DIRECTORY
from drawers import Drawers
from users import User
from locations import Location
class AddDelivery(QtGui.QFrame, AuthenticationListener):
"""
The add delivery form that enables a user to place a delivery in the robot
"""
    def __init__(self, content):
        """Build the form, wire up the buttons and register for auth events."""
        super(AddDelivery, self).__init__()
        self.content = content
        loadUi(os.path.join(UI_DIRECTORY, 'add_delivery.ui'), self)
        self.cancel_button.clicked.connect(self.cancel)
        self.save_button.clicked.connect(self.place)
        self.deliveries = {}
        # Set by on_login(); None while nobody is authenticated.
        self.sender = None
        self.drawer_id = '1'
        for location in Location.get_locations():
            self.location_combobox.addItem(location.name)
        #from PySide.QtGui import QComboBox
        #test = QComboBox()
        #self.recipient_combobox.addItems(User.get_users())
        #self.location_combobox.addItems(Location.get_locations())
        AuthenticationClient.add_listener(self)
    def show(self, *args, **kwargs):
        """Refresh recipient/drawer/location choices and display the form."""
        super(AddDelivery, self).show()
        self.prepare_open_drawer()
        # Rebuild the recipient list, excluding the authenticated sender.
        # NOTE(review): assumes a user is logged in (self.sender set) — verify callers.
        while self.recipient_combobox.count() > 0:
            self.recipient_combobox.removeItem(0);
        for user in User.get_users():
            if user.id != self.sender.id:
                self.recipient_combobox.addItem(user.name)
        # Reset the combobox fields when this form is shown
        #from PySide.QtGui import QComboBox
        ##test = QComboBox()
        while self.drawer_combobox.count() > 0:
            self.drawer_combobox.removeItem(0)
        #self.drawer_combobox.removeItems()
        # Only drawers without a pending delivery are offered.
        for drawer in Drawers.available_drawers():
            self.drawer_combobox.addItem(drawer)
        self.recipient_combobox.setCurrentIndex(0)
        self.location_combobox.setCurrentIndex(0)
        self.drawer_combobox.setCurrentIndex(0)
    def prepare_open_drawer(self):
        """Configure the shared 'open_drawer' screen for the place-delivery flow."""
        self.content.components['open_drawer'].save = lambda : self.save()
        self.content.components['open_drawer'].back = lambda : self.back()
        self.content.components['open_drawer'].success_message = 'Have you succesfully placed your delivery?'
        self.content.components['open_drawer'].instruction_message = 'place'
    def on_login(self, user):
        # AuthenticationListener callback: remember who will send deliveries.
        self.sender = user
    def on_login_failure(self, user):
        # A failed login leaves no authenticated sender.
        self.sender = None
    def on_logout(self, user):
        # Logging out clears the sender again.
        self.sender = None
    def place(self):
        """Resolve the selected names to ids and advance to the drawer screen."""
        self.drawer_id = self.drawer_combobox.currentText()
        recipient_text = self.recipient_combobox.currentText()
        # Map the displayed user name back to its id.
        for user in User.get_users():
            if user.name == recipient_text:
                self.recipient_id = user.id
        # Same for the delivery location.
        location_text = self.location_combobox.currentText()
        for location in Location.get_locations():
            if location.name == location_text:
                self.location_id = location.id
        self.content.components['open_drawer'].drawer_id = self.drawer_combobox.currentText()
        self.content.activate(self.content.components['open_drawer'])
    def cancel(self):
        # Abandon the form and return to the welcome screen.
        self.content.activate(self.content.components['welcome'])
    def back(self):
        # Return from the drawer screen to this form.
        self.content.activate(self.content.components['add_delivery'])
def save(self):
Drawers.add_delivery(self.drawer_id, self.recipient_id, self.location_id)
self.content.activate(self.content.components['welcome']) | MartienLagerweij/aidu | aidu_gui/src/aidu_gui/add_delivery.py | Python | mit | 3,609 |
import demistomock as demisto
from CommonServerPython import BaseClient
import BitSightForSecurityPerformanceManagement as bitsight
from datetime import datetime
def test_get_companies_guid_command(mocker):
    """get_companies_guid_command should surface each company's guid."""
    # Positive Scenario
    client = bitsight.Client(base_url='https://test.com')
    res = {"my_company": {"guid": "123"}, "companies": [{"name": "abc", "shortname": "abc", "guid": "123"}]}
    mocker.patch.object(BaseClient, '_http_request', return_value=res)
    _, outputs, _ = bitsight.get_companies_guid_command(client)
    assert outputs[0].get('guid') == '123'
def test_get_company_details_command(mocker):
    """get_company_details_command should pass through the company record."""
    inp_args = {'guid': '123'}
    client = bitsight.Client(base_url='https://test.com')
    res = {"name": "abc"}
    mocker.patch.object(BaseClient, '_http_request', return_value=res)
    _, outputs, _ = bitsight.get_company_details_command(client, inp_args)
    assert outputs.get('name') == 'abc'
def test_get_company_findings_command(mocker):
    """get_company_findings_command should return the findings list."""
    inp_args = {'guid': '123', 'first_seen': '2021-01-01', 'last_seen': '2021-01-02'}
    client = bitsight.Client(base_url='https://test.com')
    res = {"results": [{"severity": "severe"}]}
    mocker.patch.object(BaseClient, '_http_request', return_value=res)
    _, outputs, _ = bitsight.get_company_findings_command(client, inp_args)
    assert outputs[0].get('severity') == 'severe'
def test_fetch_incidents(mocker):
    """fetch_incidents should advance last_run and build incident dicts."""
    inp_args = {'guid': '123', 'findings_min_severity': 'severe', 'findings_grade': 'WARN',
                'findings_asset_category': 'high', 'risk_vector': 'breaches,dkim'}
    client = bitsight.Client(base_url='https://test.com')
    mocker.patch.object(demisto, 'params', return_value=inp_args)
    res = {"results": [{"severity": "severe", "first_seen": "2021-02-01", "temporary_id": "temp1"}]}
    mocker.patch.object(BaseClient, '_http_request', return_value=res)
    last_run, events = bitsight.fetch_incidents(client=client,
                                                last_run={'time': '2020-12-01T01:01:01Z'},
                                                params=inp_args)
    # The new checkpoint should carry today's date.
    curr_date = datetime.now().strftime('%Y-%m-%d')
    assert curr_date in last_run['time']
    assert events == [{'name': 'BitSight Finding - temp1', 'occurred': '2021-02-01T00:00:00Z',
                       'rawJSON': '{"severity": "severe", "first_seen": "2021-02-01", "temporary_id": "temp1"}'}]
| demisto/content | Packs/BitSight/Integrations/BitSightForSecurityPerformanceManagement/BitSightForSecurityPerformanceManagement_test.py | Python | mit | 2,406 |
#!/usr/bin/env python
# Exercises PiecewiseHermiteEvaluationImplementation against sin(x):
# sample locations/values/derivatives are built, then the interpolant is
# compared with the reference function on a finer grid (Python 2 script).
from openturns import *
ref = NumericalMathFunction("x", "sin(x)")
size = 12
locations = NumericalPoint(size)
values = NumericalPoint(size)
derivatives = NumericalPoint(size)
# Build locations/values/derivatives with non-uniform (quadratically
# increasing) locations in [0, 10].
for i in range(size):
    locations[i] = 10.0 * i * i / (size - 1.0) / (size - 1.0)
    values[i] = ref([locations[i]])[0]
    derivatives[i] = ref.gradient([locations[i]])[0, 0]
evaluation = PiecewiseHermiteEvaluationImplementation(
    locations, values, derivatives)
print "evaluation=", evaluation
# Check the values
# Evaluate on 2*size points spanning [-1, 11] (includes extrapolation).
for i in range(2 * size):
    x = [-1.0 + 12.0 * i / (2.0 * size - 1.0)]
    print "f( %.12g )=" % x[0], evaluation(x), ", ref=", ref(x)
| sofianehaddad/ot-svn | python/test/t_PiecewiseHermiteEvaluationImplementation_std.py | Python | mit | 726 |
#!/usr/bin/env python
import os
import csv
import rospy
from std_msgs.msg import Bool
from dbw_mkz_msgs.msg import ThrottleCmd, SteeringCmd, BrakeCmd, SteeringReport
'''
You can use this file to test your DBW code against a bag recorded with a reference implementation.
The bag can be found at https://s3-us-west-1.amazonaws.com/udacity-selfdrivingcar/files/reference.bag.zip
To use the downloaded bag file, rename it to 'dbw_test.rosbag.bag' and place it in the CarND-Capstone/data folder.
Then with roscore running, you can then use roslaunch with the dbw_test.launch file found in
<project_repo>/ros/src/twist_controller/launch.
This file will produce 3 csv files which you can process to figure out how your DBW node is
performing on various commands.
`/actual/*` are commands from the recorded bag while `/vehicle/*` are the output of your node.
'''
class DBWTestNode(object):
    """ROS node that records proposed (/vehicle/*) vs. reference (/actual/*)
    drive-by-wire commands and dumps them to CSV files on shutdown.

    Each /actual/* message is paired with the most recently received
    /vehicle/* command; pairs are only recorded while DBW is enabled.
    """
    def __init__(self):
        rospy.init_node('dbw_test_node')
        # Commands produced by the node under test.
        rospy.Subscriber('/vehicle/steering_cmd', SteeringCmd, self.steer_cb)
        rospy.Subscriber('/vehicle/throttle_cmd', ThrottleCmd, self.throttle_cb)
        rospy.Subscriber('/vehicle/brake_cmd', BrakeCmd, self.brake_cb)
        # Reference commands replayed from the recorded bag.
        rospy.Subscriber('/actual/steering_cmd', SteeringCmd, self.actual_steer_cb)
        rospy.Subscriber('/actual/throttle_cmd', ThrottleCmd, self.actual_throttle_cb)
        rospy.Subscriber('/actual/brake_cmd', BrakeCmd, self.actual_brake_cb)
        rospy.Subscriber('/vehicle/dbw_enabled', Bool, self.dbw_enabled_cb)
        # Latest proposed values; reset to None once paired with an actual.
        self.steer = self.throttle = self.brake = None
        self.steer_data = []
        self.throttle_data = []
        self.brake_data = []
        self.dbw_enabled = False
        base_path = os.path.dirname(os.path.abspath(__file__))
        self.steerfile = os.path.join(base_path, 'steers.csv')
        self.throttlefile = os.path.join(base_path, 'throttles.csv')
        self.brakefile = os.path.join(base_path, 'brakes.csv')
        # NOTE: blocks until rospy shutdown, then writes the CSVs.
        self.loop()
    def loop(self):
        """Spin until shutdown, then flush collected pairs to CSV files."""
        rate = rospy.Rate(10) # 10Hz
        while not rospy.is_shutdown():
            rate.sleep()
        fieldnames = ['actual', 'proposed']
        with open(self.steerfile, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
            writer.writerows(self.steer_data)
        with open(self.throttlefile, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
            writer.writerows(self.throttle_data)
        with open(self.brakefile, 'w') as csvfile:
            writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
            writer.writeheader()
            writer.writerows(self.brake_data)
    def dbw_enabled_cb(self, msg):
        self.dbw_enabled = msg.data
    def steer_cb(self, msg):
        self.steer = msg.steering_wheel_angle_cmd
    def throttle_cb(self, msg):
        self.throttle = msg.pedal_cmd
    def brake_cb(self, msg):
        self.brake = msg.pedal_cmd
    def actual_steer_cb(self, msg):
        # Pair the reference value with the latest proposed one, then clear
        # it so each proposed command is recorded at most once.
        if self.dbw_enabled and self.steer is not None:
            self.steer_data.append({'actual': msg.steering_wheel_angle_cmd,
                                    'proposed': self.steer})
            self.steer = None
    def actual_throttle_cb(self, msg):
        if self.dbw_enabled and self.throttle is not None:
            self.throttle_data.append({'actual': msg.pedal_cmd,
                                       'proposed': self.throttle})
            self.throttle = None
    def actual_brake_cb(self, msg):
        if self.dbw_enabled and self.brake is not None:
            self.brake_data.append({'actual': msg.pedal_cmd,
                                    'proposed': self.brake})
            self.brake = None
if __name__ == '__main__':
    # Constructing the node runs the whole collection loop (see __init__).
    DBWTestNode()
| zegnus/self-driving-car-machine-learning | p13-final-project/ros/src/twist_controller/dbw_test.py | Python | mit | 3,850 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Drop the Membership through-model and replace it with a direct
    Project.manager foreign key to the user model.

    Existing projects get manager pk=1 as a one-off backfill value
    (preserve_default=False: the default is not kept on the field).
    """
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('projects', '0002_auto_20150401_2057'),
    ]
    operations = [
        migrations.AlterUniqueTogether(
            name='membership',
            unique_together=None,
        ),
        migrations.RemoveField(
            model_name='membership',
            name='member',
        ),
        migrations.RemoveField(
            model_name='membership',
            name='project',
        ),
        migrations.DeleteModel(
            name='Membership',
        ),
        migrations.AddField(
            model_name='project',
            name='manager',
            field=models.ForeignKey(default=1, to=settings.AUTH_USER_MODEL),
            preserve_default=False,
        ),
    ]
| bilbeyt/ituro | ituro/projects/migrations/0003_auto_20160131_0706.py | Python | mit | 970 |
# -*- coding: utf-8 -*-
# Copyright (C) 2008-2015, Luis Pedro Coelho <luis@luispedro.org>
# vim: set ts=4 sts=4 sw=4 expandtab smartindent:
#
# License: MIT. See COPYING.MIT file in the milk distribution
from __future__ import division
from .classifier import normaliselabels, ctransforms_model
from .base import supervised_model
import numpy
import numpy as np
from . import _svm
__all__ = [
'rbf_kernel',
'polynomial_kernel',
'precomputed_kernel',
'dot_kernel',
'svm_raw',
'svm_binary',
'svm_to_binary',
'svm_sigmoidal_correction',
'sigma_value_fisher',
'fisher_tuned_rbf_svm',
]
def _svm_apply(SVM, q):
'''
f_i = _svm_apply(SVM, q)
@internal: This is mostly used for testing
'''
X,Y,Alphas,b,C,kernel=SVM
N = len(X)
s = 0.0
for i in range(N):
s += Alphas[i] * Y[i] * kernel(q, X[i])
return s - b
def svm_learn_smo(X,Y,kernel,C,eps=1e-4,tol=1e-2,cache_size=(1<<20)):
    '''
    Learn a svm classifier

    X: data
    Y: labels in SVM format (ie Y[i] in (1,-1))

    Returns (alphas, b): the dual coefficients and the bias term.

    This is a very raw interface. In general, you should use a class
    like svm_classifier.

    Implements the Sequential Minimum Optimisation Algorithm from Platt's
    "Fast training of support vector machines using sequential minimal optimization"
    in Advances in kernel methods: support vector learning
             Pages: 185 - 208
       Year of Publication: 1999
       ISBN:0-262-19416-3
    '''
    assert numpy.all(numpy.abs(Y) == 1)
    assert len(X) == len(Y)
    N = len(Y)
    Y = Y.astype(numpy.int32)
    # NOTE(review): the eps and tol arguments are silently ignored here --
    # the params vector hard-codes 1e-3/1e-5, whereas svm_learn_libsvm
    # passes eps/tol through.  Confirm against _svm.eval_SMO's expected
    # param layout before changing.
    params = numpy.array([0,C,1e-3,1e-5],numpy.double)
    Alphas0 = numpy.zeros(N, numpy.double)
    _svm.eval_SMO(X,Y,Alphas0,params,kernel,cache_size)
    # params[0] is filled in by the optimiser and returned as the bias b
    # (callers unpack it as `alphas, b = svm_learn_smo(...)`).
    return Alphas0, params[0]
def svm_learn_libsvm(features, labels, kernel, C, eps=1e-4, tol=1e-2, cache_size=(1<<20), alphas=None):
    '''
    Learn a svm classifier using LIBSVM optimiser

    This is a very raw interface. In general, you should use a class
    like svm_classifier.

    This uses the LIBSVM optimisation algorithm

    Parameters
    ----------
    features : ndarray
        data
    labels : ndarray
        labels in SVM format (ie labels[i] in (1,-1))
    kernel : kernel
        either a Python callable or an object exposing
        kernel_nr_/kernel_arg_ for the C fast path
    C : float
    eps : float, optional
    tol : float, optional
    cache_size : int, optional
    alphas : ndarray, optional
        pre-allocated/warm-start coefficient vector (np.double, length n)

    Returns
    -------
    alphas : ndarray
        dual coefficients
    b : float
        bias term
    '''
    if not np.all(np.abs(labels) == 1):
        raise ValueError('milk.supervised.svm.svm_learn_libsvm: Y[i] != (-1,+1)')
    assert len(features) == len(labels)
    n = len(labels)
    labels = labels.astype(np.int32)
    # presumably the linear term of the dual QP objective (all -1s) -- as in
    # the standard SVM dual formulation; verify against _svm.eval_LIBSVM.
    p = -np.ones(n, np.double)
    # layout: [b (output), C, eps, tol]
    params = np.array([0,C,eps,tol], dtype=np.double)
    if alphas is None:
        alphas = np.zeros(n, np.double)
    elif alphas.dtype != np.double or len(alphas) != n:
        raise ValueError('milk.supervised.svm_learn_libsvm: alphas is in wrong format')
    _svm.eval_LIBSVM(features, labels, alphas, p, params, kernel, cache_size)
    # params[0] holds the bias computed by the optimiser.
    return alphas, params[0]
class preprocessed_rbf_kernel(object):
    '''RBF kernel evaluation specialised for a fixed support-vector matrix.

    Caches the per-row squared norms of X so each query only costs a dot
    product: ||x - q||^2 = ||x||^2 - 2<x,q> + ||q||^2.
    '''
    def __init__(self, X, sigma, beta):
        self.X = X
        # per-row squared norms ||x_i||^2
        self.Xsum = (X**2).sum(1)
        self.sigma = sigma
        self.beta = beta
    def call_many(self, qs):
        # beta * exp(-||x_i - q_j||^2 / sigma) for every (sv, query) pair;
        # 'euclidean2' is the squared Euclidean distance.
        from milk.unsupervised import pdist
        dists = pdist(self.X, qs, 'euclidean2')
        dists /= -self.sigma
        np.exp(dists, dists)
        dists *= self.beta
        # transpose so rows correspond to queries
        return dists.T
    def __call__(self, q):
        # computes -||x_i - q||^2 / sigma via the expanded form
        minus_d2_sigma = np.dot(self.X,q)
        minus_d2_sigma *= 2.
        minus_d2_sigma -= self.Xsum
        minus_d2_sigma -= np.dot(q,q)
        minus_d2_sigma /= self.sigma
        return self.beta * np.exp(minus_d2_sigma)
class rbf_kernel(object):
    '''
    kernel = rbf_kernel(sigma,beta=1)

    Radial Basis Function kernel

    Returns a kernel (ie, a function that implements)
        beta * exp( - ||x1 - x2||^2 / sigma)

    (Note: the *squared* Euclidean distance is used, and sigma divides it
    directly -- there is no 2*sigma^2 factor.  The preprocessed fast path
    in preprocessed_rbf_kernel computes the same quantity.)
    '''
    def __init__(self, sigma, beta=1):
        self.sigma = sigma
        self.beta = beta
        # kernel_nr_/kernel_arg_ identify this kernel to the C optimiser
        # fast path (see svm_raw.train) -- presumably 0 means RBF.
        self.kernel_nr_ = 0
        self.kernel_arg_ = float(sigma)
    def __call__(self, x1, x2):
        # d2 = squared Euclidean distance between x1 and x2
        d2 = x1 - x2
        d2 **= 2
        d2 = d2.sum()
        res = self.beta*np.exp(-d2/self.sigma)
        return res
    def __str__(self):
        return 'rbf_kernel(%s, %s)' % (self.sigma, self.beta)
    __repr__ = __str__
    def preprocess(self, X):
        # specialised evaluator bound to the support vectors X
        return preprocessed_rbf_kernel(X, self.sigma, self.beta)
class polynomial_kernel(object):
    '''
    kernel = polynomial_kernel(d, c=1)

    Polynomial kernel of degree `d` with additive constant `c`:
    returns a callable computing (<x1,x2> + c)**d.
    '''
    def __init__(self, d, c=1):
        self.d = d
        self.c = c

    def __call__(self, x1, x2):
        base = np.dot(x1, x2) + self.c
        return base ** self.d
class precomputed_kernel(object):
'''
kernel = precomputed_kernel(kmatrix)
A "fake" kernel which is precomputed.
'''
def __init__(self, kmatrix, copy=False):
self.kmatrix = np.ascontiguousarray(kmatrix, np.double, copy=copy)
self.kernel_nr_ = 1
self.kernel_arg_ = 0.
def __call__(self, x0, x1):
return self.kmatrix[x0,x1]
class _call_kernel(object):
def __init__(self, k, svs):
self.svs = svs
self.kernel = k
def __call__(self, q):
return np.array([self.kernel(s, q) for s in self.svs])
class preprocessed_dot_kernel(object):
    '''Dot-product kernel bound to a fixed support-vector matrix.'''
    def __init__(self, svs):
        self.svs = svs

    def __call__(self, x1):
        # a single matrix-vector product yields the kernel value
        # against every support vector at once
        return np.dot(self.svs, x1)
class dot_kernel(object):
    '''Linear (dot-product) kernel: K(x0, x1) = <x0, x1>.'''
    def __init__(self):
        # kernel_nr_/kernel_arg_ identify this kernel type to the C optimiser
        self.kernel_nr_ = 2
        self.kernel_arg_ = 0.

    def __call__(self, x0, x1):
        return np.dot(x0, x1)

    def preprocess(self, svs):
        # specialised evaluator bound to the support vectors
        return preprocessed_dot_kernel(svs)
class svm_raw_model(supervised_model):
    '''Trained raw SVM: evaluates sum_i(Yw_i * K(sv_i, q)) - b.

    `Yw` is the elementwise product of the support vectors' labels and
    their alpha coefficients (see svm_raw.train).
    '''
    def __init__(self, svs, Yw, b, kernel):
        self.svs = svs
        self.Yw = Yw
        self.b = b
        self.kernel = kernel
        try:
            # kernels that support it get a specialised evaluator bound to
            # the support vectors; otherwise fall back to per-sv calls
            self.kernelfunction = self.kernel.preprocess(self.svs)
        except AttributeError:
            self.kernelfunction = _call_kernel(self.kernel, self.svs)
    def apply_many(self, qs):
        '''Decision values for a batch of query points.'''
        try:
            # batch fast path (e.g. preprocessed_rbf_kernel.call_many)
            qs = self.kernelfunction.call_many(qs)
        except AttributeError:
            qs = np.array(list(map(self.kernelfunction, qs)))
        return np.dot(qs, self.Yw) - self.b
    def apply(self, q):
        '''Decision value for a single query point.'''
        Q = self.kernelfunction(q)
        return np.dot(Q, self.Yw) - self.b
class svm_raw(object):
    '''
    svm_raw: classifier

    classifier = svm_raw(kernel, C, eps=1e-3, tol=1e-8)

    Binary SVM learner returning raw (uncalibrated) decision values.

    Parameters
    ----------
    kernel : callable
        the kernel to use. This should be a function that takes two data
        arguments see rbf_kernel and polynomial_kernel.
    C : float
        the C parameter
    eps : float, optional
        the precision to which to solve the problem (default 1e-3)
    tol : float, optional
        (|x| < tol) is considered zero
    '''
    def __init__(self, kernel=None, C=1., eps=1e-3, tol=1e-8):
        self.C = C
        self.kernel = kernel
        self.eps = eps
        self.tol = tol
        # 'libsvm' (default) or 'smo'; see train()
        self.algorithm = 'libsvm'
    def train(self, features, labels, normalisedlabels=False, **kwargs):
        '''Train on a binary problem; returns an svm_raw_model.'''
        assert self.kernel is not None, 'milk.supervised.svm_raw.train: kernel not set!'
        assert self.algorithm in ('libsvm','smo'), 'milk.supervised.svm_raw: unknown algorithm (%s)' % self.algorithm
        assert not (np.isinf(self.C) or np.isnan(self.C)), 'milk.supervised.svm_raw: setting C to NaN or Inf causes problems.'
        features = np.asanyarray(features)
        if normalisedlabels:
            Y = labels.copy()
        else:
            Y,_ = normaliselabels(labels)
        assert Y.max() == 1, 'milk.supervised.svm_raw can only handle binary problems'
        # map labels {0,1} -> {-1,+1} as the optimisers expect
        Y *= 2
        Y -= 1
        kernel = self.kernel
        try:
            # fast path: kernels exposing kernel_nr_/kernel_arg_ are handed
            # to the C code as a (type, arg) pair with contiguous features
            kernel = (self.kernel.kernel_nr_, self.kernel.kernel_arg_)
            features = np.ascontiguousarray(features, np.double)
        except AttributeError:
            # plain Python callable kernel
            pass
        if self.algorithm == 'smo':
            alphas,b = svm_learn_smo(features,Y,kernel,self.C,self.eps,self.tol)
        else:
            alphas,b = svm_learn_libsvm(features,Y,kernel,self.C,self.eps,self.tol)
        # keep only the support vectors (non-zero alphas)
        svsi = (alphas != 0)
        svs = features[svsi]
        w = alphas[svsi]
        Y = Y[svsi]
        # fold the labels into the weights: Yw_i = alpha_i * y_i
        Yw = w * Y
        return svm_raw_model(svs, Yw, b, self.kernel)
    def get_params(self):
        return self.C, self.eps,self.tol
    def set_params(self,params):
        self.C,self.eps,self.tol = params
    def set_option(self, optname, value):
        setattr(self, optname, value)
def learn_sigmoid_constants(F,Y,
        max_iters=None,
        min_step=1e-10,
        sigma=1e-12,
        eps=1e-5):
    '''
    A,B = learn_sigmoid_constants(F,Y)

    Fit the sigmoid P(y=1|f) = 1/(1+exp(A*f+B)) to decision values by
    regularised maximum likelihood (Newton's method with backtracking
    line search).

    This is a very low-level interface look into the svm_classifier class.

    Parameters
    ----------
    F : Values of the function F
    Y : Labels (in boolean format, ie, in (0,1))

    Other Parameters
    ----------------
    max_iters : Maximum nr. of iterations
    min_step :  Minimum step
    sigma :     sigma
    eps : A small number

    Reference for Implementation
    ----------------------------
    Implements the algorithm from "A Note on Platt's Probabilistic Outputs for
    Support Vector Machines" by Lin, Lin, and Weng.
    Machine Learning, Vol. 68, No. 3. (23 October 2007), pp. 267-276
    '''
    # Below we use safe constructs to avoid using the overflown values, but we
    # must compute them because of the way numpy works.
    errorstate = np.seterr(over='ignore')
    # the deci[i] array is called F in this code
    F = np.asanyarray(F)
    Y = np.asanyarray(Y)
    assert len(F) == len(Y)
    assert numpy.all( (Y == 1) | (Y == 0) )
    if max_iters is None:
        max_iters = 1000
    prior1 = Y.sum()
    prior0 = len(F)-prior1
    # smoothed target probabilities (avoid exact 0/1; see the paper)
    hi_t = (prior1+1.)/(prior1+2.)
    lo_t = 1./(prior0+2.)
    T = Y*hi_t + (1-Y)*lo_t
    A = 0.
    B = np.log( (prior0+1.)/(prior1+1.) )
    def target(A,B):
        # negative log-likelihood, computed with the numerically stable
        # branch chosen per-element depending on the sign of fApB
        fApB = F*A + B
        lef = np.log1p(np.exp(fApB))
        lemf = np.log1p(np.exp(-fApB))
        fvals = np.choose(fApB >= 0, ( T*fApB + lemf, (T-1.)*fApB + lef))
        return np.sum(fvals)
    fval = target(A,B)
    # NOTE(review): `iter` shadows the builtin; harmless here but worth renaming.
    for iter in range(max_iters):
        fApB = F*A + B
        ef = np.exp(fApB)
        emf = np.exp(-fApB)
        # p = sigmoid, q = 1 - p, each via its overflow-safe branch
        p = np.choose(fApB >= 0, ( emf/(1.+emf), 1./(1.+ef) ))
        q = np.choose(fApB >= 0, ( 1/(1.+emf), ef/(1.+ef) ))
        d2 = p * q
        # Hessian (regularised by sigma) and gradient of the objective
        h11 = np.dot(F*F,d2) + sigma
        h22 = np.sum(d2) + sigma
        h21 = np.dot(F,d2)
        d1 = T - p
        g1 = np.dot(F,d1)
        g2 = np.sum(d1)
        if abs(g1) < eps and abs(g2) < eps: # Stopping criteria
            break
        # Newton direction (2x2 solve by hand)
        det = h11*h22 - h21*h21
        dA = - (h22*g1 - h21*g2)/det
        dB = - (h21*g1 + h11*g2)/det
        gd = g1*dA + g2*dB
        # backtracking line search with Armijo-style sufficient decrease
        stepsize = 1.
        while stepsize >= min_step:
            newA = A + stepsize*dA
            newB = B + stepsize*dB
            newf = target(newA,newB)
            if newf < fval+eps*stepsize*gd:
                A = newA
                B = newB
                fval = newf
                break
            stepsize /= 2
        else:
            print('Line search fails')
            break
    np.seterr(**errorstate)
    return A,B
class svm_binary_model(supervised_model):
    '''Maps a raw SVM decision value to one of two class labels.

    If `raw` is set, the decision value is passed through unchanged.
    '''
    def __init__(self, classes):
        self.classes = classes
        self.raw = False

    def apply(self, f):
        # (f >= 0.) is a bool, which indexes the two-element label
        # container: False -> classes[0], True -> classes[1]
        return f if self.raw else self.classes[f >= 0.]
class svm_binary(object):
    '''
    classifier = svm_binary()

    model = classifier.train(features, labels)
    assert model.apply(f) in labels

    Learns only the mapping from decision-value sign to the two class
    labels; `features` are otherwise unused.
    '''
    def train(self, features, labels, normalisedlabels=False, **kwargs):
        if normalisedlabels:
            # labels are already 0/1
            return svm_binary_model( (0,1) )
        assert len(labels) >= 2, 'Cannot train from a single example'
        # sorted() makes the label order deterministic
        names = sorted(set(labels))
        assert len(names) == 2, 'milk.supervised.svm.svm_binary.train: Can only handle two class problems'
        return svm_binary_model(names)
class svm_to_binary(object):
    '''
    svm_to_binary(base_svm)

    A simple wrapper so that

    svm_to_binary(base_svm)

    is a model that takes the base_svm classifier and then binarises its model output.

    NOTE: This class does the same job as::

        ctransforms(base_svm, svm_binary())
    '''
    def __init__(self, svm_base):
        '''
        binclassifier = svm_to_binary(svm_base)

        a classifier that binarises the output of svm_base.
        '''
        self.base = svm_base

    def train(self, features, labels, **kwargs):
        # chain the raw model with a label-binarising model
        raw_model = self.base.train(features, labels, **kwargs)
        label_model = svm_binary().train(features, labels, **kwargs)
        return ctransforms_model([raw_model, label_model])

    def set_option(self, opt, value):
        self.base.set_option(opt, value)
class svm_sigmoidal_correction_model(supervised_model):
    '''Applies the fitted sigmoid 1/(1+exp(A*f+B)) to decision values.'''
    def __init__(self, A, B):
        self.A = A
        self.B = B

    def apply(self, features):
        exponent = features * self.A + self.B
        return 1. / (1. + numpy.exp(exponent))
class svm_sigmoidal_correction(object):
    '''
    svm_sigmoidal_correction : a classifier

    Sigmoidal approximation for obtaining a probability estimate out of the output
    of an SVM (Platt scaling; see learn_sigmoid_constants).
    '''
    def __init__(self):
        # None lets learn_sigmoid_constants pick its default iteration cap
        self.max_iters = None
    def train(self, features, labels, **kwargs):
        # here `features` are raw SVM decision values, not input vectors
        A,B = learn_sigmoid_constants(features,labels,self.max_iters)
        return svm_sigmoidal_correction_model(A, B)
    def get_params(self):
        return self.max_iters
    def set_params(self,params):
        self.max_iters = params
def sigma_value_fisher(features,labels):
    '''
    f = sigma_value_fisher(features,labels)
    value_s = f(s)

    Computes a function which computes how good the value of sigma
    is for the features. This function should be *minimised* for a
    good value of sigma.

    Parameters
    -----------
    features : features matrix as 2-ndarray.
    labels : binary label array (values 0 and 1)

    Returns
    -------
    f : a function: float -> float
        this function should be minimised for a good `sigma`

    Reference
    ----------

    Implements the measure in

    "Determination of the spread parameter in the
    Gaussian kernel for classification and regression"
    by Wenjian Wanga, Zongben Xua, Weizhen Luc, and Xiaoyun Zhanga
    '''
    features = np.asanyarray(features)
    xij = np.dot(features,features.T)
    f2 = np.sum(features**2,1)
    # build the pairwise squared-distance matrix by broadcasting:
    # d[i,j] = ||x_i||^2 - 2<x_i,x_j> + ||x_j||^2
    d = f2-2*xij
    d = d.T + f2
    N1 = (labels==0).sum()
    N2 = (labels==1).sum()
    # negated within-class and between-class distance blocks (negated so
    # that exp(C/sigma) below is the RBF kernel value)
    C1 = -d[labels == 0][:,labels == 0]
    C2 = -d[labels == 1][:,labels == 1]
    C12 = -d[labels == 0][:,labels == 1]
    # .copy() makes the blocks contiguous and drops the reference to `d`
    C1 = C1.copy()
    C2 = C2.copy()
    C12 = C12.copy()
    def f(sigma):
        '''Fisher-style criterion for a candidate RBF `sigma` (minimise).'''
        sigma = float(sigma)
        N1 = C1.shape[0]
        N2 = C2.shape[0]
        if C12.shape != (N1,N2):
            raise ValueError
        # average within-class and between-class kernel values
        C1v = np.sum(np.exp(C1/sigma))/N1
        C2v = np.sum(np.exp(C2/sigma))/N2
        C12v = np.sum(np.exp(C12/sigma))/N1/N2
        return (N1 + N2 - C1v - C2v)/(C1v/N1+C2v/N2 - 2.*C12v)
    return f
class fisher_tuned_rbf_svm(object):
    '''
    F = fisher_tuned_rbf_svm(sigmas, base)

    Returns a wrapper classifier that uses RBF kernels automatically
    tuned using sigma_value_fisher.
    '''
    def __init__(self, sigmas, base):
        # candidate sigma values to evaluate; base is the underlying learner
        self.sigmas = sigmas
        self.base = base
    def train(self, features, labels, **kwargs):
        # pick the sigma that minimises the Fisher criterion, then train
        # the base learner with an RBF kernel at that sigma
        f = sigma_value_fisher(features, labels)
        fs = [f(s) for s in self.sigmas]
        self.sigma = self.sigmas[np.argmin(fs)]
        self.base.set_option('kernel',rbf_kernel(self.sigma))
        return self.base.train(features, labels, **kwargs)
| pombredanne/milk | milk/supervised/svm.py | Python | mit | 15,907 |
"""SCons.Tool.mwcc
Tool-specific initialization for the Metrowerks CodeWarrior compiler.
There normally shouldn't be any need to import this module directly.
It will usually be imported through the generic SCons.Tool.Tool()
selection method.
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Tool/mwcc.py 2014/08/24 12:12:31 garyo"
import os
import os.path
import SCons.Util
def set_vars(env):
    """Set MWCW_VERSION, MWCW_VERSIONS, and some codewarrior environment vars

    MWCW_VERSIONS is set to a list of objects representing installed versions

    MWCW_VERSION  is set to the version object that will be used for building.
                  MWCW_VERSION can be set to a string during Environment
                  construction to influence which version is chosen, otherwise
                  the latest one from MWCW_VERSIONS is used.

    Returns true if at least one version is found, false otherwise
    """
    desired = env.get('MWCW_VERSION', '')
    # return right away if the variables are already set
    if isinstance(desired, MWVersion):
        return 1
    elif desired is None:
        # a previous call already determined nothing is installed
        return 0
    versions = find_versions()
    version = None
    if desired:
        # user asked for a specific version string
        for v in versions:
            if str(v) == desired:
                version = v
    elif versions:
        # default to the newest installed version
        version = versions[-1]
    env['MWCW_VERSIONS'] = versions
    env['MWCW_VERSION'] = version
    if version is None:
        return 0
    env.PrependENVPath('PATH', version.clpath)
    env.PrependENVPath('PATH', version.dllpath)
    ENV = env['ENV']
    ENV['CWFolder'] = version.path
    ENV['LM_LICENSE_FILE'] = version.license
    # the tools expect each search path prefixed with '+'
    plus = lambda x: '+%s' % x
    ENV['MWCIncludes'] = os.pathsep.join(map(plus, version.includes))
    ENV['MWLibraries'] = os.pathsep.join(map(plus, version.libs))
    return 1
def find_versions():
    """Return a list of MWVersion objects representing installed versions"""
    versions = []

    ### This function finds CodeWarrior by reading from the registry on
    ### Windows. Some other method needs to be implemented for other
    ### platforms, maybe something that calls env.WhereIs('mwcc')

    if SCons.Util.can_read_reg:
        try:
            HLM = SCons.Util.HKEY_LOCAL_MACHINE
            product = 'SOFTWARE\\Metrowerks\\CodeWarrior\\Product Versions'
            product_key = SCons.Util.RegOpenKeyEx(HLM, product)
            i = 0
            # enumerate subkeys until RegEnumKey raises RegError, which is
            # caught by the outer try and terminates the loop
            while True:
                name = product + '\\' + SCons.Util.RegEnumKey(product_key, i)
                name_key = SCons.Util.RegOpenKeyEx(HLM, name)
                try:
                    version = SCons.Util.RegQueryValueEx(name_key, 'VERSION')
                    path = SCons.Util.RegQueryValueEx(name_key, 'PATH')
                    mwv = MWVersion(version[0], path[0], 'Win32-X86')
                    versions.append(mwv)
                except SCons.Util.RegError:
                    # entry missing VERSION/PATH values: skip it
                    pass
                i = i + 1
        except SCons.Util.RegError:
            pass
    return versions
class MWVersion(object):
    '''Represents one installed CodeWarrior version and the paths scons
    must inject into the environment for it.'''

    def __init__(self, version, path, platform):
        join = os.path.join
        self.version = version
        self.path = path
        self.platform = platform
        # command-line tools and runtime DLL locations
        self.clpath = join(path, 'Other Metrowerks Tools',
                           'Command Line Tools')
        self.dllpath = join(path, 'Bin')
        # The Metrowerks tools keep no configuration of their own: standard
        # headers, libraries and the license file must all be handed to them
        # through environment variables, so precompute every needed path here.
        ### The paths below give a normal build environment in CodeWarrior for
        ### Windows, other versions of CodeWarrior might need different paths.
        msl = join(path, 'MSL')
        support = join(path, '%s Support' % platform)
        self.license = join(path, 'license.dat')
        self.includes = [msl, support]
        self.libs = [msl, support]

    def __str__(self):
        return self.version
# Source-file suffixes dispatched to the C and C++ compiler actions.
CSuffixes = ['.c', '.C']
CXXSuffixes = ['.cc', '.cpp', '.cxx', '.c++', '.C++']
def generate(env):
    """Add Builders and construction variables for the mwcc to an Environment."""
    import SCons.Defaults
    import SCons.Tool

    set_vars(env)

    static_obj, shared_obj = SCons.Tool.createObjBuilders(env)

    # register the standard C/C++ actions for every recognised suffix
    for suffix in CSuffixes:
        static_obj.add_action(suffix, SCons.Defaults.CAction)
        shared_obj.add_action(suffix, SCons.Defaults.ShCAction)

    for suffix in CXXSuffixes:
        static_obj.add_action(suffix, SCons.Defaults.CXXAction)
        shared_obj.add_action(suffix, SCons.Defaults.ShCXXAction)

    # flags shared by all compile command lines (compile-only via -nolink)
    env['CCCOMFLAGS'] = '$CPPFLAGS $_CPPDEFFLAGS $_CPPINCFLAGS -nolink -o $TARGET $SOURCES'

    env['CC']         = 'mwcc'
    env['CCCOM']      = '$CC $CFLAGS $CCFLAGS $CCCOMFLAGS'

    env['CXX']        = 'mwcc'
    env['CXXCOM']     = '$CXX $CXXFLAGS $CCCOMFLAGS'

    # shared-object builds reuse the same compiler and flags
    env['SHCC']       = '$CC'
    env['SHCCFLAGS']  = '$CCFLAGS'
    env['SHCFLAGS']   = '$CFLAGS'
    env['SHCCCOM']    = '$SHCC $SHCFLAGS $SHCCFLAGS $CCCOMFLAGS'

    env['SHCXX']       = '$CXX'
    env['SHCXXFLAGS'] = '$CXXFLAGS'
    env['SHCXXCOM']   = '$SHCXX $SHCXXFLAGS $CCCOMFLAGS'

    env['CFILESUFFIX'] = '.c'
    env['CXXFILESUFFIX'] = '.cpp'
    env['CPPDEFPREFIX']  = '-D'
    env['CPPDEFSUFFIX']  = ''
    env['INCPREFIX']  = '-I'
    env['INCSUFFIX']  = ''

    #env['PCH'] = ?
    #env['PCHSTOP'] = ?
def exists(env):
    # The tool "exists" iff set_vars found at least one installed version.
    return set_vars(env)
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| engineer0x47/SCONS | engine/SCons/Tool/mwcc.py | Python | mit | 6,841 |
import os
from countershape import model
from countershape import state
from countershape import widgets
from . import testpages, tutils
class TestContext(testpages.DummyState):
    """Tests for page path computation (relativePath/absolutePath)."""
    def setUp(self):
        testpages.DummyState.setUp(self)
    def tearDown(self):
        testpages.DummyState.tearDown(self)
    def test_relativePath(self):
        self.application = testpages.TestApplication(
            model.BaseRoot(
                [
                    testpages.TPageHTML("foo"), [
                        testpages.TPageHTML("bar")
                    ]
                ]
            )
        )
        # from the nested page foo/bar ...
        p = self.application.getPage(os.path.join("foo","bar"))
        assert p.relativePath(["oink"]) == "../oink"
        assert p.relativePath(["oink", "voing"]) == "../oink/voing"
        assert p.relativePath(["foo"]) == "../foo"
        assert p.relativePath([]) == ".."
        assert p.relativePath(["foo", "bar"]) == "bar"
        assert p.relativePath(["foo", "bar", "voing"]) == "bar/voing"
        assert p.absolutePath() == "foo/bar"
        # ... and from the top-level page foo
        p = self.application.getPage("foo")
        assert p.relativePath(["oink"]) == "oink"
        assert p.relativePath([""]) == ""
        assert p.relativePath(["foo", "bar"]) == "foo/bar"
        assert p.absolutePath() == "foo"
    def test_top(self):
        # the DummyState application does not contain foo/bar
        assert not self.application.getPage(os.path.join("foo","bar"))
class TestPageInstantiate(testpages.DummyState):
    """Pages must not be instantiated while a page call is in progress."""
    def test_instantiate_err(self):
        self.application.testing = False
        tutils.raises("instantiated during page call", model.BasePage)
class TestHeader(testpages.DummyState):
    """Tests for model.Header CSS/JS resource path handling."""
    def test_path(self):
        # path() dispatches on the file extension
        h = model.Header(state.page)
        h.path("foo.css")
        h.path("bar.js")
        assert "foo" in h._cssPath[0]
        assert "bar" in h._jsPath[0]
    def test_path_err(self):
        h = model.Header(state.page)
        tutils.raises("unrecognised resource extension", h.path, "foo.bar")
    def test_cssPath(self):
        h = model.Header(state.page)
        h.cssPath("foo")
        h.cssPath("bar")
        assert "foo" in h._cssPath[0]
        assert "bar" in h._cssPath[1]
    def test_jsPath(self):
        h = model.Header(state.page)
        h.jsPath("foo")
        h.jsPath("bar")
        assert "foo" in h._jsPath[0]
        assert "bar" in h._jsPath[1]
    def test_str(self):
        h = model.Header(state.page)
        h.cssPath("foo")
        h.cssPath("bar")
        h.jsPath("foo")
        h.jsPath("bar")
        s = str(h)
        # one rendered line per registered resource
        assert len([i for i in s.splitlines() if i]) == 4
class TestHTMLPage(testpages.RenderTester):
    """Rendering tests for HTML pages with inline and file templates."""
    def setUp(self):
        self.application = testpages.TestApplication(
            model.BaseRoot(
                [
                    testpages.TPageHTMLFileTemplate(),
                    [
                        testpages.TPageHTML("nestedpage")
                    ],
                    testpages.TPageHTMLTemplate(),
                ]
            )
        )
        self.application.testing = 2
    def test_pageTitle(self):
        t = testpages.TPageHTMLTemplate()
        # defaults to the class name, then the title attribute, and an
        # instance assignment shadows the pageTitle method entirely
        assert t.pageTitle() == "TPageHTMLTemplate"
        t.title = "Foo"
        assert t.pageTitle() == "Foo"
        t.pageTitle = "Bar"
        assert t.pageTitle == "Bar"
    def test_template(self):
        d = self.call("TPageHTMLTemplate")
        assert d.find("html") > -1
        assert d.find("TPageHTMLTemplate") > -1
    def test_filetemplate(self):
        d = self.call("TPageHTMLFileTemplate")
        assert d.find("template") > -1
        assert d.find("html") > -1
    def test_repr(self):
        t = testpages.TPageHTMLTemplate()
        assert repr(t)
class TestBaseApplication(testpages.RenderTester):
    """Exceptions raised during page rendering must propagate as errors."""
    def setUp(self):
        self.r = model.BaseRoot(
            [
                TException("one"),
            ]
        )
        self.application = model.BaseApplication(self.r)
    def test_pageexception(self):
        p = self.application.getPage("one")
        tutils.raises("an exception", self.application, p)
class TestApplication(testpages.DummyState):
    """Tests for page lookup and the LinkTo/UrlTo/ALink widgets."""
    def setUp(self):
        self.application = testpages.TestApplication(
            model.BaseRoot(
                [
                    testpages.TPageHTML("base"),
                    [
                        testpages.TPageNoLink(),
                        testpages.TPageWithTitle()
                    ],
                    testpages.TPage("internal", internal=True)
                ]
            )
        )
        self.pageName = "base"
        testpages.DummyState.setUp(self)
    def test_getPageErr(self):
        assert not self.application.getPage('nonexistent')
        tutils.raises("invalid argument", self.application.getPage, 0)
    def test_getPageIdempotence(self):
        # passing an already-resolved page returns it unchanged
        p = self.application.getPage('base')
        assert self.application.getPage(p) == p
    def test_getPageRoot(self):
        assert self.application.getPage("").name == "BaseRoot"
    def test_LinkTo(self):
        assert str(widgets.LinkTo("base"))
        assert widgets.LinkTo("base")()
    def test_linkTo_withTitle(self):
        assert str(widgets.LinkTo("TPageWithTitle"))
    def test_linkTo_nopage(self):
        tutils.raises(
            "unknown page",
            str,
            widgets.LinkTo("Nonexistent")
        )
    def test_linkTo_nolink(self):
        assert str(widgets.LinkTo("TPageNoLink"))
    def test_url(self):
        assert str(widgets.UrlTo("TPageNoLink"))
    def test_url_anchor(self):
        s = str(widgets.UrlTo("TPageNoLink", anchor="foo"))
        assert s == "base/TPageNoLink#foo"
    def test_url_nopage(self):
        tutils.raises("unknown page", str, widgets.UrlTo("Nonexistent"))
    def test_url_internal(self):
        # internal pages cannot be linked to
        tutils.raises("internal page", str, widgets.UrlTo("internal"))
    def test_alink(self):
        s = str(widgets.ALink("TPageNoLink", "text", "foo"))
        assert "TPageNoLink#foo" in s
    def test_linkTo_internal(self):
        tutils.raises(
            model.exceptions.ApplicationError,
            str,
            widgets.LinkTo("internal")
        )
class TestPageModel:
    """
    A suite of tests testing the application page model functionality.
    Tests span the Application and Page classes.
    """
    def setUp(self):
        state.page = None
        # a/b share the name "test"; s1/s2 and p1/p2 exercise ambiguous
        # and qualified path resolution across two parallel subtrees
        self.a, self.b = testpages.TPage("test"), testpages.TPage("test")
        self.s1, self.s2 = testpages.TPage("end", structural=True), testpages.TPage("end", structural=True)
        self.p1, self.p2 = testpages.TPage("sub1", structural=True), testpages.TPage("sub2", structural=True)
        self.r = model.BaseRoot([
            testpages.TPage("base", structural=False, internal=True),[
                self.a,
                testpages.TPage("one", structural=True), [
                    testpages.TPage("X", structural=False),[
                        testpages.TPage("two", structural=True, internal=False), [
                            self.b,
                        ]
                    ]
                ],
                self.p1, [
                    testpages.TPage("page", structural=True), [
                        self.s1
                    ],
                ],
                self.p2, [
                    testpages.TPage("page", structural=True), [
                        self.s2,
                    ]
                ],
            ]
        ])
        self.t = testpages.TestApplication(self.r)
        state.application = self.t
    def tearDown(self):
        state.ctx = None
    def test_getPage(self):
        # "page/end" matches both subtrees and must be rejected
        tutils.raises("ambiguous path", self.t.getPage, os.path.join("page","end"))
        assert self.t.getPage(os.path.join("sub1","page","end"))
        assert self.t.getPage(os.path.join("sub2","page","end"))
    def test_getPageChild(self):
        # "./" resolves relative to the current page's children
        state.page = self.p1
        assert self.t.getPage(os.path.join(".","page","end")) is self.s1
        assert not self.t.getPage(os.path.join(".","page","foo"))
        assert self.t.getPage(os.path.join(".","page"))
    def test_getPage_nostate(self):
        # relative lookup requires a current page in state
        tutils.raises("relative page link", self.t.getPage, os.path.join(".","page","end"))
    def test_getPageParent(self):
        # "^/" searches ancestors
        state.page = self.s1
        assert self.t.getPage("^/page") is self.p1.children[0]
        assert self.t.getPage("^/sub1") is self.p1
    def test_getPageSibling(self):
        # "-/" searches siblings
        state.page = self.p1
        assert self.t.getPage("-/sub2") is self.p2
        assert not self.t.getPage("-/page")
    def test_getPageLocal(self):
        # "$/" searches the local (sibling/ancestor) scope
        state.page = self.p1
        assert self.t.getPage("$/sub2") is self.p2
        assert self.t.getPage("$/base")
        assert not self.t.getPage("$/X")
    def test_match(self):
        # second argument toggles exact (fully-qualified) matching
        assert self.b.match([], False)
        assert self.b.match("", False)
        assert self.b.match(["two", "test"], False)
        assert self.b.match(["one", "two", "test"], False)
        assert not self.b.match(["two", "two", "test"], False)
        assert self.s1.match(["sub1", "page", "end"], False)
        assert self.s1.match(["page", "end"], False)
        assert self.s2.match(["page", "end"], False)
        assert self.s1.match(["sub1", "page", "end"], True)
        assert self.s1.match("sub1/page/end", True)
        assert not self.s1.match(["page", "end"], True)
        assert not self.r.match(["page", "end"], False)
    def test_getPath(self):
        # getPath returns the deepest matching page plus leftover segments
        page, path = self.t.getPath(["one", "two"])
        assert page.name == "two"
        assert path == []
        page, path = self.t.getPath(["one"])
        assert page.name == "one"
        assert path == []
        page, path = self.t.getPath(["one", "argument"])
        assert page.name == "one"
        assert path == ["argument"]
        page, path = self.t.getPath(["test"])
        assert page.name == "test"
        assert path == []
        assert self.t.getPath([]) == (self.r, [])
        assert self.t.getPath(["piglet"]) == (self.r, ["piglet"])
        assert self.t.getPath(["two", "foo"]) == (self.r, ["two", "foo"])
    def test_url(self):
        state.page = self.t.getPage(os.path.join("one","two"))
        assert str(widgets.UrlTo("two")) == "two"
        state.page = self.t.getPage("one")
        assert str(widgets.UrlTo("one")) == "one"
class TestPage:
    """Tests for the document-descendant relation across internal pages."""
    def test_isDocDescendantOf(self):
        one = testpages.TPage("one")
        two = testpages.TPage("two")
        r = model.BaseRoot(
            [
                one,
                testpages.TPage("dir", internal=True), [
                    two
                ]
            ]
        )
        t = testpages.TestApplication(r)
        # not a structural descendant, but a document descendant (the
        # internal "dir" page is skipped by the doc relation)
        assert not two.isDescendantOf(one)
        assert two.isDocDescendantOf(one)
        assert two.isDocDescendantOf(r)
        assert r.isDescendantOf(two)
class TestPageModelErrors:
    """Ambiguous sibling names must be rejected at application build time."""
    def test_ambiguouschild(self):
        r = model.BaseRoot([
            testpages.TPage("one", structural=True), [
                testpages.TPage("test"),
                testpages.TPage("test"),
            ]
        ])
        tutils.raises(
            model.exceptions.ApplicationError,
            testpages.TestApplication,
            r
        )
    def test_ambiguouschild2(self):
        # ambiguity through a non-structural intermediate page
        r = model.BaseRoot([
            testpages.TPage("one", structural=True), [
                testpages.TPage("test"),
                testpages.TPage("X", structural=False),[
                    testpages.TPage("test"),
                ]
            ]
        ])
        tutils.raises(
            model.exceptions.ApplicationError,
            testpages.TestApplication,
            r
        )
    def test_ambiguoustoplevel(self):
        r = model.BaseRoot([
            testpages.TPage("test", structural=True),
            testpages.TPage("test", structural=False),
        ])
        tutils.raises(
            model.exceptions.ApplicationError,
            testpages.TestApplication,
            r
        )
class TException(testpages.TPage):
    """Test page whose render always fails, for exercising error handling."""
    def render(self, *args, **kwargs):
        raise ValueError("An exception")
# Shared application fixture for the render tests below: a small tree with a
# structural branch, an internal page, and a page that raises on render.
_TestApp = testpages.TestApplication(
    model.BaseRoot(
        [
            testpages.TPage("one", structural=True),
            [
                testpages.TPage("two"),
                testpages.TPage("three")
            ],
            testpages.TPage("internal", internal=True),
            TException("exception"),
        ]
    )
)
class TestApplicationRenderNoTesting(testpages.RenderTester):
    """Rendering behaviour with the application's testing mode disabled."""
    def setUp(self):
        self.application = _TestApp
        self.application.testing = 1
    def test_prenotesting(self):
        # With testing switched off, pre() must run without raising.
        self.application.testing = 0
        p = model.BasePage()
        self.application.pre(p)
class TestApplicationRender(testpages.RenderTester):
    """Rendering behaviour with full testing mode enabled."""
    def setUp(self):
        self.application = _TestApp
        self.application.testing = 2
    def test_call(self):
        assert self.call("one")
    def test_call_nonexistent(self):
        # Requesting an unknown page is an application-level error.
        tutils.raises(model.exceptions.ApplicationError, self.call, "nonexistent")
class TestApplicationError:
    def test_str(self):
        """ApplicationError must stringify without raising."""
        err = model.exceptions.ApplicationError("foo")
        str(err)
| mhils/countershape | test/test_model.py | Python | mit | 13,305 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
	"""Build the Tangible template for the padded armor s01 gloves.

	File is autogenerated (see header notice): only the marked modification
	section should ever be edited by hand.
	"""
	result = Tangible()
	result.template = "object/tangible/wearables/armor/padded/shared_armor_padded_s01_gloves.iff"
	result.attribute_template_id = 0
	result.stfName("wearables_name","armor_padded_s01_gloves")
	#### BEGIN MODIFICATIONS ####
	#### END MODIFICATIONS ####
	return result
import logging
from datetime import datetime
from collections import defaultdict
from servicelayer.jobs import Job
from aleph.core import db, cache
from aleph.authz import Authz
from aleph.queues import cancel_queue, ingest_entity, get_status
from aleph.model import Collection, Entity, Document, Mapping
from aleph.model import Permission, Events, EntitySet
from aleph.index import collections as index
from aleph.index import xref as xref_index
from aleph.index import entities as entities_index
from aleph.logic.notifications import publish, flush_notifications
from aleph.logic.documents import ingest_flush, MODEL_ORIGIN
from aleph.logic.aggregator import get_aggregator
log = logging.getLogger(__name__)
def create_collection(data, authz, sync=False):
    """Create a collection, publish a creation event, and index it.

    The CREATE_COLLECTION notification is only published when the collection
    was actually created by this call (its created_at equals our timestamp),
    i.e. not when Collection.create returned a pre-existing record.
    """
    now = datetime.utcnow()
    collection = Collection.create(data, authz, created_at=now)
    if collection.created_at == now:
        publish(
            Events.CREATE_COLLECTION,
            params={"collection": collection},
            channels=[collection, authz.role],
            actor_id=authz.id,
        )
    db.session.commit()
    return update_collection(collection, sync=sync)
def update_collection(collection, sync=False):
    """Update a collection and re-index."""
    # Permissions may have changed with the update, so drop cached authz
    # state before re-indexing.
    Authz.flush()
    refresh_collection(collection.id)
    return index.index_collection(collection, sync=sync)
def refresh_collection(collection_id):
    """Operations to execute after updating a collection-related
    domain object. This will refresh stats and flush cache."""
    # Invalidate both the cached collection object and its stats entry.
    cache.kv.delete(
        cache.object_key(Collection, collection_id),
        cache.object_key(Collection, collection_id, "stats"),
    )
def get_deep_collection(collection):
    """Assemble the expensive, detail-level serialization of a collection."""
    mapping_count = Mapping.by_collection(collection.id).count()
    entityset_counts = EntitySet.type_counts(collection_id=collection.id)
    payload = {
        "shallow": False,
        "statistics": index.get_collection_stats(collection.id),
        "status": get_status(collection),
    }
    payload["counts"] = {
        "mappings": mapping_count,
        "entitysets": entityset_counts,
    }
    return payload
def compute_collections():
    """Update collection caches, including the global stats cache."""
    # Anonymous authz: only collections readable by everyone are counted
    # in the public, global statistics.
    authz = Authz.from_role(None)
    schemata = defaultdict(int)
    countries = defaultdict(int)
    categories = defaultdict(int)
    for collection in Collection.all():
        compute_collection(collection)
        if authz.can(collection.id, authz.READ):
            categories[collection.category] += 1
            things = index.get_collection_things(collection.id)
            for schema, count in things.items():
                schemata[schema] += count
            for country in collection.countries:
                countries[country] += 1
    log.info("Updating global statistics cache...")
    data = {
        "collections": sum(categories.values()),
        "schemata": dict(schemata),
        "countries": dict(countries),
        "categories": dict(categories),
        "things": sum(schemata.values()),
    }
    key = cache.key(cache.STATISTICS)
    cache.set_complex(key, data, expires=cache.EXPIRE)
def compute_collection(collection, force=False, sync=False):
    """Recompute a collection's statistics and re-index it.

    The stats cache key doubles as a rate limiter: unless ``force`` is set,
    the computation is skipped while the key still exists.
    """
    key = cache.object_key(Collection, collection.id, "stats")
    if cache.get(key) is not None and not force:
        return
    refresh_collection(collection.id)
    log.info("[%s] Computing statistics...", collection)
    index.update_collection_stats(collection.id)
    # Store a timestamp so subsequent calls are skipped until expiry.
    cache.set(key, datetime.utcnow().isoformat())
    index.index_collection(collection, sync=sync)
def aggregate_model(collection, aggregator):
    """Sync up the aggregator from the Aleph domain model."""
    log.debug("[%s] Aggregating model...", collection)
    # Drop all previously-written model-origin fragments before rewriting.
    aggregator.delete(origin=MODEL_ORIGIN)
    writer = aggregator.bulk()
    for document in Document.by_collection(collection.id):
        proxy = document.to_proxy(ns=collection.ns)
        writer.put(proxy, fragment="db", origin=MODEL_ORIGIN)
    for entity in Entity.by_collection(collection.id):
        proxy = entity.to_proxy()
        # Entities fully replace any prior fragments under the same id.
        aggregator.delete(entity_id=proxy.id)
        writer.put(proxy, fragment="db", origin=MODEL_ORIGIN)
    writer.flush()
def index_aggregator(
    collection, aggregator, entity_ids=None, skip_errors=False, sync=False
):
    """Stream entities from the aggregator cache into the search index.

    ``entity_ids`` optionally restricts the iteration to specific entities;
    ``skip_errors`` is forwarded to the aggregator so broken fragments are
    skipped rather than raised.
    """

    def _generate():
        # idx stays 0 if the aggregator yields nothing, so the final log
        # line correctly reports "Indexed 0 entities".
        idx = 0
        entities = aggregator.iterate(entity_id=entity_ids, skip_errors=skip_errors)
        for idx, proxy in enumerate(entities, 1):
            # enumerate starts at 1, so a plain modulo check suffices
            # (the previous ``idx > 0 and`` guard was always true here).
            if idx % 1000 == 0:
                log.debug("[%s] Index: %s...", collection, idx)
            yield proxy
        log.debug("[%s] Indexed %s entities", collection, idx)

    entities_index.index_bulk(collection, _generate(), sync=sync)
def reingest_collection(collection, job_id=None, index=False, flush=True):
    """Trigger a re-ingest for all documents in the collection."""
    job_id = job_id or Job.random_id()
    if flush:
        # Drop previously ingested fragments so they get re-derived.
        ingest_flush(collection)
    for document in Document.by_collection(collection.id):
        proxy = document.to_proxy(ns=collection.ns)
        ingest_entity(collection, proxy, job_id=job_id, index=index)
def reindex_collection(collection, skip_errors=True, sync=False, flush=False):
    """Re-index all entities from the model, mappings and aggregator cache."""
    # Imported locally, presumably to avoid circular imports — confirm.
    from aleph.logic.mapping import map_to_aggregator
    from aleph.logic.profiles import profile_fragments
    aggregator = get_aggregator(collection)
    for mapping in collection.mappings:
        if mapping.disabled:
            log.debug("[%s] Skip mapping: %r", collection, mapping)
            continue
        try:
            map_to_aggregator(collection, mapping, aggregator)
        except Exception:
            # More or less ignore broken models.
            log.exception("Failed mapping: %r", mapping)
    aggregate_model(collection, aggregator)
    profile_fragments(collection, aggregator)
    if flush:
        # Wipe the index before re-filling it from the aggregator.
        log.debug("[%s] Flushing...", collection)
        index.delete_entities(collection.id, sync=True)
    index_aggregator(collection, aggregator, skip_errors=skip_errors, sync=sync)
    compute_collection(collection, force=True)
def delete_collection(collection, keep_metadata=False, sync=False):
    """Tear down a collection: queues, caches, indexes and database rows.

    With ``keep_metadata`` the collection record and its permissions are
    preserved (used for soft-deleted collections during upgrades).
    """
    deleted_at = collection.deleted_at or datetime.utcnow()
    cancel_queue(collection)  # stop in-flight processing before deleting data
    aggregator = get_aggregator(collection)
    aggregator.delete()
    flush_notifications(collection, sync=sync)
    index.delete_entities(collection.id, sync=sync)
    xref_index.delete_xref(collection, sync=sync)
    Mapping.delete_by_collection(collection.id)
    EntitySet.delete_by_collection(collection.id, deleted_at)
    Entity.delete_by_collection(collection.id)
    Document.delete_by_collection(collection.id)
    if not keep_metadata:
        # Fully destroy the collection record and its permissions.
        Permission.delete_by_collection(collection.id)
        collection.delete(deleted_at=deleted_at)
    db.session.commit()
    if not keep_metadata:
        index.delete_collection(collection.id, sync=True)
        aggregator.drop()
    refresh_collection(collection.id)
    Authz.flush()
def upgrade_collections():
    """Re-process every collection: purge soft-deleted ones, refresh the rest."""
    for coll in Collection.all(deleted=True):
        if coll.deleted_at is None:
            coll_needs_purge = False
        else:
            coll_needs_purge = True
        if coll_needs_purge:
            delete_collection(coll, keep_metadata=True, sync=True)
        else:
            compute_collection(coll, force=True)
    # update global cache:
    compute_collections()
| pudo/aleph | aleph/logic/collections.py | Python | mit | 7,335 |
import pytest
def pytest_addoption(parser):
    """Register the --setup-plan / --setupplan flag in the debugconfig group."""
    group = parser.getgroup("debugconfig")
    group.addoption(
        "--setupplan",
        "--setup-plan",
        action="store_true",
        help="show what fixtures and tests would be executed but "
        "don't execute anything.",
    )
@pytest.hookimpl(tryfirst=True)
def pytest_fixture_setup(fixturedef, request):
    # Will return a dummy fixture if the setuponly option is provided.
    if request.config.option.setupplan:
        # Installing a cached_result short-circuits real fixture execution;
        # the (None, cache_key, None) tuple means "value None, no exception".
        my_cache_key = fixturedef.cache_key(request)
        fixturedef.cached_result = (None, my_cache_key, None)
        return fixturedef.cached_result
@pytest.hookimpl(tryfirst=True)
def pytest_cmdline_main(config):
    # --setup-plan implies --setup-only and --setup-show.
    if config.option.setupplan:
        config.option.setuponly = True
        config.option.setupshow = True
| alfredodeza/pytest | src/_pytest/setupplan.py | Python | mit | 818 |
from .. util import deprecated
# Deprecated driver names are only exposed when the project's deprecation
# policy allows them.
if deprecated.allowed():
    from . channel_order import ChannelOrder
| rec/BiblioPixel | bibliopixel/drivers/__init__.py | Python | mit | 102 |
import unittest
import pycodestyle
from os.path import exists
class TestCodeFormat(unittest.TestCase):
    def test_conformance(self):
        """Check the package and its tests against PEP-8 via pycodestyle."""
        checker = pycodestyle.StyleGuide(quiet=False, ignore=['E501', 'W605'])
        # Paths differ depending on whether we run from the repo root
        # (e.g. tox) or from inside the test directory (e.g. pycharm).
        if exists('transitions'):
            targets = ['transitions', 'tests']
        else:
            targets = ['../transitions', '.']
        for directory in targets:
            checker.input_dir(directory)
        report = checker.check_files()
        self.assertEqual(report.total_errors, 0,
                         "Found code style errors (and warnings).")
| tyarkoni/transitions | tests/test_codestyle.py | Python | mit | 710 |
# This file is part of beets.
# Copyright 2011, Adrian Sampson.
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
"""Tests for the virtual filesystem builder.."""
import _common
from _common import unittest
from beets import library
from beets import vfs
class VFSTest(unittest.TestCase):
    """Check that vfs.libtree lays items out according to path_formats."""
    def setUp(self):
        # In-memory library: album tracks under albums/$album, singleton
        # tracks under tracks/$artist.
        self.lib = library.Library(':memory:', path_formats=[
            ('default', 'albums/$album/$title'),
            ('singleton:true', 'tracks/$artist/$title'),
        ])
        self.lib.add(_common.item())          # singleton track (item id 1)
        self.lib.add_album([_common.item()])  # album track (item id 2)
        self.lib.save()
        self.tree = vfs.libtree(self.lib)
    def test_singleton_item(self):
        self.assertEqual(self.tree.dirs['tracks'].dirs['the artist'].
                         files['the title'], 1)
    def test_album_item(self):
        self.assertEqual(self.tree.dirs['albums'].dirs['the album'].
                         files['the title'], 2)
def suite():
    """Return this module's tests for beets' test runner."""
    return unittest.TestLoader().loadTestsFromName(__name__)
if __name__ == '__main__':
    unittest.main(defaultTest='suite')
| karlbright/beets | test/test_vfs.py | Python | mit | 1,621 |
# caution: this module was written by a Python novice learning on the fly
# you should not assume this is "good" or idiomatic Python.
# Hackathons are fun.
from flask import Flask, Blueprint, render_template, abort, request, jsonify
from flask.ext.login import LoginManager, login_required
import logging
import json
from user import User
import base64
import db.user
app = Flask(__name__)
# setting up auth ... this should be interesting
login_manager = LoginManager()
login_manager.init_app(app)
@login_manager.request_loader
def load_user_from_request(request):
    """Authenticate a request from its HTTP Basic ``Authorization`` header.

    Returns a User on success, or None so Flask-Login treats the request
    as unauthorized instead of erroring out.
    """
    api_key = request.headers.get('Authorization')
    if api_key:
        api_key = api_key.replace('Basic ', '', 1)
        try:
            raw = base64.b64decode(api_key)
            # Split on the first ':' only — Basic auth allows colons in the
            # password (the old split()[1] silently truncated such passwords).
            userName, password = raw.decode("utf-8").split(":", 1)
            record = db.user.getUser(userName, password)
            if record:
                # record columns appear to be (id, name, is_tutor) —
                # inferred from usage here and in load_user; confirm
                # against db.user.
                userType = "tutor" if record[2] else "student"
                return User(record[1], record[0], userType)
        except (TypeError, ValueError):
            # Malformed base64 raises binascii.Error (a ValueError) on
            # Python 3 and TypeError on Python 2; an undecodable payload
            # raises UnicodeDecodeError (a ValueError); a missing ':'
            # makes the unpack above raise ValueError.  All of these mean
            # "bad credentials header", not a server error.
            logging.exception('')
    # finally, return None: the request is unauthorized
    return None
@login_manager.user_loader
def load_user(user_id):
    """Reload a user from the id stored in the session cookie.

    Flask-Login expects None when the id no longer resolves to a user.
    """
    record = db.user.getUserById(user_id)
    if not record:
        # Stale session id: report "no such user" instead of crashing on
        # record[2] with a TypeError.
        return None
    # record columns appear to be (id, name, is_tutor) — confirm against db.user.
    userType = "tutor" if record[2] else "student"
    return User(record[1], record[0], userType)
@app.route('/')
def root():
    """Serve the single-page application shell."""
    return app.send_static_file('index.html')
# load other blueprints into app
from resource.registration import registration
app.register_blueprint(registration)
from resource.student import student
app.register_blueprint(student)
if __name__ == "__main__":
    # Bind on all interfaces so the app is reachable from other machines.
    app.run(host="0.0.0.0")
# author: Fei Gao
#
# Combinations
#
# Given two integers n and k, return all possible combinations of k numbers out of 1 ... n.
# For example,
# If n = 4 and k = 2, a solution is:
# [
# [2,4],
# [3,4],
# [2,3],
# [1,2],
# [1,3],
# [1,4],
# ]
import itertools
class Solution:
    """LeetCode 77: enumerate the k-element combinations of 1..n."""

    # @return a list of lists of integers
    def combine(self, n, k):
        """Return every k-combination of [1, n] in lexicographic order."""
        pool = range(1, n + 1)
        return list(map(list, itertools.combinations(pool, k)))
def main():
    """Smoke test: print all 2-combinations of 1..4."""
    solver = Solution()
    print(solver.combine(4, 2))
    pass
if __name__ == '__main__':
    main()
    pass
| feigaochn/leetcode | p77_combinations.py | Python | mit | 567 |
#!/usr/bin/env python3
#Author: Stefan Toman
if __name__ == '__main__':
    # The first line of each pair is the set's size; it is read only to
    # consume the input line, the value itself is unused.
    n = int(input())
    a = set(map(int, input().split()))
    m = int(input())
    b = set(map(int, input().split()))
    # Count elements present in a but not in b.
    print(len(a-b))
| stoman/CompetitiveProgramming | problems/pythonsetdifference/submissions/accepted/stefan.py | Python | mit | 214 |
# -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: enforce uniqueness of Company.slug."""
        # Adding unique constraint on 'Company', fields ['slug']
        db.create_unique(u'ecg_balancing_company', ['slug'])
    def backwards(self, orm):
        """Revert the migration: drop the uniqueness of Company.slug."""
        # Removing unique constraint on 'Company', fields ['slug']
        db.delete_unique(u'ecg_balancing_company', ['slug'])
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'ecg_balancing.company': {
'Meta': {'object_name': 'Company'},
'activities': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'country': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'employees_number': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'foundation_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'industry': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'managing_directors': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'model_creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'owners': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'revenue': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'street': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'website': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'zipcode': ('django.db.models.fields.PositiveIntegerField', [], {})
},
u'ecg_balancing.companybalance': {
'Meta': {'object_name': 'CompanyBalance'},
'auditor': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'common_good': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'company': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'balance'", 'to': u"orm['ecg_balancing.Company']"}),
'end_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'matrix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'company_balances'", 'to': u"orm['ecg_balancing.ECGMatrix']"}),
'peer_companies': ('django.db.models.fields.related.ManyToManyField', [], {'max_length': '255', 'to': u"orm['ecg_balancing.Company']", 'null': 'True', 'symmetrical': 'False', 'blank': 'True'}),
'process_description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'prospect': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'year': ('django.db.models.fields.SmallIntegerField', [], {'max_length': '4', 'null': 'True', 'blank': 'True'})
},
u'ecg_balancing.companybalanceindicator': {
'Meta': {'object_name': 'CompanyBalanceIndicator'},
'company_balance': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'company_balance'", 'to': u"orm['ecg_balancing.CompanyBalance']"}),
'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'evaluation': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'indicator': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'company_balance'", 'to': u"orm['ecg_balancing.Indicator']"})
},
u'ecg_balancing.ecgmatrix': {
'Meta': {'object_name': 'ECGMatrix'},
'contact': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'version': ('django.db.models.fields.CharField', [], {'default': "u'4.1'", 'max_length': '6'})
},
u'ecg_balancing.indicator': {
'Meta': {'object_name': 'Indicator'},
'contact': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'ecg_value': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'matrix': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "u'indicators'", 'to': u"orm['ecg_balancing.ECGMatrix']"}),
'max_evaluation': ('django.db.models.fields.IntegerField', [], {}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'parent_indicator'", 'null': 'True', 'to': u"orm['ecg_balancing.Indicator']"}),
'stakeholder': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'subindicator_number': ('django.db.models.fields.IntegerField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'ecg_balancing.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'avatar': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'companies': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['ecg_balancing.Company']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "u'profile'", 'unique': 'True', 'to': u"orm['auth.User']"})
},
u'ecg_balancing.userrole': {
'Meta': {'object_name': 'UserRole'},
'company': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['ecg_balancing.Company']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'role': ('django.db.models.fields.CharField', [], {'max_length': '5'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
}
}
complete_apps = ['ecg_balancing'] | sinnwerkstatt/ecg-balancing | ecg_balancing/migrations/0012_auto__add_unique_company_slug.py | Python | mit | 10,698 |
from distutils.core import setup
# NOTE(review): distutils is deprecated (removed in Python 3.12); migrating
# to setuptools would be a drop-in change here.
setup(
    name = 'srht',
    packages = ['srht'],
    version = '1.0.0',
    description = 'sr.ht services',
    author = 'Drew DeVault',
    author_email = 'sir@cmpwn.com',
    url = 'https://github.com/SirCmpwn/sr.ht',
    download_url = '',
    keywords = [],
    classifiers = []
)
| Debetux/sr.ht | setup.py | Python | mit | 300 |
import string
class Tree:
    """Rooted tree with an arbitrary label ``u`` and child subtrees ``vs``.

    In the newick parser below, anonymous internal nodes receive integer
    labels while taxa (named nodes) carry string labels; ``taxa()`` relies
    on that distinction.
    """

    def __init__(self, u, vs):
        self.u = u    # node label (str for named nodes, int for anonymous ones)
        self.vs = vs  # list of child Tree instances

    def distance(self, w):
        """Return ``(depth, path)`` from this node down to label ``w``.

        ``depth`` counts edges and ``path`` lists the labels from self.u to
        w inclusive; returns ``(-1, [])`` if w is not in this subtree.
        """
        if w == self.u:
            return 0, [w]
        for v in self.vs:
            d, path = v.distance(w)
            if d != -1:
                return d + 1, [self.u] + path
        return -1, []

    def taxa(self):
        """Return the set of string labels (taxa) in this subtree."""
        ret = set()
        # isinstance instead of the old ``type(self.u) == type("")`` check.
        if isinstance(self.u, str):
            ret.add(self.u)
        for v in self.vs:
            ret = ret.union(v.taxa())
        return ret

    def nodes(self):
        """Return the set of Tree objects in this subtree, self included."""
        ret = {self}
        for v in self.vs:
            ret = ret.union(v.nodes())
        return ret

    def level_traverse(self, ret=None):
        """Append labels in post-order (children first) onto ``ret``."""
        if ret is None:  # ``is None``, not ``== None``
            ret = []
        for v in self.vs:
            ret = v.level_traverse(ret)
        ret.append(self.u)
        return ret

    def splits(self):
        """Return every edge-induced bipartition ``(L, R)`` of the taxa."""
        if len(self.vs) == 0:
            return []
        taxa = self.taxa()
        ret = []
        for v in self.vs:
            vt = v.taxa()
            delta = taxa.difference(vt)
            r = v.splits()  # the splits that happen inside the subtree
            # Lift each nested split to this level: everything outside v
            # joins its right-hand side.
            ret += [(L, R.union(delta)) for L, R in r]
            ret.append((vt, delta))
        return ret

    def adj_list(self, father=None, cur=None, children=None):
        """Return ``(adjacency, children)`` as label -> set(label) maps."""
        if cur is None:
            cur = {}
        if children is None:
            children = {}
        cur[self.u] = set()
        children[self.u] = set()
        if father is not None:
            cur[self.u].add(father)
        for v in self.vs:
            cur, children = v.adj_list(father=self.u, cur=cur, children=children)
            cur[self.u].add(v.u)
            children[self.u].add(v.u)
        return cur, children

    def find_rev(self, dnas, pos, pre=None, mid=None):
        """Find paths witnessing a character reversion at column ``pos``.

        A reversion is a descent where the state first changes away from the
        ancestral state ``pre`` (to ``mid``) and later changes back to
        ``pre``.  ``dnas`` maps node labels to character strings.  Returns a
        list of node paths (lists of Tree objects), each starting at the
        first changed node and ending at the reverted node.
        """
        if pre is None:
            # Root of the search: look for reversions below each child.
            ret = []
            for v in self.vs:
                ret += v.find_rev(dnas, pos, dnas[self.u][pos], None)
            return ret
        elif mid is None:
            # Waiting for the first state change away from ``pre``.
            if dnas[self.u][pos] != pre:
                ret = []
                for v in self.vs:
                    ret += [[self] + path
                            for path in v.find_rev(dnas, pos, pre, dnas[self.u][pos])]
                return ret
            else:
                return []
        else:
            if dnas[self.u][pos] == pre:
                # Reverted back to the ancestral state: path complete.
                return [[self]]
            elif dnas[self.u][pos] == mid:
                # Still in the intermediate state: keep descending.
                ret = []
                for v in self.vs:
                    ret += [[self] + path for path in v.find_rev(dnas, pos, pre, mid)]
                return ret
            else:
                # A third state appeared: not a simple reversion.
                return []
def newick_parse(s):
    """Parse a Newick-format string into a Tree.

    Named nodes keep their (letters/underscore) labels; unnamed nodes are
    given consecutive integer labels in the order they are opened.  The
    string must end with a non-label terminator character (e.g. ';').
    """
    state = {"pos": 0, "next_id": 1}

    def is_label_char(c):
        return c in string.ascii_letters or c == "_"

    def read_label():
        start = state["pos"]
        while is_label_char(s[state["pos"]]):
            state["pos"] += 1
        return s[start:state["pos"]]

    def fresh_node():
        node = Tree(state["next_id"], [])
        state["next_id"] += 1
        return node

    def parse_subtree():
        if s[state["pos"]] == "(":
            state["pos"] += 1
            node = fresh_node()
            node.vs.append(parse_subtree())
            while s[state["pos"]] == ",":
                state["pos"] += 1
                node.vs.append(parse_subtree())
            assert s[state["pos"]] == ")"
            state["pos"] += 1
            if is_label_char(s[state["pos"]]):
                # An explicit label after ')' overrides the generated one.
                node.u = read_label()
            return node
        if is_label_char(s[state["pos"]]):
            return Tree(read_label(), [])
        # Empty position (e.g. between two commas): an anonymous leaf.
        return fresh_node()

    return parse_subtree()
def edge_splits(t, taxa):
    """Encode the non-trivial splits of tree ``t`` as 0/1 strings over ``taxa``.

    Splits that isolate a single element on either side are dropped; each
    remaining split becomes a bitstring where position i is '1' iff
    taxa[i] lies on the split's left-hand side.
    """
    nontrivial = [sp for sp in t.splits()
                  if len(sp[0]) != 1 and len(sp[1]) != 1]
    encoded = []
    for left, _right in nontrivial:
        bits = "".join("1" if name in left else "0" for name in taxa)
        encoded.append(bits)
    return encoded
#!/usr/bin/env python3
# Copyright (c) 2015-2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test transaction signing using the signrawtransaction* RPCs."""
from test_framework.test_framework import BitcoinTestFramework
import decimal
class RpcCreateMultiSigTest(BitcoinTestFramework):
    """Exercise createmultisig/addmultisigaddress over several key counts,
    signature thresholds and address types, and spend from each multisig."""
    def set_test_params(self):
        self.setup_clean_chain = True
        self.num_nodes = 3
    def get_keys(self):
        """Create self.nkeys keys on node1 and a payout address on node2."""
        node0,node1,node2 = self.nodes
        self.add = [node1.getnewaddress() for _ in range(self.nkeys)]
        self.pub = [node1.getaddressinfo(a)["pubkey"] for a in self.add]
        self.priv = [node1.dumpprivkey(a) for a in self.add]
        self.final = node2.getnewaddress()
    def run_test(self):
        node0,node1,node2 = self.nodes
        # 50 BTC each, rest will be 25 BTC each
        node0.generate(149)
        self.sync_all()
        self.moved = 0
        # Sweep the n-of-m / address-type matrix.
        for self.nkeys in [3,5]:
            for self.nsigs in [2,3]:
                for self.output_type in ["bech32", "p2sh-segwit", "legacy"]:
                    self.get_keys()
                    self.do_multisig()
        self.checkbalances()
    def checkbalances(self):
        """Verify every coin moved through the multisigs ended up at node2."""
        node0,node1,node2 = self.nodes
        # Mature the coinbases so node0's balance is spendable/counted.
        node0.generate(100)
        self.sync_all()
        bal0 = node0.getbalance()
        bal1 = node1.getbalance()
        bal2 = node2.getbalance()
        height = node0.getblockchaininfo()["blocks"]
        assert 150 < height < 350
        total = 149*50 + (height-149-100)*25
        assert bal1 == 0
        assert bal2 == self.moved
        assert bal0+bal1+bal2 == total
    def do_multisig(self):
        """Build an nsigs-of-nkeys multisig, fund it, and spend to self.final."""
        node0,node1,node2 = self.nodes
        msig = node2.createmultisig(self.nsigs, self.pub, self.output_type)
        madd = msig["address"]
        mredeem = msig["redeemScript"]
        if self.output_type == 'bech32':
            assert madd[0:4] == "bcrt"  # actually a bech32 address
        # compare against addmultisigaddress
        msigw = node1.addmultisigaddress(self.nsigs, self.pub, None, self.output_type)
        maddw = msigw["address"]
        mredeemw = msigw["redeemScript"]
        # addmultisigaddress and createmultisig work the same
        assert maddw == madd
        assert mredeemw == mredeem
        txid = node0.sendtoaddress(madd, 40)
        tx = node0.getrawtransaction(txid, True)
        vout = [v["n"] for v in tx["vout"] if madd in v["scriptPubKey"].get("addresses",[])]
        assert len(vout) == 1
        vout = vout[0]
        scriptPubKey = tx["vout"][vout]["scriptPubKey"]["hex"]
        value = tx["vout"][vout]["value"]
        prevtxs = [{"txid": txid, "vout": vout, "scriptPubKey": scriptPubKey, "redeemScript": mredeem, "amount": value}]
        node0.generate(1)
        outval = value - decimal.Decimal("0.00001000")  # leave a small fee
        rawtx = node2.createrawtransaction([{"txid": txid, "vout": vout}], [{self.final: outval}])
        # Sign with nsigs-1 keys first, then the last key, to exercise
        # incremental multisig signing.
        rawtx2 = node2.signrawtransactionwithkey(rawtx, self.priv[0:self.nsigs-1], prevtxs)
        rawtx3 = node2.signrawtransactionwithkey(rawtx2["hex"], [self.priv[-1]], prevtxs)
        self.moved += outval
        tx = node0.sendrawtransaction(rawtx3["hex"], True)
        blk = node0.generate(1)[0]
        assert tx in node0.getblock(blk)["tx"]
        txinfo = node0.getrawtransaction(tx, True, blk)
        self.log.info("n/m=%d/%d %s size=%d vsize=%d weight=%d" % (self.nsigs, self.nkeys, self.output_type, txinfo["size"], txinfo["vsize"], txinfo["weight"]))
if __name__ == '__main__':
RpcCreateMultiSigTest().main()
| Flowdalic/bitcoin | test/functional/rpc_createmultisig.py | Python | mit | 3,657 |
from __future__ import print_function, division
import matplotlib
matplotlib.use('Agg') # Must be before importing matplotlib.pyplot or pylab!
from neuralnilm import Net, RealApplianceSource, BLSTMLayer, DimshuffleLayer
from lasagne.nonlinearities import sigmoid, rectify
from lasagne.objectives import crossentropy, mse
from lasagne.init import Uniform, Normal
from lasagne.layers import LSTMLayer, DenseLayer, Conv1DLayer, ReshapeLayer, FeaturePoolLayer
from neuralnilm.updates import nesterov_momentum
from functools import partial
import os
from neuralnilm.source import standardise, discretize, fdiff, power_and_fdiff
from neuralnilm.experiment import run_experiment
from neuralnilm.net import TrainingError
import __main__
NAME = os.path.splitext(os.path.split(__main__.__file__)[1])[0]
PATH = "/homes/dk3810/workspace/python/neuralnilm/figures"
SAVE_PLOT_INTERVAL = 250
GRADIENT_STEPS = 100
"""
e103
Discovered that bottom layer is hardly changing. So will try
just a single lstm layer
e104
standard init
lower learning rate
e106
lower learning rate to 0.001
e108
is e107 but with batch size of 5
e109
Normal(1) for LSTM
e110
* Back to Uniform(5) for LSTM
* Using nntools eb17bd923ef9ff2cacde2e92d7323b4e51bb5f1f
RESULTS: Seems to run fine again!
e111
* Try with nntools head
* peepholes=False
RESULTS: appears to be working well. Haven't seen a NaN,
even with training rate of 0.1
e112
* n_seq_per_batch = 50
e114
* Trying looking at layer by layer training again.
* Start with single LSTM layer
e115
* Learning rate = 1
e116
* Standard inits
e117
* Uniform(1) init
e119
* Learning rate 10
# Result: didn't work well!
e120
* init: Normal(1)
* not as good as Uniform(5)
e121
* Uniform(25)
e122
* Just 10 cells
* Uniform(5)
e125
* Pre-train lower layers
e128
* Add back all 5 appliances
* Seq length 1500
* skip_prob = 0.7
e129
* max_input_power = None
* 2nd layer has Uniform(5)
* pre-train bottom layer for 2000 epochs
* add third layer at 4000 epochs
e131
e138
* Trying to replicate e82 and then break it ;)
e140
diff
e141
conv1D layer has Uniform(1), as does 2nd LSTM layer
e142
diff AND power
e144
diff and power and max power is 5900
e145
Uniform(25) for first layer
e146
gradient clip and use peepholes
e147
* try again with new code
e148
* learning rate 0.1
e150
* Same as e149 but without peepholes and using LSTM not BLSTM
e151
* Max pooling
"""
def exp_a(name):
    """Build and return the Net for experiment 'a'.

    NOTE: `RealApplianceSource`, `Net`, `mse`, `Uniform` and `sigmoid` are
    presumably imported earlier in this file (neuralnilm / lasagne) — not
    visible here; TODO confirm.
    """
    # 151d but training for much longer and skip prob = 0.7
    # NOTE(review): the comment above says skip prob = 0.7 but
    # skip_probability below is 0.0 — confirm which is intended.
    source = RealApplianceSource(
        filename='/data/dk3810/ukdale.h5',
        appliances=[
            ['fridge freezer', 'fridge', 'freezer'],
            'hair straighteners',
            'television'
            # 'dish washer'
            # ['washer dryer', 'washing machine']
        ],
        # Per-appliance limits; lists are longer than the 3 active
        # appliances so the commented-out ones can be re-enabled.
        max_appliance_powers=[300, 500, 200, 2500, 2400],
        on_power_thresholds=[5, 5, 5, 5, 5],
        max_input_power=5900,
        min_on_durations=[60, 60, 60, 1800, 1800],
        min_off_durations=[12, 12, 12, 1800, 600],
        window=("2013-06-01", "2014-07-01"),
        seq_length=1500,
        output_one_appliance=False,
        boolean_targets=False,
        train_buildings=[1],
        validation_buildings=[1],
        skip_probability=0.0,
        n_seq_per_batch=25,
        include_diff=True
    )
    # Network: two sigmoid dense layers, one LSTM, then dense layers down
    # to one linear output per appliance.
    net = Net(
        experiment_name=name,
        source=source,
        save_plot_interval=250,
        loss_function=mse,
        updates=partial(nesterov_momentum, learning_rate=.1, clip_range=(-1, 1)),
        layers_config=[
            {
                'type': DenseLayer,
                'num_units': 50,
                'nonlinearity': sigmoid,
                'W': Uniform(25),
                'b': Uniform(25)
            },
            {
                'type': DenseLayer,
                'num_units': 50,
                'nonlinearity': sigmoid,
                'W': Uniform(10),
                'b': Uniform(10)
            },
            {
                'type': LSTMLayer,
                'num_units': 50,
                'W_in_to_cell': Uniform(5),
                'gradient_steps': GRADIENT_STEPS,
                'peepholes': False
            },
            {
                'type': DenseLayer,
                'num_units': 50,
                'nonlinearity': sigmoid,
                'W': Uniform(1),
                'b': Uniform(1)
            },
            {
                # Linear output layer: one unit per target appliance.
                'type': DenseLayer,
                'num_units': source.n_outputs,
                'nonlinearity': None
            }
        ]
    )
    return net
def init_experiment(experiment):
    """Build the Net for the experiment with suffix *experiment* (e.g. 'a').

    Looks up the module-level ``exp_<experiment>`` function and calls it
    with the full experiment name (script name + suffix).

    :param experiment: single-letter experiment suffix
    :returns: the Net constructed by the matching ``exp_*`` function
    :raises KeyError: if no ``exp_<experiment>`` function exists
    """
    full_exp_name = NAME + experiment
    print("***********************************")
    print("Preparing", full_exp_name, "...")
    # Resolve the experiment function by name instead of eval():
    # eval() on a formatted string is fragile and a needless
    # code-injection hazard; a plain globals() lookup does the same job.
    exp_func = globals()['exp_{:s}'.format(experiment)]
    net = exp_func(full_exp_name)
    return net
def main():
    """Run each configured experiment in sequence (currently only 'a')."""
    for suffix in 'a':
        experiment_path = os.path.join(PATH, NAME + suffix)
        try:
            net = init_experiment(suffix)
            run_experiment(net, experiment_path, epochs=None)
        except KeyboardInterrupt:
            # Ctrl-C stops the whole run, not just the current experiment.
            break
        except TrainingError as exception:
            print("EXCEPTION:", exception)
        except Exception as exception:
            # Unexpected failure: report it and drop into a debugger.
            print("EXCEPTION:", exception)
            import ipdb
            ipdb.set_trace()


if __name__ == "__main__":
    main()
| JackKelly/neuralnilm_prototype | scripts/e159.py | Python | mit | 5,375 |
"""
Django settings for django_engage project.
Generated by 'django-admin startproject' using Django 1.8.4.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '4%z9k*!y3p*yba5*^-ve^4^2rxol0f4fqvdkq41&tc5unkz=#^'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'swampdragon',
'engage',
'testapp',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.middleware.security.SecurityMiddleware',
)
ROOT_URLCONF = 'django_engage.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
'django.template.context_processors.media',
'django.template.context_processors.static',
],
'debug': True
},
},
]
WSGI_APPLICATION = 'django_engage.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
MEDIA_URL = '/media/'
# SwampDragon settings
SWAMP_DRAGON_CONNECTION = (
'swampdragon_auth.socketconnection.HttpDataConnection',
'/data'
)
DRAGON_URL = 'http://localhost:9999/'
| djangothon/django-engage | django_engage/settings.py | Python | mit | 3,040 |
# Time: O(m * n)
# Space: O(m * n)
import collections
class Solution(object):
    def findBlackPixel(self, picture, N):
        """
        :type picture: List[List[str]]
        :type N: int
        :rtype: int

        Count "lonely" black pixels: a 'B' at (r, c) counts when row r and
        column c each hold exactly N black pixels, and every row that has a
        'B' in column c is identical to row r.
        Time: O(m * n), Space: O(m * n).
        """
        if not picture or not picture[0]:
            return 0  # empty grid: nothing to count (original crashed here)
        # Per-row / per-column black-pixel tallies.
        rows, cols = [0] * len(picture), [0] * len(picture[0])
        # How many times each exact row pattern occurs.
        lookup = collections.defaultdict(int)
        # range (not the Python-2-only xrange) keeps this runnable on
        # both Python 2 and Python 3 with identical behavior.
        for i in range(len(picture)):
            for j in range(len(picture[0])):
                if picture[i][j] == 'B':
                    rows[i] += 1
                    cols[j] += 1
            lookup[tuple(picture[i])] += 1
        result = 0
        for i in range(len(picture)):
            # Row must hold exactly N 'B's AND its pattern must occur
            # exactly N times (so all rows sharing a 'B' column match it).
            if rows[i] == N and lookup[tuple(picture[i])] == N:
                for j in range(len(picture[0])):
                    # bool is summed as 0/1.
                    result += picture[i][j] == 'B' and cols[j] == N
        return result
class Solution2(object):
    def findBlackPixel(self, picture, N):
        """
        :type picture: List[List[str]]
        :type N: int
        :rtype: int

        Same counting rule as Solution, written as a one-pass reduction
        over the distinct row patterns.
        """
        # Frequency of each exact row pattern.
        lookup = collections.Counter(map(tuple, picture))
        # Black-pixel count per column; zip(*picture) transposes the grid.
        cols = [col.count('B') for col in zip(*picture)]
        # .items() and list(zip(...)) replace the Python-2-only
        # .iteritems() and the assumption that zip returns a list
        # (py3's zip iterator has no .count), keeping py2 behavior intact.
        return sum(N * list(zip(row, cols)).count(('B', N))
                   for row, cnt in lookup.items()
                   if cnt == N == row.count('B'))
| kamyu104/LeetCode | Python/lonely-pixel-ii.py | Python | mit | 1,287 |
import sys
# If our base template isn't on the PYTHONPATH already, we need to do this:
sys.path.append('../path/to/base/templates')
import basetemplate
class AlteredTemplate(basetemplate.BaseTemplate):
    """A template for a project that needs only an S3 bucket, no EC2 server."""

    def add_resources(self):
        self.add_bucket()

    def add_bucket(self):
        """Add the bucket from the base template, then attach a custom CORS
        configuration (GET allowed from any origin)."""
        super(AlteredTemplate, self).add_bucket()
        cors_rule = {
            'AllowedHeaders': ['*'],
            'AllowedMethods': ['GET'],
            'AllowedOrigins': ['*'],
        }
        bucket_properties = self.resources['StaticFiles']['Properties']
        bucket_properties['CorsConfiguration'] = {'CorsRules': [cors_rule]}
# NOTE(review): `options` is not defined anywhere in this module — as
# written these lines raise NameError at import time. Presumably
# cfn-pyplates injects it (or it should be imported); TODO confirm.
cft = AlteredTemplate("S3 Bucket Project", options)
cft.add_resources()
| broadinstitute/cfn-pyplates | docs/source/examples/advanced/altered_template.py | Python | mit | 899 |
"""
EmailManager - a helper class to login, search for, and delete emails.
"""
import email
import htmlentitydefs
import imaplib
import quopri
import re
import time
import types
from seleniumbase.config import settings
class EmailManager:
    """ A helper class to interface with an Email account. These imap methods
    can search for and fetch messages without needing a browser.
    Example:
    em = EmailManager()
    result = em.check_for_recipient(
        "[GMAIL.USER]+[SOME CODE OR TIMESTAMP KEY]@gmail.com")
    """
    # MIME content types this class knows how to extract.
    HTML = "text/html"
    PLAIN = "text/plain"
    # Default overall search timeout, in seconds (30 minutes).
    TIMEOUT = 1800

    def __init__(self, uname=settings.EMAIL_USERNAME,
                 pwd=settings.EMAIL_PASSWORD,
                 imap_string=settings.EMAIL_IMAP_STRING,
                 port=settings.EMAIL_IMAP_PORT):
        # Credentials and server location; defaults come from project config.
        self.uname = uname
        self.pwd = pwd
        self.imap_string = imap_string
        self.port = port

    def imap_connect(self):
        """
        Connect to the IMAP mailbox.
        """
        # A fresh connection is opened (and closed) around every operation.
        self.mailbox = imaplib.IMAP4_SSL(self.imap_string, self.port)
        self.mailbox.login(self.uname, self.pwd)
        self.mailbox.select()

    def imap_disconnect(self):
        """
        Disconnect from the IMAP mailbox.
        """
        self.mailbox.close()
        self.mailbox.logout()

    def __imap_search(self, ** criteria_dict):
        """ Searches for query in the given IMAP criteria and returns
        the message numbers that match as a list of strings.
        Criteria without values (eg DELETED) should be keyword args
        with KEY=True, or else not passed. Criteria with values should
        be keyword args of the form KEY="VALUE" where KEY is a valid
        IMAP key.
        IMAP default is to AND all criteria together. We don't support
        other logic quite yet.
        All valid keys: ALL, ANSWERED, BCC <string>, BEFORE <string>,
        BODY <string>, CC <string>, DELETED, DRAFT, FLAGGED, FROM
        <string>, HEADER <field-name> <string> (UNTESTED), KEYWORD
        <flag>, LARGER <n>, NEW, NOT <search-key>, OLD, ON <date>,
        OR <search-key1> <search-key2> (UNTESTED), RECENT, SEEN,
        SENTBEFORE <date>, SENTON <date>, SENTSINCE <date>, SINCE <date>,
        SMALLER <n>, SUBJECT <string>, TEXT <string>, TO <string>,
        UID <sequence set>, UNANSWERED, UNDELETED, UNDRAFT, UNFLAGGED,
        UNKEYWORD <flag>, UNSEEN.
        For details on keys and their values, see
        http://tools.ietf.org/html/rfc3501#section-6.4.4
        :param criteria_dict: dictionary of search criteria keywords
        :raises: EmailException if something in IMAP breaks
        :returns: List of message numbers as strings matched by given criteria
        """
        self.imap_connect()
        criteria = []
        for key in criteria_dict:
            if criteria_dict[key] is True:
                # Flag-style criterion, e.g. (DELETED).
                criteria.append('(%s)' % key)
            else:
                # Key/value criterion, e.g. (SUBJECT "hello").
                criteria.append('(%s "%s")' % (key, criteria_dict[key]))
        # If any of these criteria are not valid IMAP keys, IMAP will tell us.
        status, msg_nums = self.mailbox.search('UTF-8', * criteria)
        self.imap_disconnect()
        if 0 == len(msg_nums):
            msg_nums = []
        if 'OK' in status:
            return self.__parse_imap_search_result(msg_nums)
        else:
            raise EmailException("IMAP status is " + str(status))

    def remove_formatting(self, html):
        """
        Clean out any whitespace
        @Params
        html - String of html to remove whitespace from
        @Returns
        Cleaned string
        """
        # Collapses all runs of whitespace to single spaces.
        return ' '.join(html.split())

    def __parse_imap_search_result(self, result):
        """
        This takes the result of imap_search and returns SANE results
        @Params
        result - result from an imap_search call
        @Returns
        List of IMAP search results
        """
        # NOTE: uses the Python-2-only `types` constants, so this module is
        # Python 2 code throughout.
        if isinstance(result, types.ListType):
            # Above is same as "type(result) == types.ListType"
            if len(result) == 1:
                # IMAP returns a one-element list holding a space-separated
                # string of message numbers; unwrap and recurse.
                return self.__parse_imap_search_result(result[0])
            else:
                return result
        elif isinstance(result, types.StringType):
            # Above is same as "type(result) == types.StringType"
            return result.split()
        else:
            # Fail silently assuming tests will fail if emails are not found
            return []

    def fetch_html(self, msg_nums):
        """
        Given a message number that we found with imap_search,
        get the text/html content.
        @Params
        msg_nums - message number to get html message for
        @Returns
        HTML content of message matched by message number
        """
        if not msg_nums:
            raise Exception("Invalid Message Number!")
        return self.__imap_fetch_content_type(msg_nums, self.HTML)

    def fetch_plaintext(self, msg_nums):
        """
        Given a message number that we found with imap_search,
        get the text/plain content.
        @Params
        msg_nums - message number to get message for
        @Returns
        Plaintext content of message matched by message number
        """
        if not msg_nums:
            raise Exception("Invalid Message Number!")
        return self.__imap_fetch_content_type(msg_nums, self.PLAIN)

    def __imap_fetch_content_type(self, msg_nums, content_type):
        """
        Given a message number that we found with imap_search, fetch the
        whole source, dump that into an email object, and pick out the part
        that matches the content type specified. Return that, if we got
        multiple emails, return dict of all the parts.
        @Params
        msg_nums - message number to search for
        content_type - content type of email message to return
        @Returns
        Specified content type string or dict of all content types of matched
        email.
        """
        if not msg_nums:
            raise Exception("Invalid Message Number!")
        if not content_type:
            raise Exception("Need a content type!")
        contents = {}
        self.imap_connect()
        for num in msg_nums:
            # RFC822 fetch returns the complete raw message source.
            status, data = self.mailbox.fetch(num, "(RFC822)")
            for response_part in data:
                if isinstance(response_part, tuple):
                    msg = email.message_from_string(response_part[1])
                    # Walk all MIME parts; keep the last part matching
                    # the requested content type, keyed by message number.
                    for part in msg.walk():
                        if str(part.get_content_type()) == content_type:
                            content = str(part.get_payload(decode=True))
                            contents[int(num)] = content
        self.imap_disconnect()
        return contents

    def fetch_html_by_subject(self, email_name):
        """
        Get the html of an email, searching by subject.
        @Params
        email_name - the subject to search for
        @Returns
        HTML content of the matched email
        """
        if not email_name:
            raise EmailException("Subject cannot be null")
        results = self.__imap_search(SUBJECT=email_name)
        sources = self.fetch_html(results)
        return sources

    def fetch_plaintext_by_subject(self, email_name):
        """
        Get the plain text of an email, searching by subject.
        @Params
        email_name - the subject to search for
        @Returns
        Plaintext content of the matched email
        """
        if not email_name:
            raise EmailException("Subject cannot be null")
        results = self.__imap_search(SUBJECT=email_name)
        sources = self.fetch_plaintext(results)
        return sources

    def search_for_recipient(self, email, timeout=None, content_type=None):
        """
        Get content of emails, sent to a specific email address.
        @Params
        email - the recipient email address to search for
        timeout - seconds to try beore timing out
        content_type - type of email string to return
        @Returns
        Content of the matched email in the given content type
        """
        return self.search(timeout=timeout,
                           content_type=content_type, TO=email)

    def search_for_subject(self, subject, timeout=None, content_type=None):
        """
        Get content of emails, sent to a specific email address.
        @Params
        email - the recipient email address to search for
        timeout - seconds to try beore timing out
        content_type - type of email string to return
        @Returns
        Content of the matched email in the given content type
        """
        return self.search(timeout=timeout,
                           content_type=content_type, SUBJECT=subject)

    def search_for_count(self, ** args):
        """
        A search that keeps searching up until timeout for a
        specific number of matches to a search. If timeout is not
        specified we use the default. If count= is not specified we
        will fail. Return values are the same as search(), except for count=0,
        where we will return an empty list. Use this if you need to wait for a
        number of emails other than 1.
        @Params
        args - dict of arguments to use in search:
        count - number of emails to search for
        timeout - seconds to try search before timing out
        @Returns
        List of message numbers matched by search
        """
        if "timeout" not in args.keys():
            timeout = self.TIMEOUT
        elif args["timeout"]:
            timeout = args["timeout"]
            # Inner search() calls get a 15x shorter timeout per attempt.
            args["timeout"] = timeout / 15
        if "count" not in args.keys():
            raise EmailException("Count param not defined!")
        else:
            count = int(args["count"])
            del args["count"]
        results = None
        timer = timeout
        # NOTE(review): this reassignment clobbers the desired match count
        # parsed from args["count"] above — from here on `count` is the
        # elapsed-seconds counter, so `len(results) == count` below compares
        # against elapsed time, not the requested count. Looks like a bug;
        # confirm intended behavior before relying on this method.
        count = 0
        while count < timer:
            try:
                results = self.search(** args)
            except EmailException:
                if count == 0:
                    return []
            if results and len(results) == count:
                return results
            else:
                time.sleep(15)
                count += 15
        if count >= timer:
            raise EmailException("Failed to match criteria %s in %s minutes" %
                                 (args, timeout / 60))

    def __check_msg_for_headers(self, msg, ** email_headers):
        """
        Checks an Email.Message object for the headers in email_headers.
        Following are acceptable header names: ['Delivered-To',
        'Received', 'Return-Path', 'Received-SPF',
        'Authentication-Results', 'DKIM-Signature',
        'DomainKey-Signature', 'From', 'To', 'Message-ID',
        'Subject', 'MIME-Version', 'Content-Type', 'Date',
        'X-Sendgrid-EID', 'Sender'].
        @Params
        msg - the Email.message object to check
        email_headers - list of headers to check against
        @Returns
        Boolean whether all the headers were found
        """
        all_headers_found = False
        # Rewrites the caller-supplied 'To' criterion as 'Delivered-To'.
        # NOTE(review): raises KeyError if the caller omits 'To' — confirm
        # all call sites pass it.
        email_headers['Delivered-To'] = email_headers['To']
        email_headers.pop('To')
        all_headers_found = all(k in msg.keys() for k in email_headers)
        return all_headers_found

    def fetch_message(self, msgnum):
        """
        Given a message number, return the Email.Message object.
        @Params
        msgnum - message number to find
        @Returns
        Email.Message object for the given message number
        """
        self.imap_connect()
        status, data = self.mailbox.fetch(msgnum, "(RFC822)")
        self.imap_disconnect()
        for response_part in data:
            if isinstance(response_part, tuple):
                # Return the first complete message found.
                return email.message_from_string(response_part[1])

    def get_content_type(self, msg, content_type="HTML"):
        """
        Given an Email.Message object, gets the content-type payload
        as specified by @content_type. This is the actual body of the
        email.
        @Params
        msg - Email.Message object to get message content for
        content_type - Type of content to get from the email
        @Return
        String content of the email in the given type
        """
        # Accepts shorthand "HTML"/"PLAIN" (any case) or a full MIME type.
        if "HTML" in content_type.upper():
            content_type = self.HTML
        elif "PLAIN" in content_type.upper():
            content_type = self.PLAIN
        for part in msg.walk():
            if str(part.get_content_type()) == content_type:
                return str(part.get_payload(decode=True))

    def search(self, ** args):
        """
        Checks email inbox every 15 seconds that match the criteria
        up until timeout.
        Search criteria should be keyword args eg
        TO="selenium@gmail.com". See __imap_search docstring for list
        of valid criteria. If content_type is not defined, will return
        a list of msg numbers.
        Options:
        - fetch: will return a dict of Message objects, keyed on msgnum,
        which can be used to look at headers and other parts of the complete
        message. (http://docs.python.org/library/email.message.html)
        - timeout: will replace the default module timeout with the
        value in SECONDS.
        - content_type: should be either "PLAIN" or
        "HTML". If defined returns the source of the matched messages
        as a dict of msgnum:content. If not defined we return a list
        of msg nums.
        """
        # NOTE(review): if "content_type" is present but its value neither
        # contains "HTML"/"PLAIN" nor is truthy, content_type stays unbound
        # and is referenced below (NameError) — and the key is never deleted
        # so it would also leak into the IMAP criteria. Confirm callers only
        # pass the documented values.
        if "content_type" not in args.keys():
            content_type = None
        elif "HTML" in args["content_type"]:
            content_type = self.HTML
            del args["content_type"]
        elif "PLAIN" in args["content_type"]:
            content_type = self.PLAIN
            del args["content_type"]
        elif args["content_type"]:
            content_type = args['content_type']
            del args["content_type"]
        if "timeout" not in args.keys():
            timeout = self.TIMEOUT
        elif "timeout" in args:
            timeout = args["timeout"]
            del args["timeout"]
        fetch = False
        if "fetch" in args.keys():
            fetch = True
            del args["fetch"]
        results = None
        timer = timeout
        count = 0
        # Poll every 15 seconds until a match arrives or the timeout lapses.
        while count < timer:
            results = self.__imap_search(** args)
            if len(results) > 0:
                if fetch:
                    msgs = {}
                    for msgnum in results:
                        msgs[msgnum] = self.fetch_message(msgnum)
                    return msgs
                elif not content_type:
                    return results
                else:
                    return self.__imap_fetch_content_type(results,
                                                          content_type)
            else:
                time.sleep(15)
                count += 15
        if count >= timer:
            raise EmailException(
                "Failed to find message for criteria %s in %s minutes" %
                (args, timeout / 60))

    def remove_whitespace(self, html):
        """
        Clean whitespace from html
        @Params
        html - html source to remove whitespace from
        @Returns
        String html without whitespace
        """
        # Does python have a better way to do exactly this?
        clean_html = html
        for char in ("\r", "\n", "\t"):
            clean_html = clean_html.replace(char, "")
        return clean_html

    def remove_control_chars(self, html):
        """
        Clean control characters from html
        @Params
        html - html source to remove control characters from
        @Returns
        String html without control characters
        """
        # Delegates to remove_whitespace (CR/LF/TAB only).
        return self.remove_whitespace(html)

    def replace_entities(self, html):
        """
        Replace htmlentities with unicode characters
        @Params
        html - html source to replace entities in
        @Returns
        String html with entities replaced
        """
        def fixup(text):
            """replace the htmlentities in some text"""
            text = text.group(0)
            if text[:2] == "&#":
                # character reference
                try:
                    if text[:3] == "&#x":
                        # hex form, e.g. &#x27;
                        return unichr(int(text[3:-1], 16))
                    else:
                        # decimal form, e.g. &#39;
                        return unichr(int(text[2:-1]))
                except ValueError:
                    pass
            else:
                # named entity
                try:
                    text = unichr(htmlentitydefs.name2codepoint[text[1:-1]])
                except KeyError:
                    pass
            return text  # leave as is
        return re.sub("&#?\w+;", fixup, html)

    def decode_quoted_printable(self, html):
        """
        Decoding from Quoted-printable, or QP encoding, that uses ASCII 7bit
        chars to encode 8 bit chars, resulting in =3D to represent '='. Python
        supports UTF-8 so we decode. Also removes line breaks with '= at the
        end.'
        @Params
        html - html source to decode
        @Returns
        String decoded HTML source
        """
        return self.replace_entities(quopri.decodestring(html))

    def html_bleach(self, html):
        """
        Cleanup and get rid of all extraneous stuff for better comparison
        later. Turns formatted into into a single line string.
        @Params
        html - HTML source to clean up
        @Returns
        String cleaned up HTML source
        """
        return self.decode_quoted_printable(html)
class EmailException(Exception):
    """Signals an email-related failure (IMAP errors, unmatched searches)."""

    def __init__(self, value):
        # Keep the offending value around so callers can inspect it.
        self.parameter = value

    def __str__(self):
        rendered = repr(self.parameter)
        return rendered
| possoumous/Watchers | seleniumbase/fixtures/email_manager.py | Python | mit | 17,901 |
"""Default website configurations, used only for testing.
"""
from donut import environment
# Public Test Database
# Environment used only by the test suite; credentials are intentionally
# public and the imgur secret is masked out.
TEST = environment.Environment(
    db_hostname="localhost",
    db_name="donut_test",
    db_user="donut_test",
    db_password="public",
    debug=True,
    testing=True,
    secret_key="1234567890",
    imgur_api={
        "id": "b579f690cacf867",
        "secret": "****************************************"
    },
    # Only loopback connections are allowed in testing.
    restricted_ips=r"127\.0\.0\.1")
| ASCIT/donut-python | donut/default_config.py | Python | mit | 472 |
#!/usr/bin/env python
"""
Copyright (c) 2013-2014 Ben Croston
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
of the Software, and to permit persons to whom the Software is furnished to do
so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
"""This test suite assumes the following circuit is connected:
GND_PIN = 6
LED_PIN = 12 (with resistor to 0v)
SWITCH_PIN = 18 (with 0.1 uF capacitor around switch) to 0v
LOOP_IN = 16 connected with 1K resistor to LOOP_OUT
LOOP_OUT = 22
"""
import sys
import warnings
import time
from threading import Timer
import RPi.GPIO as GPIO
if sys.version[:3] == '2.6':
import unittest2 as unittest
else:
import unittest
GND_PIN = 6       # physical ground pin used by the test circuit
LED_PIN = 12      # LED output, BOARD numbering
LED_PIN_BCM = 18  # same LED pin expressed in BCM numbering
SWITCH_PIN = 18   # push switch input, BOARD numbering
LOOP_IN = 16      # loopback input, wired via 1K resistor to LOOP_OUT
LOOP_OUT = 22     # loopback output
# Test starts with 'AAA' so that it is run first
class TestAAASetup(unittest.TestCase):
    """Setup/teardown error cases; named 'AAA' so it runs before the rest."""
    def runTest(self):
        # Test mode not set (BOARD or BCM) exception
        with self.assertRaises(RuntimeError) as e:
            GPIO.setup(LED_PIN, GPIO.OUT)
        self.assertEqual(str(e.exception), 'Please set pin numbering mode using GPIO.setmode(GPIO.BOARD) or GPIO.setmode(GPIO.BCM)')
        GPIO.setmode(GPIO.BOARD)
        # Test not set as OUTPUT message
        with self.assertRaises(RuntimeError) as e:
            GPIO.output(LED_PIN, GPIO.HIGH)
        self.assertEqual(str(e.exception), 'The GPIO channel has not been set up as an OUTPUT')
        GPIO.setup(LED_PIN, GPIO.IN)
        # Test setup(..., pull_up_down=GPIO.HIGH) raises exception
        with self.assertRaises(ValueError):
            GPIO.setup(LED_PIN, GPIO.IN, pull_up_down=GPIO.HIGH)
        # Test 'already in use' warning
        GPIO.cleanup()
        # Export the pin behind RPi.GPIO's back via sysfs so the library
        # sees it as already in use.
        with open('/sys/class/gpio/export','wb') as f:
            f.write(str(LED_PIN_BCM).encode())
        with open('/sys/class/gpio/gpio%s/direction'%LED_PIN_BCM,'wb') as f:
            f.write(b'out')
        with open('/sys/class/gpio/gpio%s/value'%LED_PIN_BCM,'wb') as f:
            f.write(b'1')
        with warnings.catch_warnings(record=True) as w:
            GPIO.setup(LED_PIN, GPIO.OUT)  # generate 'already in use' warning
            self.assertEqual(w[0].category, RuntimeWarning)
        with open('/sys/class/gpio/unexport','wb') as f:
            f.write(str(LED_PIN_BCM).encode())
        GPIO.cleanup()
        # test initial value of high reads back as high
        GPIO.setup(LED_PIN, GPIO.OUT, initial=GPIO.HIGH)
        self.assertEqual(GPIO.input(LED_PIN), GPIO.HIGH)
        GPIO.cleanup()
        # test initial value of low reads back as low
        GPIO.setup(LED_PIN, GPIO.OUT, initial=GPIO.LOW)
        self.assertEqual(GPIO.input(LED_PIN), GPIO.LOW)
        GPIO.cleanup()
class TestInputOutput(unittest.TestCase):
    """Basic input()/output() behavior using the hardware loopback wiring."""
    def test_outputread(self):
        """Test that an output() can be input()"""
        GPIO.setup(LED_PIN, GPIO.OUT)
        GPIO.output(LED_PIN, GPIO.HIGH)
        self.assertEqual(GPIO.input(LED_PIN), GPIO.HIGH)
        GPIO.output(LED_PIN, GPIO.LOW)
        self.assertEqual(GPIO.input(LED_PIN), GPIO.LOW)
        GPIO.cleanup()
    def test_loopback(self):
        """Test output loops back to another input"""
        GPIO.setup(LOOP_IN, GPIO.IN, pull_up_down=GPIO.PUD_OFF)
        GPIO.setup(LOOP_OUT, GPIO.OUT, initial=GPIO.LOW)
        self.assertEqual(GPIO.input(LOOP_IN), GPIO.LOW)
        GPIO.output(LOOP_OUT, GPIO.HIGH)
        self.assertEqual(GPIO.input(LOOP_IN), GPIO.HIGH)
        GPIO.cleanup()
    def test_output_on_input(self):
        """Test output() can not be done on input"""
        GPIO.setup(SWITCH_PIN, GPIO.IN)
        with self.assertRaises(RuntimeError):
            GPIO.output(SWITCH_PIN, GPIO.LOW)
        GPIO.cleanup()
class TestSoftPWM(unittest.TestCase):
    """Interactive software-PWM test: a human confirms LED behavior.

    NOTE: uses Python 2 print statements and raw_input, like the rest of
    this file.
    """
    def runTest(self):
        GPIO.setup(LED_PIN, GPIO.OUT)
        pwm = GPIO.PWM(LED_PIN, 50)
        pwm.start(100)
        print "\nPWM tests"
        response = raw_input('Is the LED on (y/n) ? ').upper()
        self.assertEqual(response,'Y')
        pwm.start(0)
        response = raw_input('Is the LED off (y/n) ? ').upper()
        self.assertEqual(response,'Y')
        print "LED Brighten/fade test..."
        # Ramp duty cycle 0->100->0 three times.
        for i in range(0,3):
            for x in range(0,101,5):
                pwm.ChangeDutyCycle(x)
                time.sleep(0.1)
            for x in range(100,-1,-5):
                pwm.ChangeDutyCycle(x)
                time.sleep(0.1)
        pwm.stop()
        response = raw_input('Did it work (y/n) ? ').upper()
        self.assertEqual(response,'Y')
        GPIO.cleanup()
class TestSetWarnings(unittest.TestCase):
    """setwarnings(False/True) must suppress/enable RuntimeWarnings."""
    def test_alreadyinuse(self):
        """Test 'already in use' warning"""
        GPIO.setwarnings(False)
        # Export the pin via sysfs so RPi.GPIO sees it as already in use.
        with open('/sys/class/gpio/export','wb') as f:
            f.write(str(LED_PIN_BCM).encode())
        with open('/sys/class/gpio/gpio%s/direction'%LED_PIN_BCM,'wb') as f:
            f.write(b'out')
        with open('/sys/class/gpio/gpio%s/value'%LED_PIN_BCM,'wb') as f:
            f.write(b'1')
        with warnings.catch_warnings(record=True) as w:
            GPIO.setup(LED_PIN, GPIO.OUT)  # generate 'already in use' warning
            self.assertEqual(len(w),0)  # should be no warnings
        with open('/sys/class/gpio/unexport','wb') as f:
            f.write(str(LED_PIN_BCM).encode())
        GPIO.cleanup()
        GPIO.setwarnings(True)
        # Same again with warnings enabled: now a RuntimeWarning appears.
        with open('/sys/class/gpio/export','wb') as f:
            f.write(str(LED_PIN_BCM).encode())
        with open('/sys/class/gpio/gpio%s/direction'%LED_PIN_BCM,'wb') as f:
            f.write(b'out')
        with open('/sys/class/gpio/gpio%s/value'%LED_PIN_BCM,'wb') as f:
            f.write(b'1')
        with warnings.catch_warnings(record=True) as w:
            GPIO.setup(LED_PIN, GPIO.OUT)  # generate 'already in use' warning
            self.assertEqual(w[0].category, RuntimeWarning)
        with open('/sys/class/gpio/unexport','wb') as f:
            f.write(str(LED_PIN_BCM).encode())
        GPIO.cleanup()
    def test_cleanupwarning(self):
        """Test initial GPIO.cleanup() produces warning"""
        GPIO.setwarnings(False)
        GPIO.setup(SWITCH_PIN, GPIO.IN)
        with warnings.catch_warnings(record=True) as w:
            GPIO.cleanup()
            self.assertEqual(len(w),0)  # no warnings
            GPIO.cleanup()
            self.assertEqual(len(w),0)  # no warnings
        GPIO.setwarnings(True)
        GPIO.setup(SWITCH_PIN, GPIO.IN)
        with warnings.catch_warnings(record=True) as w:
            GPIO.cleanup()
            self.assertEqual(len(w),0)  # no warnings
            # Second cleanup with nothing set up warns.
            GPIO.cleanup()
            self.assertEqual(w[0].category, RuntimeWarning)  # a warning
class TestVersions(unittest.TestCase):
    """Interactive checks of the detected board revision and module version."""
    def test_rpi_revision(self):
        if GPIO.RPI_REVISION == 0:
            revision = 'Compute Module'
        elif GPIO.RPI_REVISION == 1:
            revision = 'revision 1'
        elif GPIO.RPI_REVISION == 2:
            revision = 'revision 2'
        elif GPIO.RPI_REVISION == 3:
            revision = 'Model B+'
        else:
            revision = '**undetected**'
        response = raw_input('\nThis board appears to be a %s - is this correct (y/n) ? '%revision).upper()
        self.assertEqual(response, 'Y')
    def test_gpio_version(self):
        response = raw_input('\nRPi.GPIO version %s - is this correct (y/n) ? '%GPIO.VERSION).upper()
        self.assertEqual(response, 'Y')
class TestGPIOFunction(unittest.TestCase):
    """gpio_function() must report the configured direction in both
    BCM and BOARD numbering modes."""
    def runTest(self):
        GPIO.setmode(GPIO.BCM)
        GPIO.setup(LED_PIN_BCM, GPIO.IN)
        self.assertEqual(GPIO.gpio_function(LED_PIN_BCM), GPIO.IN)
        GPIO.setup(LED_PIN_BCM, GPIO.OUT)
        self.assertEqual(GPIO.gpio_function(LED_PIN_BCM), GPIO.OUT)
        GPIO.setmode(GPIO.BOARD)
        GPIO.setup(LED_PIN, GPIO.IN)
        self.assertEqual(GPIO.gpio_function(LED_PIN), GPIO.IN)
        GPIO.setup(LED_PIN, GPIO.OUT)
        self.assertEqual(GPIO.gpio_function(LED_PIN), GPIO.OUT)
    def tearDown(self):
        GPIO.cleanup()
class TestSwitchBounce(unittest.TestCase):
    """Interactive debounce tests: a human presses the physical switch."""
    def __init__(self, *a, **k):
        unittest.TestCase.__init__(self, *a, **k)
        # Number of (debounced) presses seen so far.
        self.switchcount = 0
    def cb(self,chan):
        # Event callback: count each debounced falling edge.
        self.switchcount += 1
        print 'Button press',self.switchcount
    def setUp(self):
        GPIO.setup(SWITCH_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)
    def test_switchbounce(self):
        self.switchcount = 0
        print "\nSwitch bounce test.  Press switch at least 10 times and count..."
        GPIO.add_event_detect(SWITCH_PIN, GPIO.FALLING, callback=self.cb, bouncetime=200)
        while self.switchcount < 10:
            time.sleep(1)
        GPIO.remove_event_detect(SWITCH_PIN)
    def test_event_detected(self):
        self.switchcount = 0
        print "\nGPIO.event_detected() switch bounce test.  Press switch at least 10 times and count..."
        GPIO.add_event_detect(SWITCH_PIN, GPIO.FALLING, bouncetime=200)
        while self.switchcount < 10:
            # Polling variant of the callback test above.
            if GPIO.event_detected(SWITCH_PIN):
                self.switchcount += 1
                print 'Button press',self.switchcount
        GPIO.remove_event_detect(SWITCH_PIN)
    def tearDown(self):
        GPIO.cleanup()
class TestEdgeDetection(unittest.TestCase):
def setUp(self):
GPIO.setup(LOOP_IN, GPIO.IN)
GPIO.setup(LOOP_OUT, GPIO.OUT)
def testWaitForEdgeWithCallback(self):
def cb():
raise Exception("Callback should not be called")
def makehigh():
GPIO.output(LOOP_OUT, GPIO.HIGH)
GPIO.output(LOOP_OUT, GPIO.LOW)
t = Timer(0.1, makehigh)
GPIO.add_event_detect(LOOP_IN, GPIO.RISING)
t.start()
GPIO.wait_for_edge(LOOP_IN, GPIO.RISING)
GPIO.output(LOOP_OUT, GPIO.LOW)
GPIO.add_event_callback(LOOP_IN, callback=cb)
with self.assertRaises(RuntimeError):
GPIO.wait_for_edge(LOOP_IN, GPIO.RISING)
GPIO.remove_event_detect(LOOP_IN)
def testWaitForEventSwitchbounce(self):
def bounce():
GPIO.output(LOOP_OUT, GPIO.HIGH)
time.sleep(0.01)
GPIO.output(LOOP_OUT, GPIO.LOW)
time.sleep(0.01)
GPIO.output(LOOP_OUT, GPIO.HIGH)
time.sleep(0.01)
GPIO.output(LOOP_OUT, GPIO.LOW)
time.sleep(0.2)
GPIO.output(LOOP_OUT, GPIO.HIGH)
time.sleep(0.01)
GPIO.output(LOOP_OUT, GPIO.LOW)
time.sleep(0.01)
GPIO.output(LOOP_OUT, GPIO.HIGH)
time.sleep(0.01)
GPIO.output(LOOP_OUT, GPIO.LOW)
GPIO.output(LOOP_OUT, GPIO.LOW)
t1 = Timer(0.1, bounce)
t1.start()
starttime = time.time()
GPIO.wait_for_edge(LOOP_IN, GPIO.RISING, bouncetime=100)
GPIO.wait_for_edge(LOOP_IN, GPIO.RISING, bouncetime=100)
finishtime = time.time()
self.assertGreater(finishtime-starttime, 0.2)
def testInvalidBouncetime(self):
with self.assertRaises(ValueError):
GPIO.add_event_detect(LOOP_IN, GPIO.RISING, bouncetime=-1)
with self.assertRaises(ValueError):
GPIO.wait_for_edge(LOOP_IN, GPIO.RISING, bouncetime=-1)
GPIO.add_event_detect(LOOP_IN, GPIO.RISING, bouncetime=123)
with self.assertRaises(RuntimeError):
GPIO.wait_for_edge(LOOP_IN, GPIO.RISING, bouncetime=321)
GPIO.remove_event_detect(LOOP_IN)
def testAlreadyAdded(self):
GPIO.add_event_detect(LOOP_IN, GPIO.RISING)
with self.assertRaises(RuntimeError):
GPIO.add_event_detect(LOOP_IN, GPIO.RISING)
with self.assertRaises(RuntimeError):
GPIO.wait_for_edge(LOOP_IN, GPIO.FALLING)
GPIO.remove_event_detect(LOOP_IN)
def testHighLowEvent(self):
with self.assertRaises(ValueError):
GPIO.add_event_detect(LOOP_IN, GPIO.LOW)
with self.assertRaises(ValueError):
GPIO.add_event_detect(LOOP_IN, GPIO.HIGH)
def testFallingEventDetected(self):
GPIO.output(LOOP_OUT, GPIO.HIGH)
GPIO.add_event_detect(LOOP_IN, GPIO.FALLING)
time.sleep(0.01)
self.assertEqual(GPIO.event_detected(LOOP_IN), False)
GPIO.output(LOOP_OUT, GPIO.LOW)
time.sleep(0.01)
self.assertEqual(GPIO.event_detected(LOOP_IN), True)
GPIO.output(LOOP_OUT, GPIO.HIGH)
time.sleep(0.01)
self.assertEqual(GPIO.event_detected(LOOP_IN), False)
GPIO.remove_event_detect(LOOP_IN)
def testRisingEventDetected(self):
    """event_detected() latches a rising edge once; falling edges are ignored."""
    GPIO.output(LOOP_OUT, GPIO.LOW)
    GPIO.add_event_detect(LOOP_IN, GPIO.RISING)
    # (level to drive on LOOP_OUT, whether an edge should have been latched);
    # the first step drives nothing and just checks the idle state.
    steps = ((None, False), (GPIO.HIGH, True), (GPIO.LOW, False))
    for drive_level, edge_expected in steps:
        if drive_level is not None:
            GPIO.output(LOOP_OUT, drive_level)
        time.sleep(0.01)
        self.assertEqual(GPIO.event_detected(LOOP_IN), edge_expected)
    GPIO.remove_event_detect(LOOP_IN)
def testBothEventDetected(self):
    """With GPIO.BOTH, every transition latches an event, and reading the
    flag clears it until the next transition."""
    GPIO.output(LOOP_OUT, GPIO.LOW)
    GPIO.add_event_detect(LOOP_IN, GPIO.BOTH)
    time.sleep(0.01)
    self.assertEqual(GPIO.event_detected(LOOP_IN), False)  # no edge yet
    GPIO.output(LOOP_OUT, GPIO.HIGH)
    time.sleep(0.01)
    self.assertEqual(GPIO.event_detected(LOOP_IN), True)   # rising edge latched
    self.assertEqual(GPIO.event_detected(LOOP_IN), False)  # flag cleared by read
    GPIO.output(LOOP_OUT, GPIO.LOW)
    time.sleep(0.01)
    self.assertEqual(GPIO.event_detected(LOOP_IN), True)   # falling edge latched
    GPIO.remove_event_detect(LOOP_IN)
def testWaitForRising(self):
    """wait_for_edge(RISING) returns once the looped-back input goes high."""
    def drive_high():
        GPIO.output(LOOP_OUT, GPIO.HIGH)
    GPIO.output(LOOP_OUT, GPIO.LOW)  # start low so the rising edge is real
    Timer(0.1, drive_high).start()   # raise the line 100 ms from now
    GPIO.wait_for_edge(LOOP_IN, GPIO.RISING)
def testWaitForFalling(self):
    """wait_for_edge(FALLING) returns once the looped-back input goes low."""
    def drive_low():
        GPIO.output(LOOP_OUT, GPIO.LOW)
    GPIO.output(LOOP_OUT, GPIO.HIGH)  # start high so the falling edge is real
    Timer(0.1, drive_low).start()     # drop the line 100 ms from now
    GPIO.wait_for_edge(LOOP_IN, GPIO.FALLING)
def testExceptionInCallback(self):
    """An exception raised inside an edge callback must not prevent the
    callback from running (and must not kill the detection machinery)."""
    self.run_cb = False
    def cb(channel):
        # NOTE(review): cb executes on the event-detection thread, so this
        # assertRaises verifies the ZeroDivisionError within the callback
        # itself; a failure here would surface on that thread rather than
        # in the test runner — confirm this is the intended check.
        with self.assertRaises(ZeroDivisionError):
            self.run_cb = True
            a = 1/0
    GPIO.output(LOOP_OUT, GPIO.LOW)
    GPIO.add_event_detect(LOOP_IN, GPIO.RISING, callback=cb)
    time.sleep(0.01)
    GPIO.output(LOOP_OUT, GPIO.HIGH)  # trigger the rising edge
    time.sleep(0.01)
    self.assertEqual(self.run_cb, True)  # callback did run despite raising
    GPIO.remove_event_detect(LOOP_IN)
def testAddEventCallback(self):
    """Edge callbacks fire exactly once per detected edge.

    Exercises FALLING, RISING and BOTH detection over 2048 output
    cycles each; BOTH must count every transition, i.e. twice as
    many callbacks as a single-direction detection.
    """
    def cb(channel):
        self.callback_count += 1

    def pulse(first, second, cycles=2048):
        # Drive LOOP_OUT through `cycles` first->second transitions,
        # pausing long enough for the event thread to see each edge.
        # (Previously this loop was duplicated verbatim three times.)
        for _ in range(cycles):
            GPIO.output(LOOP_OUT, first)
            time.sleep(0.001)
            GPIO.output(LOOP_OUT, second)
            time.sleep(0.001)

    # falling test: callback registered separately via add_event_callback
    self.callback_count = 0
    GPIO.output(LOOP_OUT, GPIO.HIGH)
    GPIO.add_event_detect(LOOP_IN, GPIO.FALLING)
    GPIO.add_event_callback(LOOP_IN, cb)
    time.sleep(0.01)
    pulse(GPIO.LOW, GPIO.HIGH)
    GPIO.remove_event_detect(LOOP_IN)
    self.assertEqual(self.callback_count, 2048)

    # rising test: callback passed directly to add_event_detect
    self.callback_count = 0
    GPIO.output(LOOP_OUT, GPIO.LOW)
    GPIO.add_event_detect(LOOP_IN, GPIO.RISING, callback=cb)
    time.sleep(0.01)
    pulse(GPIO.HIGH, GPIO.LOW)
    GPIO.remove_event_detect(LOOP_IN)
    self.assertEqual(self.callback_count, 2048)

    # both test: every transition counts, so twice the callbacks
    self.callback_count = 0
    GPIO.output(LOOP_OUT, GPIO.LOW)
    GPIO.add_event_detect(LOOP_IN, GPIO.BOTH, callback=cb)
    time.sleep(0.01)
    pulse(GPIO.HIGH, GPIO.LOW)
    GPIO.remove_event_detect(LOOP_IN)
    self.assertEqual(self.callback_count, 4096)
def testEventOnOutput(self):
    """Edge detection cannot be registered on a channel configured as output."""
    with self.assertRaises(RuntimeError):
        GPIO.add_event_detect(LOOP_OUT, GPIO.FALLING)
def tearDown(self):
    # Release all channels so each test starts from a clean GPIO state.
    GPIO.cleanup()
class TestCleanup(unittest.TestCase):
    """GPIO.cleanup() must return channels to their default (input) mode."""

    def _assert_modes(self, loop_mode, led_mode):
        # Single place for the paired gpio_function checks used below.
        self.assertEqual(GPIO.gpio_function(LOOP_OUT), loop_mode)
        self.assertEqual(GPIO.gpio_function(LED_PIN), led_mode)

    def _setup_outputs(self):
        # Configure both channels as outputs and confirm the mode took.
        GPIO.setup(LOOP_OUT, GPIO.OUT)
        GPIO.setup(LED_PIN, GPIO.OUT)
        self._assert_modes(GPIO.OUT, GPIO.OUT)

    def test_cleanall(self):
        """cleanup() with no argument resets every configured channel."""
        self._setup_outputs()
        GPIO.cleanup()
        self._assert_modes(GPIO.IN, GPIO.IN)

    def test_cleanone(self):
        """cleanup(channel) resets only the named channel."""
        self._setup_outputs()
        GPIO.cleanup(LOOP_OUT)
        self._assert_modes(GPIO.IN, GPIO.OUT)
        GPIO.cleanup(LED_PIN)
        self._assert_modes(GPIO.IN, GPIO.IN)
#def test_suite():
# suite = unittest.TestLoader().loadTestsFromModule()
# return suite
if __name__ == '__main__':
    # Discover and run all test cases in this module when executed directly.
    unittest.main()
| NeoBelerophon/Arietta.GPIO | test/test.py | Python | mit | 18,250 |
#!/usr/bin/env python
#
# Common client code
#
# Copyright 2016 Markus Zoeller
import os
from launchpadlib.launchpad import Launchpad
def get_project_client(project_name):
    """Return an anonymous Launchpad client for the given project.

    :param project_name: Launchpad project name (e.g. "nova").
    :returns: the launchpadlib project object for *project_name*.
    """
    cachedir = os.path.expanduser("~/.launchpadlib/cache/")
    # exist_ok avoids the check-then-create race of the previous
    # `if not exists: makedirs` pattern when scripts run concurrently.
    os.makedirs(cachedir, 0o700, exist_ok=True)
    # Anonymous login is read-only, which is all the bug reports need.
    launchpad = Launchpad.login_anonymously(project_name + '-bugs',
                                            'production', cachedir)
    return launchpad.projects[project_name]
def remove_first_line(invalid_json):
    """Return *invalid_json* with its first line stripped off.

    Yields the empty string when the input has no newline at all.
    """
    # partition keeps everything after the first '\n' ('' if none exists).
    return invalid_json.partition('\n')[2]
class BugReport(object):
    """A single Launchpad bug report, comparable (and sortable) by age."""

    def __init__(self, link, title, age):
        # link: URL of the bug report
        self.link = link
        # Force the title to plain ASCII; decoding back keeps it a text
        # string on Python 3 (a bare .encode would leave bytes and make
        # __str__ print b'...').
        self.title = title.encode('ascii', 'replace').decode('ascii')
        # age: bug age in days
        self.age = age

    def __str__(self):
        data = {'link': self.link, 'title': self.title, 'age': self.age}
        return "{link} ({title}) - ({age} days)".format(**data)

    # Rich comparisons replace the Python-2-only __cmp__/cmp pair, which
    # does not exist on Python 3; ordering by age is preserved.
    def __eq__(self, other):
        return self.age == other.age

    def __lt__(self, other):
        return self.age < other.age