code stringlengths 2 1.05M | repo_name stringlengths 5 104 | path stringlengths 4 251 | language stringclasses 1 value | license stringclasses 15 values | size int32 2 1.05M |
|---|---|---|---|---|---|
'''
This file contains the manually chosen admin forms, as needed for an easy-to-use
editor.
'''
from django.contrib import admin
from django.conf import settings
from metashare.repository.editor import admin_site as editor_site
from metashare.repository.editor.resource_editor import ResourceModelAdmin, \
LicenceModelAdmin
from metashare.repository.editor.superadmin import SchemaModelAdmin
from metashare.repository.models import resourceInfoType_model, \
identificationInfoType_model, metadataInfoType_model, \
communicationInfoType_model, validationInfoType_model, \
relationInfoType_model, foreseenUseInfoType_model, \
corpusMediaTypeType_model, corpusTextInfoType_model, \
corpusVideoInfoType_model, textNumericalFormatInfoType_model, \
videoClassificationInfoType_model, imageClassificationInfoType_model, \
participantInfoType_model, corpusAudioInfoType_model, \
corpusImageInfoType_model, corpusTextNumericalInfoType_model, \
corpusTextNgramInfoType_model, languageDescriptionInfoType_model, \
languageDescriptionTextInfoType_model, actualUseInfoType_model, \
languageDescriptionVideoInfoType_model, \
languageDescriptionImageInfoType_model, \
lexicalConceptualResourceInfoType_model, \
lexicalConceptualResourceTextInfoType_model, \
lexicalConceptualResourceAudioInfoType_model, \
lexicalConceptualResourceVideoInfoType_model, \
lexicalConceptualResourceImageInfoType_model, toolServiceInfoType_model, \
licenceInfoType_model, personInfoType_model, projectInfoType_model, \
documentInfoType_model, organizationInfoType_model, \
documentUnstructuredString_model
from metashare.repository.editor.related_mixin import RelatedAdminMixin
from django.views.decorators.csrf import csrf_protect
from django.db import transaction
from django.utils.decorators import method_decorator
from django.contrib.admin.util import unquote
from django.core.exceptions import PermissionDenied
from django.utils.html import escape
from django.utils.encoding import force_unicode
from django.http import Http404
from django.utils.safestring import mark_safe
from django.contrib.admin import helpers
from django.utils.translation import ugettext as _
from metashare.repository.editor.related_objects import AdminRelatedInfo
# Method-decorator form of Django's CSRF protection, for use on admin views.
csrf_protect_m = method_decorator(csrf_protect)
# Custom admin classes
class CorpusTextInfoAdmin(SchemaModelAdmin):
    # Text-corpus editor: hide the back-reference to the media-type container
    # and render fieldsets as tabs.
    hidden_fields = ('back_to_corpusmediatypetype_model', )
    show_tabbed_fieldsets = True
class CorpusVideoInfoAdmin(SchemaModelAdmin):
    # Video-corpus editor: hide the back-reference to the media-type container
    # and render fieldsets as tabs.
    hidden_fields = ('back_to_corpusmediatypetype_model', )
    show_tabbed_fieldsets = True
class GenericTabbedAdmin(SchemaModelAdmin):
    # Generic editor used for the many models that only need tabbed fieldsets.
    show_tabbed_fieldsets = True
class LexicalConceptualResourceInfoAdmin(SchemaModelAdmin):
    # The media-type field is managed elsewhere, so it is read-only here.
    readonly_fields = ('lexicalConceptualResourceMediaType', )
    show_tabbed_fieldsets = True
class LanguageDescriptionInfoAdmin(SchemaModelAdmin):
    # The media-type field is managed elsewhere, so it is read-only here.
    readonly_fields = ('languageDescriptionMediaType', )
    show_tabbed_fieldsets = True
class CorpusAudioModelAdmin(SchemaModelAdmin):
    # Audio-corpus editor: tabbed fieldsets only, no other customization.
    show_tabbed_fieldsets = True
class PersonModelAdmin(AdminRelatedInfo, SchemaModelAdmin):
    # Hide replication bookkeeping fields from the editor form.
    exclude = ('source_url', 'copy_status')
    list_display = ('instance_data', 'num_related_resources', 'related_resources')

    def instance_data(self, obj):
        # Human-readable label for the change-list column.
        return obj.__unicode__()
    instance_data.short_description = _('Person')
class OrganizationModelAdmin(AdminRelatedInfo, SchemaModelAdmin):
    # Hide replication bookkeeping fields from the editor form.
    exclude = ('source_url', 'copy_status')
    list_display = ('instance_data', 'num_related_resources', 'related_resources')

    def instance_data(self, obj):
        # Human-readable label for the change-list column.
        return obj.__unicode__()
    instance_data.short_description = _('Organization')
class ProjectModelAdmin(AdminRelatedInfo, SchemaModelAdmin):
    # Hide replication bookkeeping fields from the editor form.
    exclude = ('source_url', 'copy_status')
    list_display = ('instance_data', 'num_related_resources', 'related_resources')

    def instance_data(self, obj):
        # Human-readable label for the change-list column.
        return obj.__unicode__()
    instance_data.short_description = _('Project')
class DocumentModelAdmin(AdminRelatedInfo, SchemaModelAdmin):
    # Hide replication bookkeeping fields from the editor form.
    exclude = ('source_url', 'copy_status')
    list_display = ('instance_data', 'num_related_resources', 'related_resources')

    def instance_data(self, obj):
        # Human-readable label for the change-list column.
        return obj.__unicode__()
    instance_data.short_description = _('Document')
class DocumentUnstructuredStringModelAdmin(admin.ModelAdmin, RelatedAdminMixin):
    # Plain ModelAdmin (not SchemaModelAdmin) for the free-text document model;
    # only popup handling and the change view are customized.

    def response_change(self, request, obj):
        '''
        Response sent after a successful submission of a change form.
        We customize this to allow closing edit popups in the same way
        as response_add deals with add popups.
        '''
        # One-to-many popup: close it, notifying the caller widget if given.
        if '_popup_o2m' in request.REQUEST:
            caller = None
            if '_caller' in request.REQUEST:
                caller = request.REQUEST['_caller']
            return self.edit_response_close_popup_magic_o2m(obj, caller)
        if '_popup' in request.REQUEST:
            # "Save and continue editing" keeps the popup open.
            if request.POST.has_key("_continue"):
                return self.save_and_continue_in_popup(obj, request)
            return self.edit_response_close_popup_magic(obj)
        else:
            return super(DocumentUnstructuredStringModelAdmin, self).response_change(request, obj)

    @csrf_protect_m
    @transaction.commit_on_success
    def change_view(self, request, object_id, extra_context=None):
        """
        The 'change' admin view for this model.
        This follows closely the base implementation from Django 1.3's
        django.contrib.admin.options.ModelAdmin,
        with the explicitly marked modifications.
        """
        # pylint: disable-msg=C0103
        model = self.model
        opts = model._meta
        obj = self.get_object(request, unquote(object_id))
        if not self.has_change_permission(request, obj):
            raise PermissionDenied
        #### begin modification ####
        # make sure that the user has a full session length time for the current
        # edit activity
        request.session.set_expiry(settings.SESSION_COOKIE_AGE)
        #### end modification ####
        if obj is None:
            raise Http404(_('%(name)s object with primary key %(key)r does not exist.') % {'name': force_unicode(opts.verbose_name), 'key': escape(object_id)})
        if request.method == 'POST' and "_saveasnew" in request.POST:
            return self.add_view(request, form_url='../add/')
        ModelForm = self.get_form(request, obj)
        # This model uses no inline formsets, so the list stays empty.
        formsets = []
        if request.method == 'POST':
            form = ModelForm(request.POST, request.FILES, instance=obj)
            if form.is_valid():
                form_validated = True
                new_object = self.save_form(request, form, change=True)
            else:
                form_validated = False
                new_object = obj
            if form_validated:
                #### begin modification ####
                self.save_model(request, new_object, form, change=True)
                #### end modification ####
                change_message = self.construct_change_message(request, form, formsets)
                self.log_change(request, new_object, change_message)
                return self.response_change(request, new_object)
            # On validation errors we fall through and re-render the form.
        else:
            form = ModelForm(instance=obj)
        #### begin modification ####
        media = self.media or []
        #### end modification ####
        inline_admin_formsets = []
        #### begin modification ####
        adminForm = helpers.AdminForm(form, self.get_fieldsets(request, obj),
            self.prepopulated_fields, self.get_readonly_fields(request, obj),
            model_admin=self)
        media = media + adminForm.media
        #### end modification ####
        context = {
            'title': _('Change %s') % force_unicode(opts.verbose_name),
            'adminform': adminForm,
            'object_id': object_id,
            'original': obj,
            'is_popup': "_popup" in request.REQUEST or \
                        "_popup_o2m" in request.REQUEST,
            'media': mark_safe(media),
            'inline_admin_formsets': inline_admin_formsets,
            'errors': helpers.AdminErrorList(form, formsets),
            'root_path': self.admin_site.root_path,
            'app_label': opts.app_label,
            'kb_link': settings.KNOWLEDGE_BASE_URL,
            'comp_name': _('%s') % force_unicode(opts.verbose_name),
        }
        context.update(extra_context or {})
        return self.render_change_form(request, context, change=True, obj=obj)
# Models which are always rendered inline so they don't need their own admin form:
# (register() below unregisters these from the default admin site.)
purely_inline_models = (
    actualUseInfoType_model,
    identificationInfoType_model,
    metadataInfoType_model,
    communicationInfoType_model,
    validationInfoType_model,
    relationInfoType_model,
    foreseenUseInfoType_model,
    corpusMediaTypeType_model,
    textNumericalFormatInfoType_model,
    videoClassificationInfoType_model,
    imageClassificationInfoType_model,
    participantInfoType_model,
)
# Maps each model that needs special treatment to its hand-written admin class;
# register() below swaps these in for the auto-generated registrations.
custom_admin_classes = {
    resourceInfoType_model: ResourceModelAdmin,
    corpusAudioInfoType_model: CorpusAudioModelAdmin,
    corpusTextInfoType_model: CorpusTextInfoAdmin,
    corpusVideoInfoType_model: CorpusVideoInfoAdmin,
    corpusImageInfoType_model: GenericTabbedAdmin,
    corpusTextNumericalInfoType_model: GenericTabbedAdmin,
    corpusTextNgramInfoType_model: GenericTabbedAdmin,
    languageDescriptionInfoType_model: LanguageDescriptionInfoAdmin,
    languageDescriptionTextInfoType_model: GenericTabbedAdmin,
    languageDescriptionVideoInfoType_model: GenericTabbedAdmin,
    languageDescriptionImageInfoType_model: GenericTabbedAdmin,
    lexicalConceptualResourceInfoType_model: LexicalConceptualResourceInfoAdmin,
    lexicalConceptualResourceTextInfoType_model: GenericTabbedAdmin,
    lexicalConceptualResourceAudioInfoType_model: GenericTabbedAdmin,
    lexicalConceptualResourceVideoInfoType_model: GenericTabbedAdmin,
    lexicalConceptualResourceImageInfoType_model: GenericTabbedAdmin,
    toolServiceInfoType_model: GenericTabbedAdmin,
    licenceInfoType_model: LicenceModelAdmin,
    personInfoType_model: PersonModelAdmin,
    organizationInfoType_model: OrganizationModelAdmin,
    projectInfoType_model: ProjectModelAdmin,
    documentInfoType_model: DocumentModelAdmin,
    documentUnstructuredString_model: DocumentUnstructuredStringModelAdmin,
}
def register():
    '''
    Apply the manually curated admin registrations on top of the
    automatically generated ones.

    This presupposes the automatic registration has already been run.
    '''
    # Inline-only models must not appear as standalone admin pages.
    for inline_model in purely_inline_models:
        admin.site.unregister(inline_model)
    # Swap in the hand-written admin class for each customized model.
    for custom_model, custom_admin in custom_admin_classes.items():
        admin.site.unregister(custom_model)
        admin.site.register(custom_model, custom_admin)
    # Mirror the final model/admin mapping onto the editor site so that both
    # sites expose exactly the same pairs.
    for registered_model, admin_instance in admin.site._registry.items():
        editor_site.register(registered_model, admin_instance.__class__)
| JuliBakagianni/CEF-ELRC | metashare/repository/editor/manual_admin_registration.py | Python | bsd-3-clause | 11,138 |
from decimal import Decimal
import random
import hashlib
from django.conf import settings
from django.core.cache import cache
from django.db.models.signals import post_save, post_delete, m2m_changed
from waffle.models import Flag, Sample, Switch
VERSION = (0, 9, 2)
__version__ = '.'.join(map(str, VERSION))
# All cache keys are namespaced with this configurable prefix.
CACHE_PREFIX = getattr(settings, 'WAFFLE_CACHE_PREFIX', u'waffle:')
# Cache key templates; '%s' is filled with the flag/sample/switch name.
FLAG_CACHE_KEY = u'flag:%s'
FLAGS_ALL_CACHE_KEY = u'flags:all'
FLAG_USERS_CACHE_KEY = u'flag:%s:users'
FLAG_GROUPS_CACHE_KEY = u'flag:%s:groups'
SAMPLE_CACHE_KEY = u'sample:%s'
SAMPLES_ALL_CACHE_KEY = u'samples:all'
SWITCH_CACHE_KEY = u'switch:%s'
SWITCHES_ALL_CACHE_KEY = u'switches:all'
# Cookie name templates; '%s' is filled with the flag name.
COOKIE_NAME = getattr(settings, 'WAFFLE_COOKIE', 'dwf_%s')
TEST_COOKIE_NAME = getattr(settings, 'WAFFLE_TESTING_COOKIE', 'dwft_%s')
def keyfmt(k, v=None):
    """Build a namespaced cache key.

    Without *v*, returns the raw key prefixed with CACHE_PREFIX.  With *v*,
    interpolates *v* into the key template *k* and MD5-hashes the result so
    the final cache key is short and backend-safe regardless of the name.
    """
    if v is None:
        return CACHE_PREFIX + k
    # Encode before hashing: hashlib requires bytes, so hashing the unicode
    # templates directly crashed for non-ASCII flag/switch/sample names.
    # UTF-8 is ASCII-compatible, so existing ASCII keys are unchanged.
    return CACHE_PREFIX + hashlib.md5((k % v).encode('utf-8')).hexdigest()
class DoesNotExist(object):
    """The record does not exist."""
    # Stand-in cached for missing Switch rows so that repeated lookups of an
    # unknown switch name do not keep hitting the database.

    @property
    def active(self):
        # Unknown switches fall back to the project-wide default.
        return getattr(settings, 'WAFFLE_SWITCH_DEFAULT', False)
def set_flag(request, flag_name, active=True, session_only=False):
    """Record a flag decision on the request.

    Stores ``[active, session_only]`` under ``flag_name`` in the
    ``request.waffles`` dict, creating that dict on first use.
    """
    try:
        store = request.waffles
    except AttributeError:
        store = request.waffles = {}
    store[flag_name] = [active, session_only]
def flag_is_active(request, flag_name):
    """Decide whether ``flag_name`` is active for this request.

    Checks, in order: explicit GET override (when WAFFLE_OVERRIDE is set),
    the flag's everyone/testing settings, user attributes (authenticated,
    staff, superuser), language, explicit user/group membership, and finally
    a percentage rollout with cookie stickiness.  Missing flags fall back to
    WAFFLE_FLAG_DEFAULT.
    """
    flag = cache.get(keyfmt(FLAG_CACHE_KEY, flag_name))
    if flag is None:
        try:
            flag = Flag.objects.get(name=flag_name)
            cache_flag(instance=flag)
        except Flag.DoesNotExist:
            return getattr(settings, 'WAFFLE_FLAG_DEFAULT', False)
    # Query-string override, e.g. ?myflag=1, only when explicitly enabled.
    if getattr(settings, 'WAFFLE_OVERRIDE', False):
        if flag_name in request.GET:
            return request.GET[flag_name] == '1'
    # flag.everyone is a NullBooleanField-style tri-state: True forces on,
    # False forces off, None falls through to the finer-grained checks.
    if flag.everyone:
        return True
    elif flag.everyone is False:
        return False
    if flag.testing:  # Testing mode is on.
        tc = TEST_COOKIE_NAME % flag_name
        if tc in request.GET:
            on = request.GET[tc] == '1'
            if not hasattr(request, 'waffle_tests'):
                request.waffle_tests = {}
            request.waffle_tests[flag_name] = on
            return on
        if tc in request.COOKIES:
            return request.COOKIES[tc] == 'True'
    user = request.user
    if flag.authenticated and user.is_authenticated():
        return True
    if flag.staff and user.is_staff:
        return True
    if flag.superusers and user.is_superuser:
        return True
    if flag.languages:
        languages = flag.languages.split(',')
        if (hasattr(request, 'LANGUAGE_CODE') and
            request.LANGUAGE_CODE in languages):
            return True
    # Explicit per-user membership (cached alongside the flag).
    flag_users = cache.get(keyfmt(FLAG_USERS_CACHE_KEY, flag.name))
    if flag_users is None:
        flag_users = flag.users.all()
        cache_flag(instance=flag)
    if user in flag_users:
        return True
    # Explicit per-group membership (cached alongside the flag).
    flag_groups = cache.get(keyfmt(FLAG_GROUPS_CACHE_KEY, flag.name))
    if flag_groups is None:
        flag_groups = flag.groups.all()
        cache_flag(instance=flag)
    user_groups = user.groups.all()
    for group in flag_groups:
        if group in user_groups:
            return True
    # Percentage rollout: the decision is made once per user and made sticky
    # via request.waffles (and later a cookie set by the middleware).
    if flag.percent > 0:
        if not hasattr(request, 'waffles'):
            request.waffles = {}
        elif flag_name in request.waffles:
            return request.waffles[flag_name][0]
        cookie = COOKIE_NAME % flag_name
        if cookie in request.COOKIES:
            flag_active = (request.COOKIES[cookie] == 'True')
            set_flag(request, flag_name, flag_active, flag.rollout)
            return flag_active
        if Decimal(str(random.uniform(0, 100))) <= flag.percent:
            set_flag(request, flag_name, True, flag.rollout)
            return True
        set_flag(request, flag_name, False, flag.rollout)
    return False
def switch_is_active(switch_name):
    """Return whether the named switch is globally active.

    Missing switches are cached as a DoesNotExist placeholder, whose
    ``active`` property yields WAFFLE_SWITCH_DEFAULT.
    """
    switch = cache.get(keyfmt(SWITCH_CACHE_KEY, switch_name))
    if switch is None:
        try:
            switch = Switch.objects.get(name=switch_name)
            cache_switch(instance=switch)
        except Switch.DoesNotExist:
            # Cache the miss so unknown switches don't requery the DB.
            switch = DoesNotExist()
            switch.name = switch_name
            cache_switch(instance=switch)
    return switch.active
def sample_is_active(sample_name):
    """Return True for roughly ``sample.percent`` percent of the calls.

    Unlike flags, samples are not sticky: every call rolls the dice again.
    Missing samples fall back to WAFFLE_SAMPLE_DEFAULT.
    """
    sample = cache.get(keyfmt(SAMPLE_CACHE_KEY, sample_name))
    if sample is None:
        try:
            sample = Sample.objects.get(name=sample_name)
            cache_sample(instance=sample)
        except Sample.DoesNotExist:
            return getattr(settings, 'WAFFLE_SAMPLE_DEFAULT', False)
    return Decimal(str(random.uniform(0, 100))) <= sample.percent
def cache_flag(**kwargs):
    """Cache a Flag instance plus its user and group querysets.

    Usable both directly and as an m2m_changed signal receiver.
    """
    action = kwargs.get('action', None)
    # action is included for m2m_changed signal. Only cache on the post_*.
    if not action or action in ['post_add', 'post_remove', 'post_clear']:
        f = kwargs.get('instance')
        cache.add(keyfmt(FLAG_CACHE_KEY, f.name), f)
        cache.add(keyfmt(FLAG_USERS_CACHE_KEY, f.name), f.users.all())
        cache.add(keyfmt(FLAG_GROUPS_CACHE_KEY, f.name), f.groups.all())
def uncache_flag(**kwargs):
    """Invalidate all cache entries for a Flag (signal receiver).

    Overwrites the entries with None for a short TTL (5s) instead of
    deleting them, so racing readers refetch shortly after a change.
    """
    flag = kwargs.get('instance')
    data = {
        keyfmt(FLAG_CACHE_KEY, flag.name): None,
        keyfmt(FLAG_USERS_CACHE_KEY, flag.name): None,
        keyfmt(FLAG_GROUPS_CACHE_KEY, flag.name): None,
        keyfmt(FLAGS_ALL_CACHE_KEY): None
    }
    cache.set_many(data, 5)
# Invalidate cached flag data whenever a Flag (or its user/group m2m
# relations) is saved, deleted, or modified.
post_save.connect(uncache_flag, sender=Flag, dispatch_uid='save_flag')
post_delete.connect(uncache_flag, sender=Flag, dispatch_uid='delete_flag')
m2m_changed.connect(uncache_flag, sender=Flag.users.through,
                    dispatch_uid='m2m_flag_users')
m2m_changed.connect(uncache_flag, sender=Flag.groups.through,
                    dispatch_uid='m2m_flag_groups')
def cache_sample(**kwargs):
    # Store the Sample instance in the cache (add() keeps an existing entry).
    sample = kwargs.get('instance')
    cache.add(keyfmt(SAMPLE_CACHE_KEY, sample.name), sample)
def uncache_sample(**kwargs):
    # Invalidate by overwriting with None for a short TTL (5s), so racing
    # readers refetch shortly after a change.
    sample = kwargs.get('instance')
    cache.set(keyfmt(SAMPLE_CACHE_KEY, sample.name), None, 5)
    cache.set(keyfmt(SAMPLES_ALL_CACHE_KEY), None, 5)

# Invalidate cached sample data on save/delete.
post_save.connect(uncache_sample, sender=Sample, dispatch_uid='save_sample')
post_delete.connect(uncache_sample, sender=Sample,
                    dispatch_uid='delete_sample')
def cache_switch(**kwargs):
    # Store the Switch instance in the cache (add() keeps an existing entry).
    switch = kwargs.get('instance')
    cache.add(keyfmt(SWITCH_CACHE_KEY, switch.name), switch)
def uncache_switch(**kwargs):
    # Invalidate by overwriting with None for a short TTL (5s), so racing
    # readers refetch shortly after a change.
    switch = kwargs.get('instance')
    cache.set(keyfmt(SWITCH_CACHE_KEY, switch.name), None, 5)
    cache.set(keyfmt(SWITCHES_ALL_CACHE_KEY), None, 5)

# Invalidate cached switch data on save/delete.
post_delete.connect(uncache_switch, sender=Switch,
                    dispatch_uid='delete_switch')
post_save.connect(uncache_switch, sender=Switch, dispatch_uid='save_switch')
| ekohl/django-waffle | waffle/__init__.py | Python | bsd-3-clause | 6,842 |
import re
from setuptools import setup, find_packages
# Pull the version string out of the package source without importing it
# (importing would require the package's dependencies at build time).
__version__ = re.search(r"__version__.*\s*=\s*[']([^']+)[']",
                        open('dateparser/__init__.py').read()).group(1)
# Strip Sphinx-only directives so the long description renders as plain
# reStructuredText on PyPI.
introduction = re.sub(r':members:.+|..\sautomodule::.+|:class:|:func:|:ref:',
                      '', open('docs/introduction.rst').read())
history = re.sub(r':mod:|:class:|:func:', '', open('HISTORY.rst').read())
test_requirements = open('tests/requirements.txt').read().splitlines()
setup(
    name='dateparser',
    version=__version__,
    description='Date parsing library designed to parse dates from HTML pages',
    long_description=introduction + '\n\n' + history,
    author='Scrapinghub',
    author_email='info@scrapinghub.com',
    url='https://github.com/scrapinghub/dateparser',
    project_urls={
        'History': 'https://dateparser.readthedocs.io/en/latest/history.html',
    },
    packages=find_packages(exclude=('tests', 'tests.*')),
    include_package_data=True,
    install_requires=[
        'python-dateutil',
        'pytz',
        # https://bitbucket.org/mrabarnett/mrab-regex/issues/314/import-error-no-module-named
        'regex !=2019.02.19,!=2021.8.27',
        'tzlocal',
    ],
    entry_points={
        'console_scripts': ['dateparser-download = dateparser_cli.cli:entrance'],
    },
    # Optional calendar backends differ by Python version.
    extras_require={
        'calendars:python_version<"3.6"': ['convertdate'],
        'calendars:python_version>="3.6"': ['hijri-converter', 'convertdate'],
        'fasttext': ['fasttext'],
        'langdetect': ['langdetect'],
    },
    license="BSD",
    zip_safe=False,
    keywords='dateparser',
    python_requires='>=3.5',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: Implementation :: CPython',
    ],
)
| scrapinghub/dateparser | setup.py | Python | bsd-3-clause | 2,228 |
from django.db import models
from django.utils.translation import ugettext as _
class Category(models.Model):
    # Grouping for map layers; `order` controls display position in lists.
    title = models.CharField(max_length=50)
    slug = models.SlugField()
    order = models.IntegerField(default=1)

    class Meta:
        verbose_name = _('Category')
        verbose_name_plural = _('Categories')
        ordering = ['order']

    def __unicode__(self):
        return self.title
class WMSServer(models.Model):
    # A Web Map Service endpoint; `attribution` is the credit text shown for
    # layers served from it.
    title = models.CharField(max_length=50)
    url = models.URLField()
    attribution = models.CharField(max_length=50)

    class Meta:
        verbose_name = _('WMSServer')
        verbose_name_plural = _('WMSServers')

    def __unicode__(self):
        return self.title
class Layer(models.Model):
    # A WMS map layer with its request parameters (wms_* fields mirror the
    # standard WMS GetMap parameters).
    WMS_FORMAT_OPTIONS = (
        ('image/png', 'image/png'),
        ('image/jpeg', 'image/jpeg'),
    )
    title = models.CharField(max_length=100)
    category = models.ForeignKey(Category)
    # NOTE(review): BooleanField without a default -- presumably layers start
    # hidden; confirm intended default before adding one.
    visible = models.BooleanField()
    # Position within the category list vs. stacking order on the map.
    category_order = models.IntegerField(default=1)
    map_order = models.IntegerField(default=1)
    wms_server = models.ForeignKey(WMSServer)
    wms_layers = models.CharField(max_length=100)
    wms_styles = models.CharField(max_length=100, null=True, blank=True)
    wms_format = models.CharField(max_length=10, choices=WMS_FORMAT_OPTIONS, default='image/png')
    wms_transparent = models.BooleanField(default=True)

    class Meta:
        verbose_name = _('Layer')
        verbose_name_plural = _('Layers')
        ordering = ['category_order']

    def __unicode__(self):
        return self.title
| MAPC/cedac | map/models.py | Python | bsd-3-clause | 1,599 |
#### PATTERN #######################################################################################
# Authors: Tom De Smedt <tom@organisms.be>, Walter Daelemans <walter.daelemans@ua.ac.be>
# License: BSD License, see LICENSE.txt
# Copyright (c) 2010 University of Antwerp, Belgium
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Pattern nor the names of its
# contributors may be used to endorse or promote products
# derived from this software without specific prior written
# permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# CLiPS Computational Linguistics Group, University of Antwerp, Belgium
# http://www.clips.ua.ac.be/pages/pattern
### CREDITS ########################################################################################
__author__ = "Tom De Smedt"
__credits__ = "Tom De Smedt, Walter Daelemans"
__version__ = "2.6"
__copyright__ = "Copyright (c) 2010 University of Antwerp (BE)"
__license__ = "BSD"

####################################################################################################

import os

# Shortcuts to pattern.en, pattern.es, ...
# (instead of pattern.text.en, pattern.text.es, ...):
# extend this package's search path with the "text" subdirectory.
try:
    __path__.append(os.path.join(__path__[0], "text"))
except Exception:
    # Best-effort: keep importing even if __path__ is unavailable (e.g. when
    # not imported as a package).  The previous bare `except:` also swallowed
    # SystemExit/KeyboardInterrupt, which is never desirable.
    pass
#! -*- coding: utf-8 -*-
"""
Retrieval of version number
This file helps to compute a version number in source trees obtained from
git-archive tarball (such as those provided by githubs download-from-tag
feature). Distribution tarballs (built by setup.py sdist) and build
directories (produced by setup.py build) will contain a much shorter file
that just contains the computed version number.
This file was generated by PyScaffold.
"""
import inspect
import os
import re
import subprocess
import sys
# Absolute directory containing this file, resolved via the current frame so
# it works regardless of how the module was imported.
__location__ = os.path.join(os.getcwd(), os.path.dirname(
    inspect.getfile(inspect.currentframe())))
# these strings will be replaced by git during git-archive
git_refnames = "$Format:%d$"
git_full = "$Format:%H$"
# general settings
tag_prefix = 'v'  # tags are like v1.2.0
package = "bonfire"
namespace = []
root_pkg = namespace[0] if namespace else package
# pkg_path: relative path from the project root to the package directory.
if namespace:
    pkg_path = os.path.join(*namespace[-1].split('.') + [package])
else:
    pkg_path = package
class ShellCommand(object):
    """Callable wrapper around a fixed shell command.

    Calling an instance appends the given arguments to the base command,
    runs it (capturing stderr together with stdout), and yields the output
    one line at a time.  A non-zero exit status raises
    subprocess.CalledProcessError.
    """

    def __init__(self, command, shell=True, cwd=None):
        self._command = command
        self._shell = shell
        self._cwd = cwd

    def __call__(self, *args):
        # list2cmdline quotes each argument safely for the shell.
        cmdline = " ".join([self._command, subprocess.list2cmdline(args)])
        output = subprocess.check_output(cmdline,
                                         shell=self._shell,
                                         cwd=self._cwd,
                                         stderr=subprocess.STDOUT,
                                         universal_newlines=True)
        return self._yield_output(output)

    def _yield_output(self, msg):
        # Generator so callers can lazily consume line by line (e.g. next()).
        for line in msg.splitlines():
            yield line
def get_git_cmd(**args):
    """Return a working ShellCommand for git, or None if git is unavailable.

    On Windows both "git.cmd" and "git.exe" are probed; elsewhere plain
    "git" is used.  A candidate counts as working when "git --version"
    runs without error.
    """
    if sys.platform == "win32":
        candidates = ["git.cmd", "git.exe"]
    else:
        candidates = ["git"]
    for name in candidates:
        git = ShellCommand(name, **args)
        try:
            git("--version")
        except (subprocess.CalledProcessError, OSError):
            continue
        return git
    return None
def version_from_git(tag_prefix, root, verbose=False):
    """Derive {"version", "full"} from `git describe` in the source tree.

    Returns None if git is unavailable, describe fails, or the described
    tag does not carry the expected prefix.
    """
    # this runs 'git' from the root of the source tree. This only gets called
    # if the git-archive 'subst' keywords were *not* expanded, and
    # _version.py hasn't already been rewritten with a short version string,
    # meaning we're inside a checked out source tree.
    git = get_git_cmd(cwd=root)
    if not git:
        print("no git found")
        return None
    try:
        tag = next(git("describe", "--tags", "--dirty", "--always"))
    except subprocess.CalledProcessError:
        return None
    if not tag.startswith(tag_prefix):
        if verbose:
            print("tag '{}' doesn't start with prefix '{}'".format(tag,
                                                                   tag_prefix))
        return None
    tag = tag[len(tag_prefix):]
    sha1 = next(git("rev-parse", "HEAD"))
    full = sha1.strip()
    # Propagate the dirty marker from the tag onto the full revision id.
    if tag.endswith("-dirty"):
        full += "-dirty"
    return {"version": tag, "full": full}
def get_keywords(versionfile_abs):
    """Read the git_refnames/git_full keyword strings out of a _version.py.

    The file is scanned textually (never imported) so this is safe to call
    from setup.py.  Returns a dict with whichever of "refnames"/"full" were
    found, or None if the file cannot be read.
    """
    value_re = re.compile(r'=\s*"(.*)"')
    keywords = {}
    try:
        with open(versionfile_abs, "r") as fh:
            for line in fh:
                stripped = line.strip()
                if stripped.startswith("git_refnames ="):
                    match = value_re.search(line)
                    if match:
                        keywords["refnames"] = match.group(1)
                elif stripped.startswith("git_full ="):
                    match = value_re.search(line)
                    if match:
                        keywords["full"] = match.group(1)
    except EnvironmentError:
        return None
    return keywords
def version_from_keywords(keywords, tag_prefix, verbose=False):
    """Derive {"version", "full"} from expanded git-archive keywords.

    Returns None when no keywords were found or they are still the
    unexpanded "$Format..." placeholders.  Falls back to the full revision
    id when no tag with the expected prefix is present.
    """
    if not keywords:
        return None  # keyword-finding function failed to find keywords
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        if verbose:
            print("keywords are unexpanded, not using")
        return None  # unexpanded, so not in an unpacked git-archive tarball
    refs = set(name.strip() for name in refnames.strip("()").split(","))
    # git >= 1.8.3 lists tags as "tag: foo-1.0"; prefer those when present.
    TAG = "tag: "
    tags = set(name[len(TAG):] for name in refs if name.startswith(TAG))
    if not tags:
        # Older git (or genuinely no tags): heuristically keep only refnames
        # containing a digit, filtering out branch names like "master",
        # "release", and "HEAD".
        tags = set(name for name in refs if re.search(r'\d', name))
        if verbose:
            print("discarding '{}', no digits".format(",".join(refs - tags)))
    if verbose:
        print("likely tags: {}".format(",".join(sorted(tags))))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            version = ref[len(tag_prefix):]
            if verbose:
                print("picking {}".format(version))
            return {"version": version,
                    "full": keywords["full"].strip()}
    if verbose:
        print("no suitable tags, using full revision id")
    return {"version": keywords["full"].strip(),
            "full": keywords["full"].strip()}
def version_from_parentdir(parentdir_prefix, root, verbose=False):
    """Derive the version from the unpacked tarball directory name.

    Source tarballs conventionally unpack into "<package>-<version>[...]";
    everything after the prefix and before the first '-' is the version.
    Returns None when the directory name lacks the expected prefix.
    """
    dirname = os.path.basename(root)
    if dirname.startswith(parentdir_prefix):
        version = dirname[len(parentdir_prefix):].split('-')[0]
        return {"version": version, "full": ""}
    if verbose:
        print("guessing rootdir is '{}', but '{}' doesn't start with "
              "prefix '{}'".format(root, dirname, parentdir_prefix))
    return None
def git2pep440(ver_str):
    """Translate a `git describe` string into a PEP 440 version.

    "1.0"                -> "1.0"
    "1.0-dirty"          -> "1.0+dirty"
    "1.0-3-gabc"         -> "1.0.post0.dev3+gabc"
    "1.0-3-gabc-dirty"   -> "1.0.post0.dev3+gabc.dirty"
    Raises RuntimeError for any other shape.
    """
    parts = ver_str.split('-')
    extra = len(parts) - 1
    if extra == 0:
        return ver_str
    if extra == 1:
        return parts[0] + "+dirty"
    if extra == 2:
        tag, commits, sha1 = parts
        return "{}.post0.dev{}+{}".format(tag, commits, sha1)
    if extra == 3:
        tag, commits, sha1 = parts[:3]
        return "{}.post0.dev{}+{}.dirty".format(tag, commits, sha1)
    raise RuntimeError("Invalid version string")
def get_versions(verbose=False):
    """Compute {"version", "full"} by trying each retrieval strategy in turn.

    Order: expanded git-archive keywords, then the unpacked-tarball parent
    directory name, then `git describe`.  Falls back to "unknown".  The
    resulting version string is normalized to PEP 440.
    """
    vcs_kwds = {"refnames": git_refnames, "full": git_full}
    parentdir = package + '-'
    root = __location__
    # pkg_path is the relative path from the top of the source
    # tree (where the .git directory might live) to this file.
    # Invert this to find the root of our package.
    for _ in pkg_path.split(os.sep):
        root = os.path.dirname(root)
    # different version retrieval methods as (method, args, comment)
    ver_retrieval = [
        (version_from_keywords, (vcs_kwds, tag_prefix, verbose),
         'expanded keywords'),
        (version_from_parentdir, (parentdir, root, verbose), 'parentdir'),
        (version_from_git, (tag_prefix, root, verbose), 'git')
    ]
    for method, args, comment in ver_retrieval:
        ver = method(*args)
        if ver:
            if verbose:
                print("got version from {}".format(comment))
            break
    else:
        # No strategy succeeded.
        ver = {"version": "unknown", "full": ""}
    ver['version'] = git2pep440(ver['version'])
    return ver
| blue-yonder/bonfire | bonfire/_version.py | Python | bsd-3-clause | 8,274 |
#!/usr/bin/python2
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Tests of directory storage adapter."""
import os
import unittest
import directory_storage
import fake_storage
import gsd_storage
import hashing_tools
import hashing_tools_test
import working_directory
class TestDirectoryStorage(unittest.TestCase):
    # Exercises DirectoryStorageAdapter round-trips against an in-memory
    # fake storage backend; each test uses a fresh temporary directory.

    def setUp(self):
        storage = fake_storage.FakeStorage()
        self._dir_storage = directory_storage.DirectoryStorageAdapter(storage)

    def test_WriteRead(self):
        # Check that a directory can be written and then read back.
        with working_directory.TemporaryWorkingDirectory() as work_dir:
            temp1 = os.path.join(work_dir, 'temp1')
            temp2 = os.path.join(work_dir, 'temp2')
            hashing_tools_test.GenerateTestTree('write_read', temp1)
            self._dir_storage.PutDirectory(temp1, 'foo')
            self._dir_storage.GetDirectory('foo', temp2)
            self.assertEqual(hashing_tools.StableHashPath(temp1),
                             hashing_tools.StableHashPath(temp2))

    def test_InputUntouched(self):
        # Check that PutDirectory doesn't alter its inputs.
        with working_directory.TemporaryWorkingDirectory() as work_dir:
            temp1 = os.path.join(work_dir, 'temp1')
            hashing_tools_test.GenerateTestTree('input_untouched', temp1)
            h1 = hashing_tools.StableHashPath(temp1)
            self._dir_storage.PutDirectory(temp1, 'hello')
            h2 = hashing_tools.StableHashPath(temp1)
            self.assertEqual(h1, h2)

    def test_URLsPropagate(self):
        # Check that consistent non-None URLs come from get and put.
        with working_directory.TemporaryWorkingDirectory() as work_dir:
            temp1 = os.path.join(work_dir, 'temp1')
            temp2 = os.path.join(work_dir, 'temp2')
            hashing_tools_test.GenerateTestTree('url_propagate', temp1)
            url1 = self._dir_storage.PutDirectory(temp1, 'me')
            url2 = self._dir_storage.GetDirectory('me', temp2)
            self.assertEqual(url1, url2)
            self.assertNotEqual(None, url1)

    def test_BadWrite(self):
        # A gsutil stub that always fails (non-zero exit status).
        def call(cmd):
            return 1
        storage = gsd_storage.GSDStorage(
            gsutil=['mygsutil'],
            write_bucket='mybucket',
            read_buckets=[],
            call=call)
        dir_storage = directory_storage.DirectoryStorageAdapter(storage)
        # Check that storage exceptions come thru on failure.
        with working_directory.TemporaryWorkingDirectory() as work_dir:
            temp1 = os.path.join(work_dir, 'temp1')
            hashing_tools_test.GenerateTestTree('bad_write', temp1)
            self.assertRaises(gsd_storage.GSDStorageError,
                              dir_storage.PutDirectory, temp1, 'bad')

    def test_BadRead(self):
        # Check that storage exceptions come thru on failure.
        with working_directory.TemporaryWorkingDirectory() as work_dir:
            temp1 = os.path.join(work_dir, 'temp1')
            self.assertEqual(None, self._dir_storage.GetDirectory('foo', temp1))
# Support running this test file directly from the command line.
if __name__ == '__main__':
    unittest.main()
| Lind-Project/native_client | build/directory_storage_test.py | Python | bsd-3-clause | 3,028 |
# encoding=utf-8
from .types.compound import (
ModelType, EMPTY_LIST, EMPTY_DICT, MultiType
)
import collections
import itertools
###
### Field ACL's
###
# NOTE(review): this module targets Python 2 -- it uses `func_name` and
# `collections.Set` (moved to `collections.abc.Set` in Python 3).
class Role(collections.Set):
    """A Role object can be used to filter specific fields against a sequence.
    The Role has a set of names and one function that the specific field is
    filtered with.
    A Role can be operated on as a Set object representing its fields. It's
    important to note that when combining multiple roles using these operations
    only the function of the first role is kept on the resulting role.
    """

    def __init__(self, function, fields):
        # function(k, v, fields) -> True means "reject this field".
        self.function = function
        self.fields = set(fields)

    def _from_iterable(self, iterable):
        # Required by collections.Set so set operations return Roles.
        return Role(self.function, iterable)

    def __contains__(self, value):
        return value in self.fields

    def __iter__(self):
        return iter(self.fields)

    def __len__(self):
        return len(self.fields)

    def __eq__(self, other):
        # Roles compare by filter-function name plus field set.
        # NOTE(review): `func_name` is Python 2 only (`__name__` in 3).
        return (self.function.func_name == other.function.func_name and
                self.fields == other.fields)

    def __str__(self):
        return '%s(%s)' % (self.function.func_name,
                           ', '.join("'%s'" % f for f in self.fields))

    def __repr__(self):
        return '<Role %s>' % str(self)

    # edit role fields
    def __add__(self, other):
        fields = self.fields.union(other)
        return self._from_iterable(fields)

    def __sub__(self, other):
        fields = self.fields.difference(other)
        return self._from_iterable(fields)

    # apply role to field
    def __call__(self, k, v):
        return self.function(k, v, self.fields)

    # static filter functions
    @staticmethod
    def wholelist(k, v, seq):
        # Never reject anything.
        return False

    @staticmethod
    def whitelist(k, v, seq):
        # Reject fields not named in seq.
        if seq is not None and len(seq) > 0:
            return k not in seq
        # Default to rejecting the value
        return True

    @staticmethod
    def blacklist(k, v, seq):
        # Reject fields named in seq.
        if seq is not None and len(seq) > 0:
            return k in seq
        # Default to not rejecting the value
        return False
def wholelist(*fields):
    """Build a Role that admits every field.

    Prefer this over an empty blacklist when you want to state explicitly
    that all fields are allowed.
    """
    return Role(Role.wholelist, fields)
def whitelist(*fields):
    """Build a Role that admits only the explicitly named ``fields``.

    A whitelist is a list of fields explicitly named that are allowed.
    """
    return Role(Role.whitelist, fields)
def blacklist(*fields):
    """Build a Role that rejects the explicitly named ``fields``.

    A blacklist is a list of fields explicitly named that are not allowed.
    """
    return Role(Role.blacklist, fields)
###
### Serialization
###
def filter_roles_instance(fields, roles):
    """Return only the (name, field) pairs admitted by ``roles``.

    With no role given, every pair of ``fields`` is returned unfiltered.
    """
    items = fields.iteritems()
    if not roles:
        return items
    wanted = roles.fields
    return [pair for pair in items if pair[0] in wanted]
def atoms(cls, instance_or_dict, include_serializables=True, roles=None):
    """Iterate the atomic components of a model definition and relevant data,
    yielding a triple of the field's name, the field instance, and the value.
    """
    sources = [filter_roles_instance(cls._fields, roles)]
    if include_serializables:
        sources.append(filter_roles_instance(cls._serializables, roles))
    return ((name, field, instance_or_dict[name])
            for name, field in itertools.chain(*sources))
def allow_none(cls, field):
    """Resolve the effective ``serialize_when_none`` setting for ``field``.

    The class-level option is the default; a field overrides it when its own
    ``serialize_when_none`` is set (i.e. not None).
    """
    override = field.serialize_when_none
    if override is not None:
        return override
    return cls._options.serialize_when_none
def apply_shape(cls, instance_or_dict, role, field_converter, model_converter,
                raise_error_on_role=False, include_serializables=True):
    """
    The apply shape function is intended to be a general loop definition that
    can be used for any form of data shaping, such as application of roles or
    how a field is transformed.

    Returns a dict of serialized-name -> converted value. ``gottago`` is the
    role's eviction predicate: truthy means "skip this field".
    """
    data = {}
    ### Translate `role` into `gottago` function
    gottago = wholelist()
    if role in cls._options.roles:
        gottago = cls._options.roles[role]
    elif role and raise_error_on_role:
        error_msg = u'%s has no role "%s"'
        raise ValueError(error_msg % (cls, role))
    ### Transformation loop
    attr_gen = atoms(cls, instance_or_dict, include_serializables, gottago)
    for field_name, field, value in attr_gen:
        serialized_name = field.serialized_name or field_name
        ### Value found, convert and store it.
        if value is not None:
            if isinstance(field, MultiType):
                if isinstance(field, ModelType):
                    # Nested models convert via the model converter, then have
                    # the role applied recursively to their primitive form.
                    primitive_value = model_converter(field, value)
                    primitive_value = field.filter_by_role(value, primitive_value,
                                                           role,
                                                           include_serializables=include_serializables)
                else:
                    primitive_value = field_converter(field, value)
                    primitive_value = field.filter_by_role(value, primitive_value,
                                                           role,
                                                           raise_error_on_role=raise_error_on_role)
            else:
                primitive_value = field_converter(field, value)
            if primitive_value is not None or allow_none(cls, field):
                data[serialized_name] = primitive_value
        ### Store None if requested
        elif allow_none(cls, field):
            data[serialized_name] = value
    return data
def serialize(instance, role, raise_error_on_role=True):
    """
    Implements serialization as a mechanism to convert ``Model`` instances into
    dictionaries that represent the field_names => converted data.

    The conversion is done by calling ``to_primitive`` on both model and field
    instances.
    """
    def convert_field(field, value):
        return field.to_primitive(value)
    def convert_model(field, value):
        return field.to_primitive(value, raise_error_on_role)
    return apply_shape(instance, instance, role, convert_field,
                       convert_model, raise_error_on_role)
def expand(data, context=None):
    """Inverse of ``flatten_to_dict``: turn dotted keys ("a.b.c") back into
    nested dictionaries, honoring the EMPTY_LIST/EMPTY_DICT placeholders.
    """
    expanded_dict = {}
    if context is None:
        context = expanded_dict
    for k, v in data.iteritems():
        try:
            key, remaining = k.split(".", 1)
        except ValueError:
            # Leaf key: store it unless a placeholder would clobber an
            # already-expanded sub-structure.
            if not (v in (EMPTY_DICT, EMPTY_LIST) and k in expanded_dict):
                expanded_dict[k] = v
        else:
            # Dotted key: descend one level, replacing placeholder markers
            # with a real dict before recursing on the remainder.
            current_context = context.setdefault(key, {})
            if current_context in (EMPTY_DICT, EMPTY_LIST):
                current_context = {}
                context[key] = current_context
            current_context.update(expand({remaining: v}, current_context))
    return expanded_dict
def flatten_to_dict(o, prefix=None, ignore_none=True):
    """Flatten a nested dict/list ``o`` into a single-level dict whose keys
    are dot-joined paths (e.g. ``"a.b.0"``). Empty containers are recorded
    with the EMPTY_LIST/EMPTY_DICT sentinels so ``expand`` can restore them.
    """
    # Dicts iterate as (key, value); lists as (index, value).
    if hasattr(o, "iteritems"):
        iterator = o.iteritems()
    else:
        iterator = enumerate(o)
    flat_dict = {}
    for k, v in iterator:
        if prefix:
            key = ".".join(map(unicode, (prefix, k)))
        else:
            key = k
        # Replace empty containers with sentinels so they survive flattening.
        if v == []:
            v = EMPTY_LIST
        elif v == {}:
            v = EMPTY_DICT
        if isinstance(v, (dict, list)):
            flat_dict.update(flatten_to_dict(v, prefix=key))
        elif v is not None:
            flat_dict[key] = v
        elif not ignore_none:
            flat_dict[key] = None
    return flat_dict
def flatten(instance, role, raise_error_on_role=True, ignore_none=True,
            prefix=None, include_serializables=False, **kwargs):
    """Serialize ``instance`` under ``role`` and flatten the result into a
    single-level dict with dot-joined keys (see ``flatten_to_dict``).
    """
    def convert_field(field, value):
        return field.to_primitive(value)
    def convert_model(field, value):
        return field.to_primitive(
            value, include_serializables=include_serializables)
    shaped = apply_shape(instance, instance, role, convert_field,
                         convert_model,
                         include_serializables=include_serializables)
    return flatten_to_dict(shaped, prefix=prefix, ignore_none=ignore_none)
| nKey/schematics | schematics/serialize.py | Python | bsd-3-clause | 8,765 |
# Copyright (c) 2017, The MITRE Corporation. All rights reserved.
# See LICENSE.txt for complete terms.
from mixbox import fields
from mixbox import typedlist
from mixbox import entities
# internal
import stix
import stix.bindings.stix_common as common_binding
class KillChain(stix.Entity):
    """A STIX KillChainType: a named, ordered set of attack phases."""
    # Custom __eq__ would otherwise clear inherited hashability.
    __hash__ = entities.Entity.__hash__
    _binding = common_binding
    _namespace = 'http://stix.mitre.org/common-1'
    _binding_class = _binding.KillChainType
    id_ = fields.TypedField("id")
    name = fields.TypedField("name")
    definer = fields.TypedField("definer")
    reference = fields.TypedField("reference")
    number_of_phases = fields.TypedField("number_of_phases")
    kill_chain_phases = fields.TypedField("Kill_Chain_Phase", type_="stix.common.kill_chains.KillChainPhase", multiple=True, key_name="kill_chain_phases")
    def __init__(self, id_=None, name=None, definer=None, reference=None):
        super(KillChain, self).__init__()
        self.id_ = id_
        self.name = name
        self.definer = definer
        self.reference = reference
        self.number_of_phases = None # can we just do len(self.kill_chain_phases)?
    def add_kill_chain_phase(self, value):
        # Append a KillChainPhase to this chain (coerced by the TypedField).
        self.kill_chain_phases.append(value)
    def __eq__(self, other):
        # Equality is structural: compare serialized dictionary forms.
        if self is other:
            return True
        if not isinstance(other, self.__class__):
            return False
        return other.to_dict() == self.to_dict()
    def __ne__(self, other):
        return not self.__eq__(other)
class KillChains(stix.EntityList):
    """A collection (STIX KillChainsType) of KillChain entries."""
    _binding = common_binding
    _namespace = 'http://stix.mitre.org/common-1'
    _binding_class = _binding.KillChainsType
    kill_chain = fields.TypedField("Kill_Chain", KillChain, multiple=True, key_name="kill_chains")
    @classmethod
    def _dict_as_list(cls):
        # Serialize as a dict keyed by "kill_chains" rather than a bare list.
        return False
class KillChainPhase(stix.Entity):
    """A single phase within a kill chain (STIX KillChainPhaseType)."""
    # Custom __eq__ would otherwise clear inherited hashability.
    __hash__ = entities.Entity.__hash__
    _binding = common_binding
    _namespace = 'http://stix.mitre.org/common-1'
    _binding_class = _binding.KillChainPhaseType
    phase_id = fields.TypedField("phase_id")
    name = fields.TypedField("name")
    ordinality = fields.IntegerField("ordinality")
    def __init__(self, phase_id=None, name=None, ordinality=None):
        super(KillChainPhase, self).__init__()
        self.phase_id = phase_id
        self.name = name
        self.ordinality = ordinality
    def __eq__(self, other):
        # Equality is structural: compare serialized dictionary forms.
        if other is self:
            return True
        if not isinstance(other, KillChainPhase):
            return False
        return other.to_dict() == self.to_dict()
    def __ne__(self, other):
        return not self.__eq__(other)
class KillChainPhaseReference(KillChainPhase):
    """A reference to a phase of a (possibly externally defined) kill chain,
    identified by the owning chain's id and name.
    """
    _binding = common_binding
    _namespace = 'http://stix.mitre.org/common-1'
    _binding_class = _binding.KillChainPhaseReferenceType
    kill_chain_id = fields.TypedField("kill_chain_id")
    kill_chain_name = fields.TypedField("kill_chain_name")
    def __init__(self, phase_id=None, name=None, ordinality=None, kill_chain_id=None, kill_chain_name=None):
        super(KillChainPhaseReference, self).__init__(phase_id, name, ordinality)
        self.kill_chain_id = kill_chain_id
        self.kill_chain_name = kill_chain_name
class _KillChainPhaseReferenceList(typedlist.TypedList):
    """TypedList that coerces KillChainPhase items into references."""
    def __init__(self, *args):
        super(_KillChainPhaseReferenceList, self).__init__(type=KillChainPhaseReference, *args)
    def _fix_value(self, value):
        # Non-phase values fall back to the generic coercion logic.
        if not isinstance(value, KillChainPhase):
            return super(_KillChainPhaseReferenceList, self)._fix_value(value)
        # A full phase can only be referenced through its phase_id.
        if value.phase_id:
            return KillChainPhaseReference(phase_id=value.phase_id)
        raise ValueError("KillChainPhase must have a phase_id.")
class KillChainPhasesReference(stix.EntityList):
    """A collection of KillChainPhaseReference entries
    (STIX KillChainPhasesReferenceType).
    """
    _binding = common_binding
    _namespace = 'http://stix.mitre.org/common-1'
    _binding_class = _binding.KillChainPhasesReferenceType
    kill_chain_phase = fields.TypedField(
        name="Kill_Chain_Phase",
        type_=KillChainPhaseReference,
        multiple=True,
        # Coerce appended KillChainPhase objects into references.
        listfunc=_KillChainPhaseReferenceList,
        key_name="kill_chain_phases"
    )
    @classmethod
    def _dict_as_list(cls):
        # Serialize as a dict keyed by "kill_chain_phases", not a bare list.
        return False
# NOT AN ACTUAL STIX TYPE!
class _KillChainPhases(stix.TypedList):
    """Internal convenience list restricted to KillChainPhase items."""
    _contained_type = KillChainPhase
| STIXProject/python-stix | stix/common/kill_chains/__init__.py | Python | bsd-3-clause | 4,359 |
#-----------------------------------------------------------------------------
# Copyright (c) 2012 - 2020, Anaconda, Inc., and Bokeh Contributors.
# All rights reserved.
#
# The full license is in the file LICENSE.txt, distributed with this software.
#-----------------------------------------------------------------------------
'''
'''
#-----------------------------------------------------------------------------
# Boilerplate
#-----------------------------------------------------------------------------
import logging # isort:skip
log = logging.getLogger(__name__)
#-----------------------------------------------------------------------------
# Imports
#-----------------------------------------------------------------------------
# Bokeh imports
from .doc import curdoc
from .export import export_png, export_svgs
from .notebook import install_jupyter_hooks, install_notebook_hook, push_notebook
from .output import output_file, output_notebook, reset_output
from .saving import save
from .showing import show
#-----------------------------------------------------------------------------
# Globals and constants
#-----------------------------------------------------------------------------
__all__ = (
'curdoc',
'export_png',
'export_svgs',
'install_notebook_hook',
'push_notebook',
'output_file',
'output_notebook',
'save',
'show',
)
#-----------------------------------------------------------------------------
# General API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
# Register the Jupyter display hooks once at import time, then remove the
# helper so it does not leak into the module's namespace.
install_jupyter_hooks()
del install_jupyter_hooks
| ericmjl/bokeh | bokeh/io/__init__.py | Python | bsd-3-clause | 2,128 |
from __future__ import unicode_literals
from django.db import models
from django.utils import timezone
class Post(models.Model):
    """A blog post authored by a Django auth user."""
    # NOTE(review): ForeignKey without on_delete implies Django < 2.0.
    author = models.ForeignKey('auth.User')
    title = models.CharField(max_length=200)
    text = models.TextField()
    # Callable default: stamped at creation time, per instance.
    created_date = models.DateTimeField(
        default=timezone.now)
    # Remains NULL until the post is published.
    published_date = models.DateTimeField(
        blank=True, null=True)
    def published (self):
        # Mutator: stamps the publish time and persists the post.
        # NOTE(review): conventionally named ``publish``; renaming would break
        # existing callers, so it is only flagged here.
        self.published_date = timezone.now()
        self.save()
    def __str__(self):
        return self.title
| curlyjr25/FrogWebsite | frogs/apps/frogblog/models.py | Python | bsd-3-clause | 521 |
def get_all_styles():
    """Return the pygments styles previously registered by richtemplates
    at ``richtemplates.settings.REGISTERED_PYGMENTS_STYLES``.
    """
    # Imported lazily so module import does not require settings to be ready.
    from richtemplates import settings
    return settings.REGISTERED_PYGMENTS_STYLES
def get_style(alias):
    """Look up a registered pygments style class by its alias.

    Available aliases may be retrieved with ``get_all_styles``.
    """
    styles = get_all_styles()
    return styles[alias]
| lukaszb/django-richtemplates | richtemplates/pygstyles/__init__.py | Python | bsd-3-clause | 437 |
# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
#
#
# Parts of this code is from IPyVolume (24.05.2017), used here under
# this copyright and license with permission from the author
# (see https://github.com/jupyter-widgets/ipywidgets/pull/1387)
"""
Functions for generating embeddable HTML/javascript of a widget.
"""
import json
import re
from .widgets import Widget, DOMWidget
from .widgets.widget_link import Link
from .widgets.docutils import doc_subst
from ._version import __html_manager_version__
snippet_template = u"""
{load}
<script type="application/vnd.jupyter.widget-state+json">
{json_data}
</script>
{widget_views}
"""
load_template = u"""<script src="{embed_url}"{use_cors}></script>"""
load_requirejs_template = u"""
<!-- Load require.js. Delete this if your page already loads require.js -->
<script src="https://cdnjs.cloudflare.com/ajax/libs/require.js/2.3.4/require.min.js" integrity="sha256-Ae2Vz/4ePdIu6ZyI/5ZGsYnb+m0JlOmKPjt6XZ9JJkA=" crossorigin="anonymous"></script>
<script src="{embed_url}"{use_cors}></script>
"""
requirejs_snippet_template = u"""
<script type="application/vnd.jupyter.widget-state+json">
{json_data}
</script>
{widget_views}
"""
html_template = u"""<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>{title}</title>
</head>
<body>
{snippet}
</body>
</html>
"""
widget_view_template = u"""<script type="application/vnd.jupyter.widget-view+json">
{view_spec}
</script>"""
DEFAULT_EMBED_SCRIPT_URL = u'https://unpkg.com/@jupyter-widgets/html-manager@%s/dist/embed.js'%__html_manager_version__
DEFAULT_EMBED_REQUIREJS_URL = u'https://unpkg.com/@jupyter-widgets/html-manager@%s/dist/embed-amd.js'%__html_manager_version__
_doc_snippets = {}
_doc_snippets['views_attribute'] = """
views: widget or collection of widgets or None
The widgets to include views for. If None, all DOMWidgets are
included (not just the displayed ones).
"""
_doc_snippets['embed_kwargs'] = """
drop_defaults: boolean
Whether to drop default values from the widget states.
state: dict or None (default)
The state to include. When set to None, the state of all widgets
know to the widget manager is included. Otherwise it uses the
passed state directly. This allows for end users to include a
smaller state, under the responsibility that this state is
sufficient to reconstruct the embedded views.
indent: integer, string or None
The indent to use for the JSON state dump. See `json.dumps` for
full description.
embed_url: string or None
Allows for overriding the URL used to fetch the widget manager
for the embedded code. This defaults (None) to an `unpkg` CDN url.
requirejs: boolean (True)
Enables the requirejs-based embedding, which allows for custom widgets.
If True, the embed_url should point to an AMD module.
cors: boolean (True)
If True avoids sending user credentials while requesting the scripts.
When opening an HTML file from disk, some browsers may refuse to load
the scripts.
"""
def _find_widget_refs_by_state(widget, state):
    """Find references to other widgets in a widget's state.

    Yields every Widget reachable through the attributes named in ``state``:
    direct attribute values plus items of list/tuple/dict attributes.
    """
    # Copy keys to allow changes to state during iteration:
    keys = tuple(state.keys())
    for key in keys:
        value = getattr(widget, key)
        # Trivial case: Direct references to other widgets:
        if isinstance(value, Widget):
            yield value
        # Also check for buried references in known, JSON-able structures
        # Note: This might miss references buried in more esoteric structures
        elif isinstance(value, (list, tuple)):
            for item in value:
                if isinstance(item, Widget):
                    yield item
        elif isinstance(value, dict):
            for item in value.values():
                if isinstance(item, Widget):
                    yield item
def _get_recursive_state(widget, store=None, drop_defaults=False):
    """Collect the embed state of ``widget`` and, transitively, of every
    widget its serialized state refers to. Returns a dict keyed by model id.
    """
    if store is None:
        store = {}
    state = widget._get_embed_state(drop_defaults=drop_defaults)
    store[widget.model_id] = state
    # Only references present in the serialized state are followed;
    # excluded values are deliberately not traversed.
    for referenced in _find_widget_refs_by_state(widget, state['state']):
        if referenced.model_id not in store:
            _get_recursive_state(referenced, store, drop_defaults=drop_defaults)
    return store
def add_resolved_links(store, drop_defaults):
    """Adds the state of any link models between two models in store.

    A Link is included only when BOTH of its endpoints are already present.
    """
    for widget_id, widget in Widget.widgets.items(): # go over all widgets
        if isinstance(widget, Link) and widget_id not in store:
            if widget.source[0].model_id in store and widget.target[0].model_id in store:
                store[widget.model_id] = widget._get_embed_state(drop_defaults=drop_defaults)
def dependency_state(widgets, drop_defaults=True):
    """Get the state of all widgets specified, and their dependencies.

    This uses a simple dependency finder, including:

     - any widget directly referenced in the state of an included widget
     - any widget in a list/tuple attribute in the state of an included widget
     - any widget in a dict attribute in the state of an included widget
     - any jslink/jsdlink between two included widgets

    What this algorithm does not do:

     - Find widget references in nested list/dict structures
     - Find widget references in other types of attributes

    Note that this searches the state of the widgets for references, so if
    a widget reference is not included in the serialized state, it won't
    be considered as a dependency.

    Parameters
    ----------
    widgets: single widget or list of widgets.
        This function will return the state of every widget mentioned
        and of all their dependencies.
    drop_defaults: boolean
        Whether to drop default values from the widget states.

    Returns
    -------
    A dictionary with the state of the widgets and any widget they
    depend on.
    """
    # collect the state of all relevant widgets
    if widgets is None:
        # Get state of all widgets, no smart resolution needed.
        state = Widget.get_manager_state(drop_defaults=drop_defaults, widgets=None)['state']
    else:
        # Accept a bare widget as well as a sequence of widgets.
        try:
            widgets[0]
        except (IndexError, TypeError):
            widgets = [widgets]
        state = {}
        for widget in widgets:
            _get_recursive_state(widget, state, drop_defaults)
        # Add any links between included widgets:
        add_resolved_links(state, drop_defaults)
    return state
@doc_subst(_doc_snippets)
def embed_data(views, drop_defaults=True, state=None):
    """Gets data for embedding.

    Use this to get the raw data for embedding if you have special
    formatting needs.

    Parameters
    ----------
    {views_attribute}
    drop_defaults: boolean
        Whether to drop default values from the widget states.
    state: dict or None (default)
        The state to include. When set to None, the state of all widgets
        know to the widget manager is included. Otherwise it uses the
        passed state directly. This allows for end users to include a
        smaller state, under the responsibility that this state is
        sufficient to reconstruct the embedded views.

    Returns
    -------
    A dictionary with the following entries:
        manager_state: dict of the widget manager state data
        view_specs: a list of widget view specs
    """
    # Accept None (all DOMWidgets), a bare widget, or a sequence of widgets.
    if views is None:
        views = [w for w in Widget.widgets.values() if isinstance(w, DOMWidget)]
    else:
        try:
            views[0]
        except (IndexError, TypeError):
            views = [views]
    if state is None:
        # Get state of all known widgets
        state = Widget.get_manager_state(drop_defaults=drop_defaults, widgets=None)['state']
    # Rely on ipywidget to get the default values
    json_data = Widget.get_manager_state(widgets=[])
    # but plug in our own state
    json_data['state'] = state
    view_specs = [w.get_view_spec() for w in views]
    return dict(manager_state=json_data, view_specs=view_specs)
script_escape_re = re.compile(r'<(script|/script|!--)', re.IGNORECASE)

def escape_script(s):
    """Escape a string that will be the content of an HTML script tag.

    We replace the opening bracket of <script, </script, and <!-- with the
    literal six-character sequence ``\\u003c`` (the JSON escape for ``<``),
    so the browser's HTML parser cannot terminate the script element while
    JSON.parse still restores the original text. This is inspired by the
    documentation for the script tag at
    https://html.spec.whatwg.org/multipage/scripting.html#restrictions-for-contents-of-script-elements

    We only replace these three cases so that most html or other content
    involving `<` is readable.
    """
    # Bug fix: a template string replacement of r'\u003c\1' raises
    # re.error("bad escape \\u") on Python >= 3.7, where unknown escapes in
    # replacement templates are errors. A callable replacement sidesteps
    # template parsing entirely.
    return script_escape_re.sub(lambda match: '\\u003c' + match.group(1), s)
@doc_subst(_doc_snippets)
def embed_snippet(views,
                  drop_defaults=True,
                  state=None,
                  indent=2,
                  embed_url=None,
                  requirejs=True,
                  cors=True
                 ):
    """Return a snippet that can be embedded in an HTML file.

    Parameters
    ----------
    {views_attribute}
    {embed_kwargs}

    Returns
    -------
    A unicode string with an HTML snippet containing several `<script>` tags.
    """
    data = embed_data(views, drop_defaults=drop_defaults, state=state)
    # One <script type="application/vnd.jupyter.widget-view+json"> per view.
    widget_views = u'\n'.join(
        widget_view_template.format(view_spec=escape_script(json.dumps(view_spec)))
        for view_spec in data['view_specs']
    )
    if embed_url is None:
        embed_url = DEFAULT_EMBED_REQUIREJS_URL if requirejs else DEFAULT_EMBED_SCRIPT_URL
    load = load_requirejs_template if requirejs else load_template
    use_cors = ' crossorigin="anonymous"' if cors else ''
    values = {
        'load': load.format(embed_url=embed_url, use_cors=use_cors),
        'json_data': escape_script(json.dumps(data['manager_state'], indent=indent)),
        'widget_views': widget_views,
    }
    return snippet_template.format(**values)
@doc_subst(_doc_snippets)
def embed_minimal_html(fp, views, title=u'IPyWidget export', template=None, **kwargs):
    """Write a minimal HTML file with widget views embedded.

    Parameters
    ----------
    fp: filename or file-like object
        The file to write the HTML output to.
    {views_attribute}
    title: title of the html page.
    template: Template in which to embed the widget state.
        This should be a Python string with placeholders
        `{{title}}` and `{{snippet}}`. The `{{snippet}}` placeholder
        will be replaced by all the widgets.
    {embed_kwargs}
    """
    snippet = embed_snippet(views, **kwargs)
    if template is None:
        template = html_template
    rendered = template.format(title=title, snippet=snippet)
    writer = getattr(fp, 'write', None)
    if writer is not None:
        # Caller provided a file-like object; leave closing to the caller.
        writer(rendered)
    else:
        # Otherwise treat fp as a path on disk.
        with open(fp, "w") as handle:
            handle.write(rendered)
| ipython/ipywidgets | ipywidgets/embed.py | Python | bsd-3-clause | 11,226 |
from baseplate.events import FieldKind
from pylons import app_globals as g
from r2.lib.eventcollector import (
EventQueue,
Event,
squelch_exceptions,
)
from r2.lib.utils import sampled
from r2.models import (
FakeSubreddit,
)
class AdEvent(Event):
    """Base event for ad serving; augments context with Do Not Track info."""
    @classmethod
    def get_context_data(cls, request, context):
        data = super(AdEvent, cls).get_context_data(request, context)
        # DNT header: "1" means the user opted out of tracking; when the
        # header is absent the field is omitted entirely (state unknown).
        dnt_header = request.headers.get("DNT", None)
        if dnt_header is not None:
            data["dnt"] = dnt_header == "1"
        return data
class AdzerkAPIEvent(Event):
    """Event describing a call made to the adzerk API."""
    def add_target_fields(self, thing):
        # Identify the Thing the API request was derived from.
        self.add("target_fullname", thing._fullname)
        self.add("target_type", thing.__class__.__name__)
        self.add("is_deleted", thing._deleted)
    def add_caller_fields(self, user):
        # No user means the call was made by an automated process.
        if user:
            self.add("caller_user_id", user._id)
            self.add("caller_user_name", user.name)
        else:
            self.add("is_automated", True)
    def add_error_fields(self, error):
        # ``error`` is an AdzerkError-like object; omitted on success.
        if error:
            self.add("error_status_code", error.status_code)
            self.add("error_body", error.response_body)
class AdEventQueue(EventQueue):
    """Event queue for ad serving and adzerk API telemetry."""
    @squelch_exceptions
    @sampled("events_collector_ad_serving_sample_rate")
    def ad_request(
        self,
        keywords,
        properties,
        platform,
        placements,
        is_refresh,
        subreddit=None,
        request=None,
        context=None,
    ):
        """Create an `ad_request` for event-collector.

        keywords: Array of keywords used to select the ad.
        properties: Object contain custom targeting parameters.
        platform: The platform the ad was requested for.
        placements: Array of placement objects (name, types) to be filled.
        is_refresh: Whether or not the request is for the initial ad or a
            refresh after refocusing the page.
        subreddit: The Subreddit of the ad was displayed on.
        request, context: Should be pylons.request & pylons.c respectively;
        """
        event = AdEvent(
            topic="ad_serving_events",
            event_type="ss.ad_request",
            request=request,
            context=context,
        )
        # keywords are case insensitive, normalize and sort them
        # for easier equality testing.
        keywords = sorted(k.lower() for k in keywords)
        event.add("keywords", keywords)
        event.add("properties", properties)
        event.add("platform", platform)
        event.add("placements", placements)
        event.add("is_refresh", is_refresh)
        # Fake subreddits (e.g. front page) carry no real subreddit fields.
        if not isinstance(subreddit, FakeSubreddit):
            event.add_subreddit_fields(subreddit)
        self.save_event(event)

    @squelch_exceptions
    @sampled("events_collector_ad_serving_sample_rate")
    def ad_response(
        self,
        keywords,
        properties,
        platform,
        placement_name,
        placement_type,
        adserver_ad_id,
        adserver_campaign_id,
        adserver_creative_id,
        adserver_flight_id,
        impression_id,
        matched_keywords,
        rate_type,
        clearing_price,
        link_fullname=None,
        campaign_fullname=None,
        subreddit=None,
        priority=None,
        ecpm=None,
        request=None,
        context=None,
    ):
        """Create an `ad_response` for event-collector.

        keywords: Array of keywords used to select the ad.
        properties: Object contain custom targeting parameters.
        platform: The platform the ad was requested for.
        placement_name: The identifier of the placement.
        placement_type: The type of placement the ad is.
        adserver_ad_id: Unique id of the ad response (from the ad server).
        adserver_campaign_id: Unique id of the ad campaign (from the ad server).
        adserver_creative_id: Unique id of the ad creative (from the ad server).
        adserver_flight_id: Unique id of the ad flight (from the ad server).
        impression_id: Unique id of the impression.
        matched_keywords: An array of the keywords which matched for the ad.
        rate_type: Flat/CPM/CPC/etc.
        clearing_price: What was paid for the rate type.
        link_fullname: The fullname of the promoted link.
        campaign_fullname: The fullname of the PromoCampaign.
        subreddit: The Subreddit of the ad was displayed on.
        priority: The priority name of the ad.
        ecpm: The effective cpm of the ad.
        request, context: Should be pylons.request & pylons.c respectively;
        """
        event = AdEvent(
            topic="ad_serving_events",
            event_type="ss.ad_response",
            request=request,
            context=context,
        )
        event.add("properties", properties)
        event.add("platform", platform)
        event.add("placement_name", placement_name)
        event.add("placement_type", placement_type)
        event.add("adserver_ad_id", adserver_ad_id)
        event.add("adserver_campaign_id", adserver_campaign_id)
        event.add("adserver_creative_id", adserver_creative_id)
        event.add("adserver_flight_id", adserver_flight_id)
        # Marked high-cardinality: unique per impression.
        event.add("impression_id",
                  impression_id, kind=FieldKind.HIGH_CARDINALITY)
        event.add("rate_type", rate_type)
        event.add("clearing_price", clearing_price)
        event.add("link_fullname", link_fullname)
        event.add("campaign_fullname", campaign_fullname)
        event.add("priority", priority)
        event.add("ecpm", ecpm)
        # keywords are case insensitive, normalize and sort them
        # for easier equality testing.
        keywords = sorted(k.lower() for k in keywords)
        event.add("keywords", keywords)
        # don't send empty arrays.
        if matched_keywords:
            matched_keywords = sorted(k.lower() for k in matched_keywords)
            event.add("matched_keywords", matched_keywords)
        if not isinstance(subreddit, FakeSubreddit):
            event.add_subreddit_fields(subreddit)
        self.save_event(event)

    @squelch_exceptions
    def adzerk_api_request(
        self,
        request_type,
        thing,
        request_body,
        triggered_by=None,
        additional_data=None,
        request_error=None,
    ):
        """
        Create an `adzerk_api_events` event for event-collector.

        request_type: The type of request being made
        thing: The `Thing` which the request data is derived from
        request_body: The JSON payload to be sent to adzerk
        triggered_by: The user who triggered the API call
        additional_data: A dict of any additional meta data that may be
            relevant to the request
        request_error: An `adzerk_api.AdzerkError` if the request fails
        """
        event = AdzerkAPIEvent(
            topic='adzerk_api_events',
            event_type='ss.%s_request' % request_type,
        )
        event.add_target_fields(thing)
        event.add_caller_fields(triggered_by)
        event.add_error_fields(request_error)
        event.add("request_body", request_body)
        if additional_data:
            for key, value in additional_data.iteritems():
                event.add(key, value)
        self.save_event(event)
| madbook/reddit-plugin-adzerk | reddit_adzerk/lib/events.py | Python | bsd-3-clause | 7,482 |
from django.contrib.auth.models import User
from esus.phorum.models import Category, Table
__all__ = ("user_super", "users_usual", "table_simple")
def user_super(case):
    """Attach a superuser fixture to ``case`` as ``case.user_super``."""
    case.user_super = User.objects.create(
        username = "superuser",
        # Pre-hashed throwaway password shared by all test fixtures.
        password = "sha1$aaa$b27189d65f3a148a8186753f3f30774182d923d5",
        first_name = "Esus",
        last_name = "master",
        is_staff = True,
        is_superuser = True,
    )
def users_usual(case):
    """Attach three fixtures to ``case``: two regular users
    (``user_tester``, ``user_john_doe``) and one staff member (``user_staff``).
    """
    case.user_tester = User.objects.create(
        username = "Tester",
        # Pre-hashed throwaway password shared by all test fixtures.
        password = "sha1$aaa$b27189d65f3a148a8186753f3f30774182d923d5",
        first_name = "I",
        last_name = "Robot",
        is_staff = False,
        is_superuser = False,
    )
    case.user_john_doe = User.objects.create(
        username = "JohnDoe",
        password = "sha1$aaa$b27189d65f3a148a8186753f3f30774182d923d5",
        first_name = "John",
        last_name = "Doe",
        is_staff = False,
        is_superuser = False,
    )
    case.user_staff = User.objects.create(
        username = "Gnome",
        password = "sha1$aaa$b27189d65f3a148a8186753f3f30774182d923d5",
        first_name = "Wiki",
        last_name = "Gnome",
        is_staff = True,
        is_superuser = False,
    )
def table_simple(case, table_owner=None):
    """Attach ``case.category`` and ``case.table`` fixtures.

    The table is owned by ``table_owner`` or, by default, ``case.user_tester``
    (which must already exist, e.g. via ``users_usual``).
    """
    case.category = Category.objects.create(
        name = u"Category",
        slug = u"category",
    )
    case.table = case.category.add_table(
        name = u"Table",
        owner = table_owner or case.user_tester,
    )
def comment_simple(case, table=None, author=None):
    """Attach two comment fixtures to ``case``: ``comment_doe`` from
    ``author`` (default ``case.user_john_doe``) and ``comment_owner`` from
    the table's owner.

    Comments are added to ``table`` (default ``case.table``).
    """
    table = table or case.table
    author = author or case.user_john_doe
    # Bug fix: the resolved local ``table`` is now used; previously the
    # calls went to ``case.table`` unconditionally, silently ignoring an
    # explicitly passed ``table`` argument.
    case.comment_doe = table.add_comment(
        author = author,
        text = u"Humble user's comment"
    )
    case.comment_owner = table.add_comment(
        author = table.owner,
        text = u"Table 0wn3rz comment"
    )
| ella/esus | tests/unit_project/tests/fixtures.py | Python | bsd-3-clause | 1,899 |
# Publishing helpers differ between ArcGIS Desktop (which exposes
# ``arcpy.mapping``) and ArcGIS Pro (which does not): probe for the Desktop
# API and fall back to the Pro implementation.
try:
    import arcpy.mapping
    from ._publishing import (convert_desktop_map_to_service_draft as convert_map_to_service_draft,
                              convert_toolbox_to_service_draft)
except Exception:
    # Bug fix: a bare ``except:`` also swallowed SystemExit and
    # KeyboardInterrupt. ``Exception`` keeps the permissive Desktop-probe
    # behavior (arcpy can raise more than ImportError) without trapping
    # interpreter-exit signals.
    from ._publishing import (convert_pro_map_to_service_draft as convert_map_to_service_draft,
                              convert_toolbox_to_service_draft)
| DavidWhittingham/arcpyext | arcpyext/publishing/__init__.py | Python | bsd-3-clause | 362 |
"""HTML utilities suitable for global use."""
from __future__ import unicode_literals
import re
from django.utils.encoding import force_text, force_str
from django.utils.functional import allow_lazy
from django.utils.safestring import SafeData, mark_safe
from django.utils import six
from django.utils.six.moves.urllib.parse import quote, unquote, urlsplit, urlunsplit
from django.utils.text import normalize_newlines
from .html_parser import HTMLParser, HTMLParseError
# Configuration for urlize() function.
# NOTE: the HTML-entity strings in these constants ('&lt;', '&middot;',
# '&nbsp;', ...) had been corrupted to their literal characters (which made
# the entity-aware entries duplicates/no-ops); restored here to match the
# upstream Django definitions.
TRAILING_PUNCTUATION = ['.', ',', ':', ';', '.)', '"', '\'']
WRAPPING_PUNCTUATION = [('(', ')'), ('<', '>'), ('[', ']'), ('&lt;', '&gt;'), ('"', '"'), ('\'', '\'')]
# List of possible strings used for bullets in bulleted lists.
DOTS = ['&middot;', '*', '\u2022', '&#149;', '&bull;', '&#8226;']
unencoded_ampersands_re = re.compile(r'&(?!(\w+|#\d+);)')
word_split_re = re.compile(r'(\s+)')
simple_url_re = re.compile(r'^https?://\[?\w', re.IGNORECASE)
simple_url_2_re = re.compile(r'^www\.|^(?!http)\w[^@]+\.(com|edu|gov|int|mil|net|org)$', re.IGNORECASE)
simple_email_re = re.compile(r'^\S+@\S+\.\S+$')
link_target_attribute_re = re.compile(r'(<a [^>]*?)target=[^\s>]+')
html_gunk_re = re.compile(r'(?:<br clear="all">|<i><\/i>|<b><\/b>|<em><\/em>|<strong><\/strong>|<\/?smallcaps>|<\/?uppercase>)', re.IGNORECASE)
hard_coded_bullets_re = re.compile(r'((?:<p>(?:%s).*?[a-zA-Z].*?</p>\s*)+)' % '|'.join(re.escape(x) for x in DOTS), re.DOTALL)
trailing_empty_content_re = re.compile(r'(?:<p>(?:&nbsp;|\s|<br \/>)*?</p>\s*)+\Z')
def escape(text):
    """
    Returns the given text with ampersands, quotes and angle brackets encoded for use in HTML.
    """
    # The replacement entities had been corrupted to their literal characters
    # (e.g. '&' -> '&', and '&#39;' collapsed into bare quotes), which made
    # every replace a no-op and broke the line's syntax; restored to the
    # upstream Django entity mapping.  '&' must be replaced first so the
    # other entities are not double-escaped.
    return mark_safe(force_text(text).replace('&', '&amp;').replace('<', '&lt;').replace('>', '&gt;').replace('"', '&quot;').replace("'", '&#39;'))
# Lazy wrapper so escape() can be applied to lazy translation strings.
escape = allow_lazy(escape, six.text_type)
# Translation table (for str.translate) mapping characters that are unsafe
# inside JavaScript string literals to their \uXXXX escape sequences.
_js_escapes = {
    ord('\\'): '\\u005C',
    ord('\''): '\\u0027',
    ord('"'): '\\u0022',
    ord('>'): '\\u003E',
    ord('<'): '\\u003C',
    ord('&'): '\\u0026',
    ord('='): '\\u003D',
    ord('-'): '\\u002D',
    ord(';'): '\\u003B',
    # U+2028/U+2029 are line terminators in JavaScript source and must be
    # escaped even though they are legal in JSON.
    ord('\u2028'): '\\u2028',
    ord('\u2029'): '\\u2029'
}
# Escape every ASCII character with a value less than 32.
_js_escapes.update((ord('%c' % z), '\\u%04X' % z) for z in range(32))
def escapejs(value):
    """Hex encodes characters for use in JavaScript strings."""
    # One pass over the text via str.translate with the _js_escapes table.
    return mark_safe(force_text(value).translate(_js_escapes))
# Lazy wrapper so escapejs() can be applied to lazy translation strings.
escapejs = allow_lazy(escapejs, six.text_type)
def conditional_escape(text):
    """
    Similar to escape(), except that it doesn't operate on pre-escaped strings.
    """
    # Objects advertising __html__ are already safe markup; use their own
    # rendering instead of escaping them a second time.
    html_method = getattr(text, '__html__', None)
    if html_method is not None:
        return html_method()
    return escape(text)
def format_html(format_string, *args, **kwargs):
    """
    Similar to str.format, but passes all arguments through conditional_escape,
    and calls 'mark_safe' on the result. This function should be used instead
    of str.format or % interpolation to build up small HTML fragments.
    """
    safe_args = tuple(conditional_escape(arg) for arg in args)
    safe_kwargs = dict((name, conditional_escape(value))
                       for (name, value) in six.iteritems(kwargs))
    rendered = format_string.format(*safe_args, **safe_kwargs)
    return mark_safe(rendered)
def format_html_join(sep, format_string, args_generator):
    """
    A wrapper of format_html, for the common case of a group of arguments that
    need to be formatted using the same format string, and then joined using
    'sep'. 'sep' is also passed through conditional_escape.
    'args_generator' should be an iterator that returns the sequence of 'args'
    that will be passed to format_html.
    Example:
      format_html_join('\n', "<li>{0} {1}</li>", ((u.first_name, u.last_name)
                                                  for u in users))
    """
    pieces = (format_html(format_string, *args) for args in args_generator)
    joined = conditional_escape(sep).join(pieces)
    return mark_safe(joined)
def linebreaks(value, autoescape=False):
    """Converts newlines into <p> and <br />s."""
    # Blank-line runs separate paragraphs; single newlines become <br />.
    normalized = normalize_newlines(value)
    paragraphs = re.split('\n{2,}', normalized)
    def as_paragraph(par):
        return '<p>%s</p>' % par.replace('\n', '<br />')
    if autoescape:
        paragraphs = [as_paragraph(escape(par)) for par in paragraphs]
    else:
        paragraphs = [as_paragraph(par) for par in paragraphs]
    return '\n\n'.join(paragraphs)
# Lazy wrapper so linebreaks() can be applied to lazy translation strings.
linebreaks = allow_lazy(linebreaks, six.text_type)
class MLStripper(HTMLParser):
    """HTMLParser subclass that accumulates everything except markup.

    Used by strip_tags() below; entity and character references are
    re-emitted verbatim rather than decoded.
    """
    def __init__(self):
        HTMLParser.__init__(self)
        self.reset()
        self.fed = []  # collected text fragments, joined by get_data()
    def handle_data(self, d):
        self.fed.append(d)
    def handle_entityref(self, name):
        # Preserve named entities (e.g. &amp;) unchanged.
        self.fed.append('&%s;' % name)
    def handle_charref(self, name):
        # Preserve numeric character references (e.g. &#39;) unchanged.
        self.fed.append('&#%s;' % name)
    def get_data(self):
        return ''.join(self.fed)
def strip_tags(value):
    """Returns the given HTML with all tags stripped."""
    stripper = MLStripper()
    try:
        stripper.feed(value)
        stripper.close()
    except HTMLParseError:
        # Markup too malformed for the parser: return the input unchanged.
        return value
    return stripper.get_data()
# Lazy wrapper so strip_tags() can be applied to lazy translation strings.
strip_tags = allow_lazy(strip_tags)
def remove_tags(html, tags):
    """Returns the given HTML with given tags removed."""
    # *tags* is a space-separated list of tag names; build one alternation
    # group matching any of them.
    escaped_names = [re.escape(name) for name in tags.split()]
    group = '(%s)' % '|'.join(escaped_names)
    opening_re = re.compile(r'<%s(/?>|(\s+[^>]*>))' % group, re.U)
    closing_re = re.compile('</%s>' % group)
    without_open = opening_re.sub('', html)
    return closing_re.sub('', without_open)
# Lazy wrapper so remove_tags() can be applied to lazy translation strings.
remove_tags = allow_lazy(remove_tags, six.text_type)
def strip_spaces_between_tags(value):
    """Returns the given HTML with spaces between tags removed."""
    return re.sub(r'>\s+<', '><', force_text(value))
# Lazy wrapper so the function can be applied to lazy translation strings.
strip_spaces_between_tags = allow_lazy(strip_spaces_between_tags, six.text_type)
def strip_entities(value):
    """Returns the given HTML with all entities (&something;) stripped."""
    # Matches both named (&word;) and numeric (&#123;) entities.
    return re.sub(r'&(?:\w+|#\d+);', '', force_text(value))
# Lazy wrapper so the function can be applied to lazy translation strings.
strip_entities = allow_lazy(strip_entities, six.text_type)
def smart_urlquote(url):
    "Quotes a URL if it isn't already quoted."
    # Handle IDN before quoting.
    try:
        scheme, netloc, path, query, fragment = urlsplit(url)
        try:
            netloc = netloc.encode('idna').decode('ascii')  # IDN -> ACE
        except UnicodeError:  # invalid domain part
            pass
        else:
            url = urlunsplit((scheme, netloc, path, query, fragment))
    except ValueError:
        # invalid IPv6 URL (normally square brackets in hostname part).
        pass
    # Unquote first so a URL that is already percent-encoded is not
    # double-escaped by the quote() call below.
    url = unquote(force_str(url))
    # See http://bugs.python.org/issue2637
    url = quote(url, safe=b'!*\'();:@&=+$,/?#[]~')
    return force_text(url)
def urlize(text, trim_url_limit=None, nofollow=False, autoescape=False):
    """
    Converts any URLs in text into clickable links.
    Works on http://, https://, www. links, and also on links ending in one of
    the original seven gTLDs (.com, .edu, .gov, .int, .mil, .net, and .org).
    Links can have trailing punctuation (periods, commas, close-parens) and
    leading punctuation (opening parens) and it'll still do the right thing.
    If trim_url_limit is not None, the URLs in the link text longer than this
    limit will be truncated to trim_url_limit-3 characters and appended with
    an ellipsis.
    If nofollow is True, the links will get a rel="nofollow" attribute.
    If autoescape is True, the link text and URLs will be autoescaped.
    """
    def trim_url(x, limit=trim_url_limit):
        # Shorten the *visible* link text to at most ``limit`` characters,
        # ellipsis included; the href keeps the full URL.
        if limit is None or len(x) <= limit:
            return x
        return '%s...' % x[:max(0, limit - 3)]
    safe_input = isinstance(text, SafeData)
    # word_split_re keeps the whitespace separators so join() reassembles
    # the text exactly.
    words = word_split_re.split(force_text(text))
    for i, word in enumerate(words):
        if '.' in word or '@' in word or ':' in word:
            # Deal with punctuation.
            lead, middle, trail = '', word, ''
            for punctuation in TRAILING_PUNCTUATION:
                if middle.endswith(punctuation):
                    middle = middle[:-len(punctuation)]
                    trail = punctuation + trail
            for opening, closing in WRAPPING_PUNCTUATION:
                if middle.startswith(opening):
                    middle = middle[len(opening):]
                    lead = lead + opening
                # Keep parentheses at the end only if they're balanced.
                if (middle.endswith(closing)
                    and middle.count(closing) == middle.count(opening) + 1):
                    middle = middle[:-len(closing)]
                    trail = closing + trail
            # Make URL we want to point to.
            url = None
            nofollow_attr = ' rel="nofollow"' if nofollow else ''
            if simple_url_re.match(middle):
                url = smart_urlquote(middle)
            elif simple_url_2_re.match(middle):
                url = smart_urlquote('http://%s' % middle)
            elif not ':' in middle and simple_email_re.match(middle):
                local, domain = middle.rsplit('@', 1)
                try:
                    # Convert internationalized domains to ASCII (IDNA).
                    domain = domain.encode('idna').decode('ascii')
                except UnicodeError:
                    continue
                url = 'mailto:%s@%s' % (local, domain)
                nofollow_attr = ''
            # Make link.
            if url:
                trimmed = trim_url(middle)
                if autoescape and not safe_input:
                    lead, trail = escape(lead), escape(trail)
                    url, trimmed = escape(url), escape(trimmed)
                middle = '<a href="%s"%s>%s</a>' % (url, nofollow_attr, trimmed)
                words[i] = mark_safe('%s%s%s' % (lead, middle, trail))
            else:
                # Looked like a link candidate but was not one: pass it
                # through with the appropriate escaping.
                if safe_input:
                    words[i] = mark_safe(word)
                elif autoescape:
                    words[i] = escape(word)
        elif safe_input:
            words[i] = mark_safe(word)
        elif autoescape:
            words[i] = escape(word)
    return ''.join(words)
# Lazy wrapper so urlize() can be applied to lazy translation strings.
urlize = allow_lazy(urlize, six.text_type)
def avoid_wrapping(value):
    """
    Avoid text wrapping in the middle of a phrase by adding non-breaking
    spaces where there previously were normal spaces.
    """
    # Splitting on ' ' and re-joining with NBSP is equivalent to a
    # character-for-character replace of every ordinary space.
    return "\xa0".join(value.split(" "))
| errx/django | django/utils/html.py | Python | bsd-3-clause | 10,215 |
from msw.models import Page, RichText, MembersPostUser, MembersPostText
from django.contrib import admin
# could add more complicated stuff here consult:
# tutorial: https://docs.djangoproject.com/en/dev/intro/tutorial02/#enter-the-admin-site
# tutorial finished admin.py: https://github.com/haoqili/Django-Tutorial-Directory/blob/master/tutorialSite/polls/admin.py
# Expose the site's content models in the Django admin using the default
# ModelAdmin options for each.
admin.site.register(Page)
admin.site.register(RichText)
admin.site.register(MembersPostUser)
admin.site.register(MembersPostText)
| haoqili/MozSecWorld | apps/msw/admin.py | Python | bsd-3-clause | 504 |
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 NOEL-BARON Léo
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le type combustible."""
from primaires.interpreteur.editeur.entier import Entier
from primaires.interpreteur.editeur.selection import Selection
from primaires.objet.types.base import BaseType
class Combustible(BaseType):
    """Object type: fuel (combustible).

    A burnable item found on configurable terrain types, carrying a
    rarity and a quality rating (both on a 1-10 scale) edited through the
    builder interface set up below.  (Docstrings/comments translated from
    French; user-facing strings intentionally left in French.)
    """
    nom_type = "combustible"
    def __init__(self, cle=""):
        """Constructor of the object."""
        BaseType.__init__(self, cle)
        self.terrains = []  # terrain keys where this fuel can be found
        self.rarete = 1     # rarity: 1 (common) .. 10 (rare)
        self.qualite = 2    # quality: 1 (poor) .. 10 (very good)
        # Editors: extend the builder with terrain selection and the two
        # bounded integer fields.
        self.etendre_editeur("t", "terrains", Selection, self, "terrains",
                list(importeur.salle.terrains.keys()))
        self.etendre_editeur("r", "rareté", Entier, self, "rarete", 1, 10)
        self.etendre_editeur("a", "qualité", Entier, self, "qualite", 1, 10)
    @property
    def aff_terrains(self):
        # Human-readable terrain list used in the editor previews.
        return ", ".join(self.terrains) if self.terrains else "aucun"
    def travailler_enveloppes(self, enveloppes):
        """Configure the editor envelopes (previews, prompts, help texts)."""
        l_terrains = sorted(type(self).importeur.salle.terrains.keys())
        terrains = enveloppes["t"]
        terrains.apercu = "{objet.aff_terrains}"
        terrains.prompt = "Entrez un terrain : "
        terrains.aide_courte = \
            "Entrez les |ent|terrains|ff| où l'on peut trouver ce " \
            "combustible.\n\nTerrains disponibles : {}.\n\n" \
            "Terrains actuels : {{objet.aff_terrains}}".format(
            ", ".join(l_terrains))
        rarete = enveloppes["r"]
        rarete.apercu = "{objet.rarete}"
        rarete.prompt = "Rareté du combustible : "
        rarete.aide_courte = \
            "Entrez la |ent|rareté|ff| du combustible, entre |cmd|1|ff| " \
            "(courant) et |cmd|10|ff| (rare).\n\n" \
            "Rareté actuelle : {objet.rarete}"
        qualite = enveloppes["a"]
        qualite.apercu = "{objet.qualite}"
        qualite.prompt = "Qualité du combustible : "
        qualite.aide_courte = \
            "Entrez la |ent|qualité|ff| du combustible, entre |cmd|1|ff| " \
            "(mauvais) et |cmd|10|ff| (très bon).\n\n" \
            "Qualité actuelle : {objet.qualite}"
import ctypes
from contextlib import contextmanager
import errno
import logging
import os
import platform
import pwd
import grp
import subprocess
from .exceptions import CommandFailed
_logger = logging.getLogger(__name__)
def get_current_user_shell():
    """Return the login shell of the user owning this process."""
    passwd_entry = pwd.getpwuid(os.getuid())
    return passwd_entry.pw_shell
def execute_command_assert_success(cmd, **kw):
    """Run *cmd* through execute_command() and raise CommandFailed on a
    non-zero exit status; return the finished Popen object otherwise."""
    returned = execute_command(cmd, **kw)
    if returned.returncode != 0:
        raise CommandFailed("Command {0!r} failed with exit code {1}".format(cmd, returned.returncode))
    return returned
def execute_command(cmd, unsudo=False, **kw):
    """Run *cmd* via the shell, wait for completion and return the Popen.

    When *unsudo* is true the command is wrapped so it runs as the
    pre-sudo user/group (see _get_unsudo_command).  Extra keyword
    arguments are forwarded to subprocess.Popen.

    NOTE(review): ``shell=True`` means *cmd* is interpreted by the shell;
    callers must not pass untrusted input here.
    """
    if unsudo:
        cmd = _get_unsudo_command(cmd)
    _logger.debug("Running %r (%s)", cmd, kw)
    returned = subprocess.Popen(cmd, shell=True, **kw)
    returned.wait()
    _logger.debug("%r finished with exit code %s", cmd, returned.returncode)
    return returned
def _get_unsudo_command(cmd):
    """Prefix *cmd* with ``sudo -u/-g`` flags targeting the pre-sudo
    uid/gid, or return it unchanged when neither is usable."""
    uid = get_sudo_uid()
    gid = get_sudo_gid()
    # Deliberately a truthiness test (not ``is None``): this mirrors the
    # historical behavior for uid/gid 0 as well as for missing values.
    if not uid and not gid:
        return cmd
    prefix = "sudo "
    if uid is not None:
        prefix += "-u \\#{0} ".format(uid)
    if gid is not None:
        prefix += "-g \\#{0} ".format(gid)
    return prefix + cmd
def get_sudo_uid():
    # UID of the user who invoked sudo (as int), or None when not under sudo.
    return _int_if_not_none(os.environ.get("SUDO_UID"))
def get_sudo_gid():
    # GID of the user who invoked sudo (as int), or None when not under sudo.
    return _int_if_not_none(os.environ.get("SUDO_GID"))
def get_sudo_groups():
    """Return the supplementary group ids of the pre-sudo user, or None
    when not running under sudo."""
    sudo_uid = get_sudo_uid()
    if sudo_uid is None:
        return None
    return get_groups_by_uid(sudo_uid)
def get_groups_by_uid(uid):
    """Return the gids of every group that lists *uid*'s user as a member.

    Unknown uids are logged and yield an empty list.
    """
    try:
        username = pwd.getpwuid(uid).pw_name
    except KeyError:
        _logger.warning("Failed to get pwd information for uid %s", uid, exc_info=True)
        return []
    return [group.gr_gid for group in grp.getgrall() if username in group.gr_mem]
if platform.system() == "Linux":
    # CLONE_NEWNS (0x20000) from <linux/sched.h>: unshare the mount namespace.
    CLONE_NEWNS = 131072
    # use_errno=True is required for ctypes.get_errno() below to see the
    # errno left by the failed libc call; without it get_errno() reads a
    # thread-local copy that is never updated and always reports 0.
    _libc = ctypes.CDLL("libc.so.6", use_errno=True)
    def unshare_mounts():
        """Move this process into a private mount namespace (unshare(2)).

        Raises OSError (with the decoded errno) when the call fails, e.g.
        for lack of privileges.
        """
        return_value = _libc.unshare(CLONE_NEWNS)
        if 0 != return_value:
            errno_val = ctypes.get_errno()
            raise OSError("unshare() called failed (errno={0} ({1}))".format(
                errno_val, errno.errorcode.get(errno_val, "?")
            ))
else:
    def unshare_mounts():
        """Stub for non-Linux platforms."""
        raise NotImplementedError("Only supported on Linux")
@contextmanager
def unsudo_context():
    """Context manager that temporarily drops the effective uid/gid to the
    pre-sudo user for the duration of the ``with`` block, restoring the
    previous effective ids afterwards (even on exception)."""
    old_uid = os.geteuid()
    old_gid = os.getegid()
    sudo_uid = get_sudo_uid()
    sudo_gid = get_sudo_gid()
    # The gid is dropped before the uid: once the effective uid is no longer
    # privileged, changing the effective gid would not be permitted.
    if sudo_gid is not None:
        _logger.debug("Changing gid to %s", sudo_gid)
        os.setegid(sudo_gid)
    if sudo_uid is not None:
        _logger.debug("Changing uid to %s", sudo_uid)
        os.seteuid(sudo_uid)
    try:
        yield
    finally:
        # Restore uid first, so the (possibly privileged) setegid succeeds.
        os.seteuid(old_uid)
        os.setegid(old_gid)
def _int_if_not_none(value):
    """Return int(value), passing None through unchanged."""
    return None if value is None else int(value)
| vmalloc/dwight | dwight_chroot/platform_utils.py | Python | bsd-3-clause | 2,868 |
from django.conf.urls import url
from . import views
# NOTE: this file is a code-generation template, not importable Python as-is:
# "{{ model_name }}" is a placeholder substituted with the real model name
# when the app is generated.  The generated module wires the five standard
# CRUD class-based views for that model.
urlpatterns = [
    url(r'^$', views.{{ model_name }}List.as_view(), name='list'),
    url(r'^new/$', views.{{ model_name }}Create.as_view(), name='create'),
    url(r'^(?P<pk>\d+)/$', views.{{ model_name }}Detail.as_view(), name='detail'),
    url(r'^(?P<pk>\d+)/update/$', views.{{ model_name }}Update.as_view(), name='update'),
    url(r'^(?P<pk>\d+)/delete/$', views.{{ model_name }}Delete.as_view(), name='delete'),
]
| grantmcconnaughey/django-app-gen | appgen/templates/appgen/python/urls.py | Python | bsd-3-clause | 479 |
# -*- coding: utf-8 -*-
"""
Eve Demo (Secured)
~~~~~~~~~~~~~~~~~~
This is a fork of Eve Demo (https://github.com/pyeve/eve-demo)
intended to demonstrate how a Eve API can be secured by means of
Flask-Sentinel.
For demonstration purposes, besides protecting a couple API endpoints
with a BearerToken class instance, we are also adding a static html
endpoint an protecting with via decorator.
:copyright: (c) 2015 by Nicola Iarocci.
:license: BSD, see LICENSE for more details.
"""
from eve import Eve
from oauth2 import BearerAuth
from flask.ext.sentinel import ResourceOwnerPasswordCredentials, oauth
# Build the Eve application; every API endpoint is protected by BearerAuth.
app = Eve(auth=BearerAuth)
# Registers the OAuth2 resource-owner-password-credentials machinery
# (token endpoint, user/client management) on the app.
ResourceOwnerPasswordCredentials(app)
@app.route('/endpoint')
@oauth.require_oauth()
def restricted_access():
    """Plain Flask endpoint protected via the oauth decorator (demo)."""
    return "You made it through and accessed the protected resource!"
if __name__ == '__main__':
    # 'adhoc' generates a throwaway self-signed certificate so the demo
    # runs over HTTPS.
    app.run(ssl_context='adhoc')
| nicolaiarocci/eve-oauth2 | run.py | Python | bsd-3-clause | 917 |
#!/usr/bin/python -tt
# vim:set ts=4 sw=4 expandtab:
#
# NodeManager plugin for creating credentials in slivers
# (*) empower slivers to make API calls throught hmac
# (*) also create a ssh key - used by the OMF resource controller
# for authenticating itself with its Experiment Controller
# in order to avoid spamming the DB with huge amounts of such tags,
# (*) slices need to have the 'enable_hmac' tag set
# (*) or the 'omf_control' tag set, respectively
"""
Sliver authentication support for NodeManager.
"""
import os
import random
import string
import tempfile
import socket
import logger
import tools
def start():
    """NodeManager plugin entry point; this plugin needs no initialization."""
    logger.log("sliverauth: (dummy) plugin starting up...")
def GetSlivers(data, config, plc):
    """Per-GetSlivers hook: (re)provision authentication material for the
    slivers on this node.

    Non-system slices tagged ``enable_hmac`` get an hmac secret
    (manage_hmac); slices tagged ``omf_control`` get an ssh keypair
    (manage_sshkey).  Python 2 code.
    """
    if 'OVERRIDES' in dir(config):
        # Node-local configuration can disable this plugin entirely.
        if config.OVERRIDES.get('sliverauth') == '-1':
            logger.log("sliverauth: Disabled", 2)
            return
    if 'slivers' not in data:
        logger.log_missing_data("sliverauth.GetSlivers", 'slivers')
        return
    for sliver in data['slivers']:
        path = '/vservers/%s' % sliver['name']
        if not os.path.exists(path):
            # ignore all non-plc-instantiated slivers
            instantiation = sliver.get('instantiation','')
            if instantiation == 'plc-instantiated':
                logger.log("sliverauth: plc-instantiated slice %s does not yet exist. IGNORING!" % sliver['name'])
            continue
        system_slice = False
        for chunk in sliver['attributes']:
            if chunk['tagname'] == "system":
                # The value may arrive as bool, int or string from the API.
                # NOTE(review): a non-string value other than True/1 (e.g.
                # False) would raise on .lower(); values appear to always be
                # strings in practice - confirm upstream.
                if chunk['value'] in (True, 1, '1') or chunk['value'].lower() == "true":
                    system_slice = True
        for chunk in sliver['attributes']:
            if chunk['tagname']=='enable_hmac' and not system_slice:
                manage_hmac (plc, sliver)
            if chunk['tagname']=='omf_control':
                manage_sshkey (plc, sliver)
def SetSliverTag(plc, slice, tagname, value):
    """Create or update the sliver tag *tagname* for *slice* on this node.

    When no matching tag exists it is added via AddSliceTag (failures are
    logged, not raised - delegated slices are expected to fail); otherwise
    the existing tag is updated in place.
    """
    node_id = tools.node_id()
    slivertags=plc.GetSliceTags({"name":slice,"node_id":node_id,"tagname":tagname})
    if len(slivertags)==0:
        # looks like GetSlivers reports about delegated/nm-controller slices that do *not* belong to this node
        # and this is something that AddSliceTag does not like
        try:
            slivertag_id=plc.AddSliceTag(slice,tagname,value,node_id)
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; the failure is still only logged.
            logger.log_exc ("sliverauth.SetSliverTag (probably delegated) slice=%(slice)s tag=%(tagname)s node_id=%(node_id)d"%locals())
    else:
        slivertag_id=slivertags[0]['slice_tag_id']
        plc.UpdateSliceTag(slivertag_id,value)
def find_tag (sliver, tagname):
    """Return the value of attribute *tagname* on *sliver*, or None.

    Accepts both the current 'tagname' key and the legacy 'name' key.
    """
    for attribute in sliver['attributes']:
        key = attribute.get('tagname', attribute.get('name', ''))
        if key == tagname:
            return attribute['value']
    return None
def manage_hmac (plc, sliver):
    """Ensure the sliver has an hmac secret.

    Reuses the existing 'hmac' slice tag when present, otherwise generates
    a random 32-letter secret and records it as a tag; either way the
    secret is written to /vservers/<name>/etc/planetlab/key (mode 0400).
    Python 2 code (xrange, string.letters, octal literal).
    """
    hmac = find_tag (sliver, 'hmac')
    if not hmac:
        # let python do its thing
        random.seed()
        # NOTE(review): the random module is not a CSPRNG; a cryptographic
        # source would be preferable for an authentication secret.
        d = [random.choice(string.letters) for x in xrange(32)]
        hmac = "".join(d)
        SetSliverTag(plc,sliver['name'],'hmac',hmac)
        logger.log("sliverauth: %s: setting hmac" % sliver['name'])
    path = '/vservers/%s/etc/planetlab' % sliver['name']
    if os.path.exists(path):
        keyfile = '%s/key' % path
        if (tools.replace_file_with_string(keyfile,hmac,chmod=0400)):
            logger.log ("sliverauth: (over)wrote hmac into %s " % keyfile)
# create the key if needed and returns the key contents
def generate_sshkey (sliver):
    """Create the sliver's rsa keypair under /home/<name>/.ssh if missing
    and return the public key text (stripped).  Python 2 code."""
    # initial version was storing stuff in the sliver directly
    # keyfile="/vservers/%s/home/%s/.ssh/id_rsa"%(sliver['name'],sliver['name'])
    # we're now storing this in the same place as the authorized_keys, which in turn
    # gets mounted to the user's home directory in the sliver
    keyfile="/home/%s/.ssh/id_rsa"%(sliver['name'])
    pubfile="%s.pub"%keyfile
    dotssh=os.path.dirname(keyfile)
    # create dir if needed
    if not os.path.isdir (dotssh):
        os.mkdir (dotssh, 0700)
        logger.log_call ( [ 'chown', "%s:slices"%(sliver['name']), dotssh ] )
    if not os.path.isfile (pubfile):
        # Key comment identifies the slice and the hosting node.
        comment="%s@%s"%(sliver['name'],socket.gethostname())
        logger.log_call( [ 'ssh-keygen', '-t', 'rsa', '-N', '', '-f', keyfile , '-C', comment] )
        os.chmod (keyfile, 0400)
        logger.log_call ( [ 'chown', "%s:slices"%(sliver['name']), keyfile, pubfile ] )
    return file(pubfile).read().strip()
# a sliver can get created, deleted and re-created
# the slice having the tag is not sufficient to skip key geneneration
def manage_sshkey (plc, sliver):
    """Ensure the sliver's ssh keypair exists and mirror the public key in
    the 'ssh_key' slice tag, updating the tag only when it differs."""
    # regardless of whether the tag is there or not, we need to grab the file
    # if it's lost b/c e.g. the sliver was destroyed we cannot save the tags content
    ssh_key = generate_sshkey(sliver)
    old_tag = find_tag (sliver, 'ssh_key')
    # '<>' is the Python 2 spelling of '!='.
    if ssh_key <> old_tag:
        SetSliverTag(plc, sliver['name'], 'ssh_key', ssh_key)
        logger.log ("sliverauth: %s: setting ssh_key" % sliver['name'])
| planetlab/NodeManager | plugins/sliverauth.py | Python | bsd-3-clause | 5,118 |
from django.conf.urls import url, patterns, include
from accounts import views
# Tool-related views nested under each user's account (included below under
# the "tool" namespace).
user_tool_patterns = patterns(
    "",
    url(r"^lending/$", views.LendingManager.as_view(), name="lending"),
    url(r"^manager/$", views.ToolManager.as_view(), name="manager"),
)
# namespaced under account:
urlpatterns = patterns(
    "",
    url(r"^$", views.SettingsView.as_view(), name="settings"),
    url(r"^login/$", views.LoginView.as_view(), name="login"),
    url(r"^logout/$", views.LogoutView.as_view(), name="logout"),
    url(r"^register/$", views.SignupView.as_view(), name="signup"),
    url(r"^user/(?P<username>[-_\w]+)/$",
        views.UserDetailView.as_view(), name="user_detail"),
    url(r"^confirm_email/(?P<key>\w+)/$", views.ConfirmEmailView.as_view(),
        name="confirm_email"),
    url(r"^password/$", views.ChangePasswordView.as_view(),
        name="password"),
    url(r"^password/reset/$", views.PasswordResetView.as_view(),
        name="password_reset"),
    # uidb36/token pair produced by Django's password-reset machinery.
    url(r"^password/reset/(?P<uidb36>[0-9A-Za-z]+)-(?P<token>.+)/$",
        views.PasswordResetTokenView.as_view(),
        name="password_reset_token"),
    url(r"^delete/$", views.DeleteView.as_view(), name="delete"),
    url(r"^tool/", include(user_tool_patterns, namespace="tool")),
)
| toolhub/toolhub.co | accounts/urls.py | Python | bsd-3-clause | 1,267 |
"""Lite version of scipy.linalg.
Notes
-----
This module is a lite version of the linalg.py module in SciPy which
contains high-level Python interface to the LAPACK library. The lite
version only accesses the following LAPACK functions: dgesv, zgesv,
dgeev, zgeev, dgesdd, zgesdd, dgelsd, zgelsd, dsyevd, zheevd, dgetrf,
zgetrf, dpotrf, zpotrf, dgeqrf, zgeqrf, zungqr, dorgqr.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['matrix_power', 'solve', 'tensorsolve', 'tensorinv', 'inv',
'cholesky', 'eigvals', 'eigvalsh', 'pinv', 'slogdet', 'det',
'svd', 'eig', 'eigh', 'lstsq', 'norm', 'qr', 'cond', 'matrix_rank',
'LinAlgError', 'multi_dot']
import warnings
from numpy.core import (
array, asarray, zeros, empty, empty_like, transpose, intc, single, double,
csingle, cdouble, inexact, complexfloating, newaxis, ravel, all, Inf, dot,
add, multiply, sqrt, maximum, fastCopyAndTranspose, sum, isfinite, size,
finfo, errstate, geterrobj, longdouble, rollaxis, amin, amax, product, abs,
broadcast, atleast_2d, intp, asanyarray, isscalar, object_, ones
)
from numpy.core.multiarray import normalize_axis_index
from numpy.lib import triu, asfarray
from numpy.linalg import lapack_lite, _umath_linalg
from numpy.matrixlib.defmatrix import matrix_power
from numpy.compat import asbytes
# For Python2/3 compatibility: LAPACK routines take single-character "job"
# flags, which must be byte strings on Python 3.  The exact meaning of each
# character depends on the routine it is passed to.
_N = asbytes('N')
_V = asbytes('V')
_A = asbytes('A')
_S = asbytes('S')
_L = asbytes('L')
# Integer type matching LAPACK's Fortran INTEGER in these wrappers.
fortran_int = intc
# Error object
class LinAlgError(Exception):
    """
    Generic Python-exception-derived object raised by linalg functions.
    General purpose exception class, derived from Python's exception.Exception
    class, programmatically raised in linalg functions when a Linear
    Algebra-related condition would prevent further correct execution of the
    function.
    Parameters
    ----------
    None
    Examples
    --------
    >>> from numpy import linalg as LA
    >>> LA.inv(np.zeros((2,2)))
    Traceback (most recent call last):
      File "<stdin>", line 1, in <module>
      File "...linalg.py", line 350,
        in inv return wrap(solve(a, identity(a.shape[0], dtype=a.dtype)))
      File "...linalg.py", line 249,
        in solve
        raise LinAlgError('Singular matrix')
    numpy.linalg.LinAlgError: Singular matrix
    """
    # No extra state or behavior: the class only provides a distinct type.
    pass
# Dealing with errors in _umath_linalg
_linalg_error_extobj = None  # template extobj list, filled in at import time
def _determine_error_states():
    # Build the ufunc "extobj" template: keep the current buffer size, and
    # use an error mask where only 'invalid' floating-point errors trigger a
    # callback (the other FP error classes are ignored).
    global _linalg_error_extobj
    errobj = geterrobj()
    bufsize = errobj[0]
    with errstate(invalid='call', over='ignore',
                  divide='ignore', under='ignore'):
        invalid_call_errmask = geterrobj()[1]
    _linalg_error_extobj = [bufsize, invalid_call_errmask, None]
# Populate _linalg_error_extobj once, at module import.
_determine_error_states()
# Callbacks installed into the gufunc extobj: each converts the ufunc
# 'invalid' FP error signal into a descriptive LinAlgError.
def _raise_linalgerror_singular(err, flag):
    raise LinAlgError("Singular matrix")
def _raise_linalgerror_nonposdef(err, flag):
    raise LinAlgError("Matrix is not positive definite")
def _raise_linalgerror_eigenvalues_nonconvergence(err, flag):
    raise LinAlgError("Eigenvalues did not converge")
def _raise_linalgerror_svd_nonconvergence(err, flag):
    raise LinAlgError("SVD did not converge")
def get_linalg_error_extobj(callback):
    # Return a fresh copy of the template extobj with *callback* installed
    # as the error handler (slot 2).
    extobj = list(_linalg_error_extobj)
    extobj[2] = callback
    return extobj
def _makearray(a):
    """Return *a* as an ndarray together with a wrapper callable that
    restores the caller's array subclass (__array_prepare__ falling back
    to the new array's __array_wrap__) on the way out."""
    new = asarray(a)
    wrap = getattr(a, "__array_prepare__", new.__array_wrap__)
    return new, wrap
def isComplexType(t):
    """Return True when scalar type *t* is a complex floating type."""
    return issubclass(t, complexfloating)
# Scalar-type lookup tables used by _realType/_complexType/_commonType.
_real_types_map = {single : single,
                   double : double,
                   csingle : single,
                   cdouble : double}
# Fix: this map used to be defined a second time, identically, a few lines
# further down; the redundant duplicate definition has been removed.
_complex_types_map = {single : csingle,
                      double : cdouble,
                      csingle : csingle,
                      cdouble : cdouble}
def _realType(t, default=double):
    """Real scalar type corresponding to *t* (e.g. cdouble -> double)."""
    return _real_types_map.get(t, default)
def _complexType(t, default=cdouble):
    """Complex scalar type corresponding to *t* (e.g. double -> cdouble)."""
    return _complex_types_map.get(t, default)
def _linalgRealType(t):
    """Cast the type t to either double or cdouble."""
    return double
def _commonType(*arrays):
    """Return (computation_type, result_type) for *arrays*: computation is
    always double/cdouble, while the result keeps single precision only
    when every inexact input was single precision."""
    # in lite version, use higher precision (always double or cdouble)
    result_type = single
    is_complex = False
    for a in arrays:
        if issubclass(a.dtype.type, inexact):
            if isComplexType(a.dtype.type):
                is_complex = True
            rt = _realType(a.dtype.type, default=None)
            if rt is None:
                # unsupported inexact scalar
                raise TypeError("array type %s is unsupported in linalg" %
                        (a.dtype.name,))
        else:
            # Integer/bool inputs are promoted to double.
            rt = double
        if rt is double:
            result_type = double
    if is_complex:
        t = cdouble
        result_type = _complex_types_map[result_type]
    else:
        t = double
    return t, result_type
# _fastCopyAndTranpose assumes the input is 2D (as all the calls in here are).
_fastCT = fastCopyAndTranspose
def _to_native_byte_order(*arrays):
    # Return the arrays converted to native byte order where needed; a
    # single input comes back as a single array, not a 1-element list.
    ret = []
    for arr in arrays:
        if arr.dtype.byteorder not in ('=', '|'):
            ret.append(asarray(arr, dtype=arr.dtype.newbyteorder('=')))
        else:
            ret.append(arr)
    if len(ret) == 1:
        return ret[0]
    else:
        return ret
def _fastCopyAndTranspose(type, *arrays):
    # Cast each array to *type* (when needed) and copy-transpose it for the
    # Fortran-ordered LAPACK calls; single input -> single output.
    cast_arrays = ()
    for a in arrays:
        if a.dtype.type is type:
            cast_arrays = cast_arrays + (_fastCT(a),)
        else:
            cast_arrays = cast_arrays + (_fastCT(a.astype(type)),)
    if len(cast_arrays) == 1:
        return cast_arrays[0]
    else:
        return cast_arrays
def _assertRank2(*arrays):
    # Require every array to be exactly 2-dimensional.
    for a in arrays:
        if a.ndim != 2:
            raise LinAlgError('%d-dimensional array given. Array must be '
                    'two-dimensional' % a.ndim)
def _assertRankAtLeast2(*arrays):
    # Require every array to have at least 2 dimensions (stacked matrices).
    for a in arrays:
        if a.ndim < 2:
            raise LinAlgError('%d-dimensional array given. Array must be '
                    'at least two-dimensional' % a.ndim)
def _assertSquareness(*arrays):
    # Require strictly 2-D inputs to be square (all dims equal).
    for a in arrays:
        if max(a.shape) != min(a.shape):
            raise LinAlgError('Array must be square')
def _assertNdSquareness(*arrays):
    # Require the trailing two dimensions (the matrix part) to be square.
    for a in arrays:
        if max(a.shape[-2:]) != min(a.shape[-2:]):
            raise LinAlgError('Last 2 dimensions of the array must be square')
def _assertFinite(*arrays):
    # Reject arrays containing inf or NaN.
    for a in arrays:
        if not (isfinite(a).all()):
            raise LinAlgError("Array must not contain infs or NaNs")
def _isEmpty2d(arr):
    # True when the matrix part (last two dims) is empty.
    # check size first for efficiency
    return arr.size == 0 and product(arr.shape[-2:]) == 0
def _assertNoEmpty2d(*arrays):
    # Reject arrays whose matrix part is empty.
    for a in arrays:
        if _isEmpty2d(a):
            raise LinAlgError("Arrays cannot be empty")
# Linear equations
def tensorsolve(a, b, axes=None):
    """
    Solve the tensor equation ``a x = b`` for x.
    It is assumed that all indices of `x` are summed over in the product,
    together with the rightmost indices of `a`, as is done in, for example,
    ``tensordot(a, x, axes=b.ndim)``.
    Parameters
    ----------
    a : array_like
        Coefficient tensor, of shape ``b.shape + Q``. `Q`, a tuple, equals
        the shape of that sub-tensor of `a` consisting of the appropriate
        number of its rightmost indices, and must be such that
        ``prod(Q) == prod(b.shape)`` (in which sense `a` is said to be
        'square').
    b : array_like
        Right-hand tensor, which can be of any shape.
    axes : tuple of ints, optional
        Axes in `a` to reorder to the right, before inversion.
        If None (default), no reordering is done.
    Returns
    -------
    x : ndarray, shape Q
    Raises
    ------
    LinAlgError
        If `a` is singular or not 'square' (in the above sense).
    See Also
    --------
    numpy.tensordot, tensorinv, numpy.einsum
    Examples
    --------
    >>> a = np.eye(2*3*4)
    >>> a.shape = (2*3, 4, 2, 3, 4)
    >>> b = np.random.randn(2*3, 4)
    >>> x = np.linalg.tensorsolve(a, b)
    >>> x.shape
    (2, 3, 4)
    >>> np.allclose(np.tensordot(a, x, axes=3), b)
    True
    """
    a, wrap = _makearray(a)
    b = asarray(b)
    an = a.ndim
    if axes is not None:
        # Move the requested axes of `a` to the end, preserving the order
        # of the remaining axes.
        allaxes = list(range(0, an))
        for k in axes:
            allaxes.remove(k)
            allaxes.insert(an, k)
        a = a.transpose(allaxes)
    # The trailing dims of `a` (beyond b's rank) form the shape of x.
    oldshape = a.shape[-(an-b.ndim):]
    prod = 1
    for k in oldshape:
        prod *= k
    # Flatten to an ordinary 2-D linear system and solve it.
    a = a.reshape(-1, prod)
    b = b.ravel()
    res = wrap(solve(a, b))
    res.shape = oldshape
    return res
def solve(a, b):
    """
    Solve a linear matrix equation, or system of linear scalar equations.
    Computes the "exact" solution, `x`, of the well-determined, i.e., full
    rank, linear matrix equation `ax = b`.
    Parameters
    ----------
    a : (..., M, M) array_like
        Coefficient matrix.
    b : {(..., M,), (..., M, K)}, array_like
        Ordinate or "dependent variable" values.
    Returns
    -------
    x : {(..., M,), (..., M, K)} ndarray
        Solution to the system a x = b.  Returned shape is identical to `b`.
    Raises
    ------
    LinAlgError
        If `a` is singular or not square.
    Notes
    -----
    .. versionadded:: 1.8.0
    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.
    The solutions are computed using LAPACK routine _gesv
    `a` must be square and of full-rank, i.e., all rows (or, equivalently,
    columns) must be linearly independent; if either is not true, use
    `lstsq` for the least-squares best "solution" of the
    system/equation.
    References
    ----------
    .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
           FL, Academic Press, Inc., 1980, pg. 22.
    Examples
    --------
    Solve the system of equations ``3 * x0 + x1 = 9`` and ``x0 + 2 * x1 = 8``:
    >>> a = np.array([[3,1], [1,2]])
    >>> b = np.array([9,8])
    >>> x = np.linalg.solve(a, b)
    >>> x
    array([ 2.,  3.])
    Check that the solution is correct:
    >>> np.allclose(np.dot(a, x), b)
    True
    """
    a, _ = _makearray(a)
    _assertRankAtLeast2(a)
    _assertNdSquareness(a)
    b, wrap = _makearray(b)
    t, result_t = _commonType(a, b)
    # We use the b = (..., M,) logic, only if the number of extra dimensions
    # match exactly
    if b.ndim == a.ndim - 1:
        if a.shape[-1] == 0 and b.shape[-1] == 0:
            # Legal, but the ufunc cannot handle the 0-sized inner dims
            # let the ufunc handle all wrong cases.
            a = a.reshape(a.shape[:-1])
            bc = broadcast(a, b)
            return wrap(empty(bc.shape, dtype=result_t))
        # Inner loop specialized for a single right-hand-side vector.
        gufunc = _umath_linalg.solve1
    else:
        if b.size == 0:
            if (a.shape[-1] == 0 and b.shape[-2] == 0) or b.shape[-1] == 0:
                # Empty system: only the broadcast result shape matters.
                a = a[:,:1].reshape(a.shape[:-1] + (1,))
                bc = broadcast(a, b)
                return wrap(empty(bc.shape, dtype=result_t))
        gufunc = _umath_linalg.solve
    # Select the real ('dd->d') or complex ('DD->D') inner loop.
    signature = 'DD->D' if isComplexType(t) else 'dd->d'
    extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
    r = gufunc(a, b, signature=signature, extobj=extobj)
    return wrap(r.astype(result_t, copy=False))
def tensorinv(a, ind=2):
    """
    Compute the 'inverse' of an N-dimensional array.

    The result ``b`` is an inverse of `a` relative to the tensordot
    operation ``tensordot(a, x, ind)``: up to floating-point accuracy,
    ``tensordot(tensorinv(a), a, ind)`` is the "identity" tensor for
    that operation.

    Parameters
    ----------
    a : array_like
        Tensor to 'invert'. Its shape must be 'square' in the sense that
        ``prod(a.shape[:ind]) == prod(a.shape[ind:])``.
    ind : int, optional
        Number of leading indices involved in the inverse sum. Must be a
        positive integer; the default is 2.

    Returns
    -------
    b : ndarray
        Tensordot inverse of `a`, shape ``a.shape[ind:] + a.shape[:ind]``.

    Raises
    ------
    LinAlgError
        If `a` is singular or not 'square' (in the above sense).

    See Also
    --------
    numpy.tensordot, tensorsolve
    """
    a = asarray(a)
    shape_in = a.shape
    # Only a positive number of leading indices makes sense here.
    if ind <= 0:
        raise ValueError("Invalid ind argument.")
    # Result shape: the trailing axes move to the front.
    shape_out = shape_in[ind:] + shape_in[:ind]
    rows = 1
    for extent in shape_in[ind:]:
        rows *= extent
    # Flatten into the equivalent square matrix, invert, restore shape.
    flat = a.reshape(rows, -1)
    return inv(flat).reshape(*shape_out)
# Matrix inversion
def inv(a):
    """
    Compute the (multiplicative) inverse of a matrix.

    Given a square matrix `a`, return `ainv` satisfying
    ``dot(a, ainv) = dot(ainv, a) = eye(a.shape[0])``.

    Parameters
    ----------
    a : (..., M, M) array_like
        Matrix (or stack of matrices) to be inverted.

    Returns
    -------
    ainv : (..., M, M) ndarray or matrix
        (Multiplicative) inverse of `a`. A matrix object is returned
        when `a` is a matrix object.

    Raises
    ------
    LinAlgError
        If `a` is not square or inversion fails.

    Notes
    -----
    .. versionadded:: 1.8.0

    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.
    """
    arr, wrap = _makearray(a)
    _assertRankAtLeast2(arr)
    _assertNdSquareness(arr)
    in_type, out_type = _commonType(arr)
    # 0x0 inner matrices are legal but the gufunc cannot process them.
    if arr.shape[-1] == 0:
        return wrap(empty_like(arr, dtype=out_type))
    extobj = get_linalg_error_extobj(_raise_linalgerror_singular)
    if isComplexType(in_type):
        signature = 'D->D'
    else:
        signature = 'd->d'
    result = _umath_linalg.inv(arr, signature=signature, extobj=extobj)
    return wrap(result.astype(out_type, copy=False))
# Cholesky decomposition
def cholesky(a):
    """
    Cholesky decomposition.

    Return the lower-triangular Cholesky factor `L` of the square matrix
    `a`, i.e. the `L` in ``a = L * L.H`` where ``.H`` is the conjugate
    transpose (the ordinary transpose when `a` is real-valued). `a` must
    be Hermitian (symmetric if real-valued) and positive-definite. Only
    `L` is returned.

    Parameters
    ----------
    a : (..., M, M) array_like
        Hermitian (symmetric if all elements are real), positive-definite
        input matrix.

    Returns
    -------
    L : (..., M, M) array_like
        Lower-triangular Cholesky factor of `a`. A matrix object is
        returned when `a` is a matrix object.

    Raises
    ------
    LinAlgError
        If the decomposition fails, for example when `a` is not
        positive-definite.

    Notes
    -----
    .. versionadded:: 1.8.0

    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.

    The Cholesky decomposition is often used as a fast way of solving
    ``A x = b`` when `A` is both Hermitian/symmetric and
    positive-definite: first solve ``L y = b`` for `y`, then
    ``L.H x = y`` for `x`.
    """
    arr, wrap = _makearray(a)
    _assertRankAtLeast2(arr)
    _assertNdSquareness(arr)
    in_type, out_type = _commonType(arr)
    signature = 'D->D' if isComplexType(in_type) else 'd->d'
    # LAPACK failures surface as a "not positive definite" LinAlgError.
    extobj = get_linalg_error_extobj(_raise_linalgerror_nonposdef)
    factor = _umath_linalg.cholesky_lo(arr, signature=signature,
                                       extobj=extobj)
    return wrap(factor.astype(out_type, copy=False))
# QR decomposition
def qr(a, mode='reduced'):
    """
    Compute the qr factorization of a matrix.

    Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
    upper-triangular.

    Parameters
    ----------
    a : array_like, shape (M, N)
        Matrix to be factored.
    mode : {'reduced', 'complete', 'r', 'raw', 'full', 'economic'}, optional
        If K = min(M, N), then

        'reduced'  : returns q, r with dimensions (M, K), (K, N) (default)
        'complete' : returns q, r with dimensions (M, M), (M, N)
        'r'        : returns r only with dimensions (K, N)
        'raw'      : returns h, tau with dimensions (N, M), (K,)
        'full'     : alias of 'reduced', deprecated
        'economic' : returns h from 'raw', deprecated.

        The options 'reduced', 'complete, and 'raw' are new in numpy 1.8,
        see the notes for more information. The default is 'reduced' and to
        maintain backward compatibility with earlier versions of numpy both
        it and the old default 'full' can be omitted. Note that array h
        returned in 'raw' mode is transposed for calling Fortran. The
        'economic' mode is deprecated. The modes 'full' and 'economic' may
        be passed using only the first letter for backwards compatibility,
        but all others must be spelled out. See the Notes for more
        explanation.

    Returns
    -------
    q : ndarray of float or complex, optional
        A matrix with orthonormal columns. When mode = 'complete' the
        result is an orthogonal/unitary matrix depending on whether or not
        a is real/complex. The determinant may be either +/- 1 in that
        case.
    r : ndarray of float or complex, optional
        The upper-triangular matrix.
    (h, tau) : ndarrays of np.double or np.cdouble, optional
        The array h contains the Householder reflectors that generate q
        along with r. The tau array contains scaling factors for the
        reflectors. In the deprecated 'economic' mode only h is returned.

    Raises
    ------
    LinAlgError
        If factoring fails.

    Notes
    -----
    This is an interface to the LAPACK routines dgeqrf, zgeqrf,
    dorgqr, and zungqr.

    For more information on the qr factorization, see for example:
    http://en.wikipedia.org/wiki/QR_factorization

    Subclasses of `ndarray` are preserved except for the 'raw' mode. So if
    `a` is of type `matrix`, all the return values will be matrices too.

    New 'reduced', 'complete', and 'raw' options for mode were added in
    NumPy 1.8.0 and the old option 'full' was made an alias of 'reduced'. In
    addition the options 'full' and 'economic' were deprecated. Because
    'full' was the previous default and 'reduced' is the new default,
    backward compatibility can be maintained by letting `mode` default.
    The 'raw' option was added so that LAPACK routines that can multiply
    arrays by q using the Householder reflectors can be used. Note that in
    this case the returned arrays are of type np.double or np.cdouble and
    the h array is transposed to be FORTRAN compatible. No routines using
    the 'raw' return are currently exposed by numpy, but some are available
    in lapack_lite and just await the necessary work.

    Examples
    --------
    >>> a = np.random.randn(9, 6)
    >>> q, r = np.linalg.qr(a)
    >>> np.allclose(a, np.dot(q, r))  # a does equal qr
    True
    >>> r2 = np.linalg.qr(a, mode='r')
    >>> r3 = np.linalg.qr(a, mode='economic')
    >>> np.allclose(r, r2)  # mode='r' returns the same r as mode='full'
    True
    >>> # But only triu parts are guaranteed equal when mode='economic'
    >>> np.allclose(r, np.triu(r3[:6,:6], k=0))
    True
    """
    if mode not in ('reduced', 'complete', 'r', 'raw'):
        # Translate deprecated / single-letter legacy modes with a
        # warning; anything else is rejected outright.
        if mode in ('f', 'full'):
            # 2013-04-01, 1.8
            msg = "".join((
                    "The 'full' option is deprecated in favor of 'reduced'.\n",
                    "For backward compatibility let mode default."))
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
            mode = 'reduced'
        elif mode in ('e', 'economic'):
            # 2013-04-01, 1.8
            msg = "The 'economic' option is deprecated."
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
            mode = 'economic'
        else:
            raise ValueError("Unrecognized mode '%s'" % mode)

    a, wrap = _makearray(a)
    _assertRank2(a)
    _assertNoEmpty2d(a)
    m, n = a.shape
    t, result_t = _commonType(a)
    # lapack_lite works in place on Fortran-ordered, native-byte-order
    # data, so copy-transpose before calling it.
    a = _fastCopyAndTranspose(t, a)
    a = _to_native_byte_order(a)
    mn = min(m, n)
    tau = zeros((mn,), t)
    if isComplexType(t):
        lapack_routine = lapack_lite.zgeqrf
        routine_name = 'zgeqrf'
    else:
        lapack_routine = lapack_lite.dgeqrf
        routine_name = 'dgeqrf'

    # calculate optimal size of work data 'work': lwork=-1 is LAPACK's
    # workspace-query convention; the optimal size comes back in work[0]
    lwork = 1
    work = zeros((lwork,), t)
    results = lapack_routine(m, n, a, m, tau, work, -1, 0)
    if results['info'] != 0:
        raise LinAlgError('%s returns %d' % (routine_name, results['info']))

    # do qr decomposition (in place: a now holds r in its upper triangle
    # and the Householder reflectors below it)
    lwork = int(abs(work[0]))
    work = zeros((lwork,), t)
    results = lapack_routine(m, n, a, m, tau, work, lwork, 0)
    if results['info'] != 0:
        raise LinAlgError('%s returns %d' % (routine_name, results['info']))

    # handle modes that don't return q
    if mode == 'r':
        r = _fastCopyAndTranspose(result_t, a[:, :mn])
        return wrap(triu(r))

    if mode == 'raw':
        # Reflectors and scaling factors, still transposed for Fortran;
        # deliberately not wrapped, per the documented contract.
        return a, tau

    if mode == 'economic':
        if t != result_t :
            a = a.astype(result_t, copy=False)
        return wrap(a.T)

    # generate q from a: 'complete' with m > n needs the full (m, m) q,
    # otherwise only the first mn reflector columns are materialized
    if mode == 'complete' and m > n:
        mc = m
        q = empty((m, m), t)
    else:
        mc = mn
        q = empty((n, m), t)
    q[:n] = a

    if isComplexType(t):
        lapack_routine = lapack_lite.zungqr
        routine_name = 'zungqr'
    else:
        lapack_routine = lapack_lite.dorgqr
        routine_name = 'dorgqr'

    # determine optimal lwork (workspace query again, as above)
    lwork = 1
    work = zeros((lwork,), t)
    results = lapack_routine(m, mc, mn, q, m, tau, work, -1, 0)
    if results['info'] != 0:
        raise LinAlgError('%s returns %d' % (routine_name, results['info']))

    # compute q
    lwork = int(abs(work[0]))
    work = zeros((lwork,), t)
    results = lapack_routine(m, mc, mn, q, m, tau, work, lwork, 0)
    if results['info'] != 0:
        raise LinAlgError('%s returns %d' % (routine_name, results['info']))

    # transpose back from Fortran layout, keeping only the needed rows
    q = _fastCopyAndTranspose(result_t, q[:mc])
    r = _fastCopyAndTranspose(result_t, a[:, :mc])

    return wrap(q), wrap(triu(r))
# Eigenvalues
def eigvals(a):
    """
    Compute the eigenvalues of a general matrix.

    Main difference between `eigvals` and `eig`: the eigenvectors are
    not returned.

    Parameters
    ----------
    a : (..., M, M) array_like
        A complex- or real-valued matrix whose eigenvalues will be
        computed.

    Returns
    -------
    w : (..., M,) ndarray
        The eigenvalues, each repeated according to its multiplicity.
        They are not necessarily ordered, nor are they necessarily real
        for real matrices.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eig : eigenvalues and right eigenvectors of general arrays
    eigvalsh : eigenvalues of symmetric or Hermitian arrays.
    eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.

    Notes
    -----
    .. versionadded:: 1.8.0

    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.

    This is implemented using the _geev LAPACK routines which compute
    the eigenvalues and eigenvectors of general square arrays.
    """
    arr, _ = _makearray(a)
    _assertRankAtLeast2(arr)
    _assertNdSquareness(arr)
    _assertFinite(arr)
    in_type, out_type = _commonType(arr)
    # Degenerate 0x0 matrices: nothing to compute.
    if _isEmpty2d(arr):
        return empty(arr.shape[-1:], dtype=out_type)
    extobj = get_linalg_error_extobj(
        _raise_linalgerror_eigenvalues_nonconvergence)
    signature = 'D->D' if isComplexType(in_type) else 'd->D'
    w = _umath_linalg.eigvals(arr, signature=signature, extobj=extobj)
    if not isComplexType(in_type):
        if all(w.imag == 0):
            # Real input with a purely real spectrum: return real values.
            w = w.real
            out_type = _realType(out_type)
        else:
            out_type = _complexType(out_type)
    return w.astype(out_type, copy=False)
def eigvalsh(a, UPLO='L'):
    """
    Compute the eigenvalues of a Hermitian or real symmetric matrix.

    Main difference from eigh: the eigenvectors are not computed.

    Parameters
    ----------
    a : (..., M, M) array_like
        A complex- or real-valued matrix whose eigenvalues are to be
        computed.
    UPLO : {'L', 'U'}, optional
        Whether the calculation uses the lower triangular part of `a`
        ('L', default) or the upper triangular part ('U'). Either way,
        only the real parts of the diagonal are considered, to preserve
        the notion of a Hermitian matrix; the imaginary part of the
        diagonal is always treated as zero.

    Returns
    -------
    w : (..., M,) ndarray
        The eigenvalues in ascending order, each repeated according to
        its multiplicity.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eigh : eigenvalues and eigenvectors of symmetric/Hermitian arrays.
    eigvals : eigenvalues of general real or complex arrays.
    eig : eigenvalues and right eigenvectors of general real or complex
          arrays.

    Notes
    -----
    .. versionadded:: 1.8.0

    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.

    The eigenvalues are computed using LAPACK routines _syevd, _heevd.
    """
    UPLO = UPLO.upper()
    if UPLO not in ('L', 'U'):
        raise ValueError("UPLO argument must be 'L' or 'U'")
    gufunc = (_umath_linalg.eigvalsh_lo if UPLO == 'L'
              else _umath_linalg.eigvalsh_up)
    extobj = get_linalg_error_extobj(
        _raise_linalgerror_eigenvalues_nonconvergence)
    arr, _ = _makearray(a)
    _assertRankAtLeast2(arr)
    _assertNdSquareness(arr)
    in_type, out_type = _commonType(arr)
    # Degenerate 0x0 matrices: return an empty eigenvalue vector.
    if _isEmpty2d(arr):
        return empty(arr.shape[-1:], dtype=out_type)
    signature = 'D->d' if isComplexType(in_type) else 'd->d'
    w = gufunc(arr, signature=signature, extobj=extobj)
    # Eigenvalues of Hermitian/symmetric matrices are always real.
    return w.astype(_realType(out_type), copy=False)
def _convertarray(a):
    # Promote to the common LAPACK-compatible type and return a
    # Fortran-ordered copy along with the working and result types.
    in_type, out_type = _commonType(a)
    converted = _fastCT(a.astype(in_type))
    return converted, in_type, out_type
# Eigenvectors
def eig(a):
    """
    Compute the eigenvalues and right eigenvectors of a square array.

    Parameters
    ----------
    a : (..., M, M) array
        Matrices for which the eigenvalues and right eigenvectors will
        be computed.

    Returns
    -------
    w : (..., M) array
        The eigenvalues, each repeated according to its multiplicity,
        in no particular order. The resulting array is of complex type,
        unless the imaginary part is zero in which case it is cast to a
        real type. When `a` is real the eigenvalues are real (zero
        imaginary part) or occur in conjugate pairs.
    v : (..., M, M) array
        The normalized (unit "length") eigenvectors, such that column
        ``v[:,i]`` is the eigenvector corresponding to the eigenvalue
        ``w[i]``.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eigvals : eigenvalues of a non-symmetric array.
    eigh : eigenvalues and eigenvectors of a symmetric or Hermitian
           (conjugate symmetric) array.
    eigvalsh : eigenvalues of a symmetric or Hermitian
               (conjugate symmetric) array.

    Notes
    -----
    .. versionadded:: 1.8.0

    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.

    This is implemented using the _geev LAPACK routines which compute
    the eigenvalues and eigenvectors of general square arrays. The
    arrays satisfy ``dot(a[:,:], v[:,i]) = w[i] * v[:,i]``. Note that
    `v` holds the *right* eigenvectors; in general, the left and right
    eigenvectors of a matrix are not the (perhaps conjugate) transposes
    of each other.

    References
    ----------
    G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
    FL, Academic Press, Inc., 1980, Various pp.
    """
    arr, wrap = _makearray(a)
    _assertRankAtLeast2(arr)
    _assertNdSquareness(arr)
    _assertFinite(arr)
    in_type, out_type = _commonType(arr)
    # Degenerate 0x0 matrices: hand back empty results immediately.
    if _isEmpty2d(arr):
        vals = empty(arr.shape[-1:], dtype=out_type)
        vecs = empty(arr.shape, dtype=out_type)
        return vals, wrap(vecs)
    extobj = get_linalg_error_extobj(
        _raise_linalgerror_eigenvalues_nonconvergence)
    signature = 'D->DD' if isComplexType(in_type) else 'd->DD'
    vals, vecs = _umath_linalg.eig(arr, signature=signature, extobj=extobj)
    if not isComplexType(in_type) and all(vals.imag == 0.0):
        # Real input with a purely real spectrum: drop imaginary parts.
        vals = vals.real
        vecs = vecs.real
        out_type = _realType(out_type)
    else:
        out_type = _complexType(out_type)
    vecs = vecs.astype(out_type, copy=False)
    return vals.astype(out_type, copy=False), wrap(vecs)
def eigh(a, UPLO='L'):
    """
    Return the eigenvalues and eigenvectors of a Hermitian or symmetric
    matrix.

    Returns two objects: a 1-D array containing the eigenvalues of `a`,
    and a 2-D square array or matrix (depending on the input type) of
    the corresponding eigenvectors (in columns).

    Parameters
    ----------
    a : (..., M, M) array
        Hermitian/Symmetric matrices whose eigenvalues and eigenvectors
        are to be computed.
    UPLO : {'L', 'U'}, optional
        Whether the calculation uses the lower triangular part of `a`
        ('L', default) or the upper triangular part ('U'). Either way,
        only the real parts of the diagonal are considered, to preserve
        the notion of a Hermitian matrix; the imaginary part of the
        diagonal is always treated as zero.

    Returns
    -------
    w : (..., M) ndarray
        The eigenvalues in ascending order, each repeated according to
        its multiplicity.
    v : {(..., M, M) ndarray, (..., M, M) matrix}
        Column ``v[:, i]`` is the normalized eigenvector corresponding
        to the eigenvalue ``w[i]``. A matrix object is returned when
        `a` is a matrix object.

    Raises
    ------
    LinAlgError
        If the eigenvalue computation does not converge.

    See Also
    --------
    eigvalsh : eigenvalues of symmetric or Hermitian arrays.
    eig : eigenvalues and right eigenvectors for non-symmetric arrays.
    eigvals : eigenvalues of non-symmetric arrays.

    Notes
    -----
    .. versionadded:: 1.8.0

    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.

    The eigenvalues/eigenvectors are computed using LAPACK routines
    _syevd, _heevd. The eigenvalues of real symmetric or complex
    Hermitian matrices are always real.

    References
    ----------
    G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
    FL, Academic Press, Inc., 1980, pg. 222.
    """
    UPLO = UPLO.upper()
    if UPLO not in ('L', 'U'):
        raise ValueError("UPLO argument must be 'L' or 'U'")
    arr, wrap = _makearray(a)
    _assertRankAtLeast2(arr)
    _assertNdSquareness(arr)
    in_type, out_type = _commonType(arr)
    # Degenerate 0x0 matrices: hand back empty results immediately.
    if _isEmpty2d(arr):
        vals = empty(arr.shape[-1:], dtype=out_type)
        vecs = empty(arr.shape, dtype=out_type)
        return vals, wrap(vecs)
    gufunc = (_umath_linalg.eigh_lo if UPLO == 'L'
              else _umath_linalg.eigh_up)
    extobj = get_linalg_error_extobj(
        _raise_linalgerror_eigenvalues_nonconvergence)
    signature = 'D->dD' if isComplexType(in_type) else 'd->dd'
    vals, vecs = gufunc(arr, signature=signature, extobj=extobj)
    # Eigenvalues are real for Hermitian input; eigenvectors keep the
    # common (possibly complex) type.
    vals = vals.astype(_realType(out_type), copy=False)
    vecs = vecs.astype(out_type, copy=False)
    return vals, wrap(vecs)
# Singular value decomposition
def svd(a, full_matrices=True, compute_uv=True):
    """
    Singular Value Decomposition.

    Factors the matrix `a` as ``u * np.diag(s) * v``, where `u` and `v`
    are unitary and `s` is a 1-d array of `a`'s singular values.

    Parameters
    ----------
    a : (..., M, N) array_like
        A real or complex matrix of shape (`M`, `N`).
    full_matrices : bool, optional
        If True (default), `u` and `v` have the shapes (`M`, `M`) and
        (`N`, `N`), respectively. Otherwise, the shapes are (`M`, `K`)
        and (`K`, `N`), respectively, where `K` = min(`M`, `N`).
    compute_uv : bool, optional
        Whether or not to compute `u` and `v` in addition to `s`. True
        by default.

    Returns
    -------
    u : { (..., M, M), (..., M, K) } array
        Unitary matrices. The actual shape depends on the value of
        ``full_matrices``. Only returned when ``compute_uv`` is True.
    s : (..., K) array
        The singular values for every matrix, sorted in descending order.
    v : { (..., N, N), (..., K, N) } array
        Unitary matrices. The actual shape depends on the value of
        ``full_matrices``. Only returned when ``compute_uv`` is True.

    Raises
    ------
    LinAlgError
        If SVD computation does not converge.

    Notes
    -----
    .. versionadded:: 1.8.0

    Broadcasting rules apply, see the `numpy.linalg` documentation for
    details.

    The decomposition is performed using LAPACK routine _gesdd.

    The SVD is commonly written as ``a = U S V.H``. The `v` returned by
    this function is ``V.H`` and ``u = U``. If ``U`` is a unitary
    matrix, it satisfies ``U.H = inv(U)``. The rows of `v` are the
    eigenvectors of ``a.H a``; the columns of `u` are the eigenvectors
    of ``a a.H``; the corresponding eigenvalue for row/column ``i`` is
    ``s[i]**2``. If `a` is a `matrix` object (as opposed to an
    `ndarray`), then so are all the return values.
    """
    a, wrap = _makearray(a)
    _assertNoEmpty2d(a)
    _assertRankAtLeast2(a)
    t, result_t = _commonType(a)
    extobj = get_linalg_error_extobj(_raise_linalgerror_svd_nonconvergence)
    m = a.shape[-2]
    n = a.shape[-1]
    if compute_uv:
        # Select the gufunc variant by orientation (m < n vs m >= n) and
        # by whether full-size u/v are requested.
        if full_matrices:
            gufunc = _umath_linalg.svd_m_f if m < n else _umath_linalg.svd_n_f
        else:
            gufunc = _umath_linalg.svd_m_s if m < n else _umath_linalg.svd_n_s
        signature = 'D->DdD' if isComplexType(t) else 'd->ddd'
        u, s, vt = gufunc(a, signature=signature, extobj=extobj)
        u = u.astype(result_t, copy=False)
        # Singular values are always real, even for complex input.
        s = s.astype(_realType(result_t), copy=False)
        vt = vt.astype(result_t, copy=False)
        return wrap(u), s, wrap(vt)
    else:
        gufunc = _umath_linalg.svd_m if m < n else _umath_linalg.svd_n
        signature = 'D->d' if isComplexType(t) else 'd->d'
        s = gufunc(a, signature=signature, extobj=extobj)
        s = s.astype(_realType(result_t), copy=False)
        return s
def cond(x, p=None):
    """
    Compute the condition number of a matrix.

    The condition number can be computed with any of seven different
    norms, selected by the value of `p` (see Parameters below).

    Parameters
    ----------
    x : (..., M, N) array_like
        The matrix whose condition number is sought.
    p : {None, 1, -1, 2, -2, inf, -inf, 'fro'}, optional
        Order of the norm:

        =====  ============================
        p      norm for matrices
        =====  ============================
        None   2-norm, computed directly using the ``SVD``
        'fro'  Frobenius norm
        inf    max(sum(abs(x), axis=1))
        -inf   min(sum(abs(x), axis=1))
        1      max(sum(abs(x), axis=0))
        -1     min(sum(abs(x), axis=0))
        2      2-norm (largest sing. value)
        -2     smallest singular value
        =====  ============================

        inf means the numpy.inf object, and the Frobenius norm is the
        root-of-sum-of-squares norm.

    Returns
    -------
    c : {float, inf}
        The condition number of the matrix. May be infinite.

    See Also
    --------
    numpy.linalg.norm

    Notes
    -----
    The condition number of `x` is defined as the norm of `x` times the
    norm of the inverse of `x` [1]_; the norm can be the usual L2-norm
    (root-of-sum-of-squares) or one of a number of other matrix norms.

    References
    ----------
    .. [1] G. Strang, *Linear Algebra and Its Applications*, Orlando,
           FL, Academic Press, Inc., 1980, pg. 285.
    """
    x = asarray(x)  # in case a matrix object is passed in
    if p is None:
        # 2-norm condition number: ratio of largest to smallest
        # singular value.
        sv = svd(x, compute_uv=False)
        return sv[..., 0] / sv[..., -1]
    # Every other norm order: norm(x) * norm(inv(x)).
    return norm(x, p, axis=(-2, -1)) * norm(inv(x), p, axis=(-2, -1))
def matrix_rank(M, tol=None):
    """
    Return matrix rank of array using SVD method.

    The rank is the number of singular values of `M` that exceed `tol`.

    Parameters
    ----------
    M : {(M,), (M, N)} array_like
        Array of at most 2 dimensions.
    tol : {None, float}, optional
        Threshold below which singular values are considered zero.  When
        `tol` is None it defaults to ``S.max() * max(M.shape) * eps``,
        where ``S`` holds the singular values of `M` and ``eps`` is the
        machine epsilon of their dtype.  This is the threshold MATLAB
        uses [1]_ and is designed to detect rank deficiency while
        accounting for numerical error in the SVD itself; supply your own
        `tol` if your data carry larger uncertainties [2]_.

    References
    ----------
    .. [1] MATLAB reference documention, "Rank"
           http://www.mathworks.com/help/techdoc/ref/rank.html
    .. [2] W. H. Press, S. A. Teukolsky, W. T. Vetterling and B. P. Flannery,
           "Numerical Recipes (3rd edition)", Cambridge University Press, 2007,
           page 795.

    Examples
    --------
    >>> from numpy.linalg import matrix_rank
    >>> matrix_rank(np.eye(4))  # Full rank matrix
    4
    >>> I=np.eye(4); I[-1,-1] = 0.  # rank deficient matrix
    >>> matrix_rank(I)
    3
    >>> matrix_rank(np.ones((4,)))  # 1 dimension - rank 1 unless all 0
    1
    >>> matrix_rank(np.zeros((4,)))
    0
    """
    M = asarray(M)
    if M.ndim > 2:
        raise TypeError('array should have 2 or fewer dimensions')
    if M.ndim < 2:
        # Scalars and vectors have rank 1 unless every entry is zero.
        return int(not all(M == 0))
    sv = svd(M, compute_uv=False)
    if tol is None:
        # MATLAB-style default threshold (see docstring).
        tol = sv.max() * max(M.shape) * finfo(sv.dtype).eps
    return sum(sv > tol)
# Generalized inverse
def pinv(a, rcond=1e-15 ):
    """
    Compute the (Moore-Penrose) pseudo-inverse of a matrix.

    The generalized inverse is computed from the singular-value
    decomposition of `a`, keeping only the *large* singular values:
    if :math:`Q_1 \\Sigma Q_2^T = A` is the SVD of A, then
    :math:`A^+ = Q_2 \\Sigma^+ Q_1^T`, where :math:`\\Sigma^+` holds the
    reciprocals of A's significant singular values (zeros elsewhere) [1]_.

    Parameters
    ----------
    a : (M, N) array_like
        Matrix to be pseudo-inverted.
    rcond : float
        Cutoff for small singular values: singular values smaller (in
        modulus) than ``rcond * largest_singular_value`` are set to zero.

    Returns
    -------
    B : (N, M) ndarray
        The pseudo-inverse of `a`. If `a` is a `matrix` instance, then so
        is `B`.

    Raises
    ------
    LinAlgError
        If the SVD computation does not converge.

    References
    ----------
    .. [1] G. Strang, *Linear Algebra and Its Applications*, 2nd Ed., Orlando,
           FL, Academic Press, Inc., 1980, pp. 139-142.

    Examples
    --------
    >>> a = np.random.randn(9, 6)
    >>> B = np.linalg.pinv(a)
    >>> np.allclose(a, np.dot(a, np.dot(B, a)))
    True
    >>> np.allclose(B, np.dot(B, np.dot(a, B)))
    True
    """
    a, wrap = _makearray(a)
    if _isEmpty2d(a):
        # Pseudo-inverse of an empty matrix is the empty transposed shape.
        out = empty(a.shape[:-2] + (a.shape[-1], a.shape[-2]), dtype=a.dtype)
        return wrap(out)
    a = a.conjugate()
    u, s, vt = svd(a, 0)
    rows = u.shape[0]
    cols = vt.shape[1]
    cutoff = rcond*maximum.reduce(s)
    # Invert the significant singular values in place; drop the rest.
    for i in range(min(cols, rows)):
        s[i] = 1./s[i] if s[i] > cutoff else 0.
    out = dot(transpose(vt), multiply(s[:, newaxis], transpose(u)))
    return wrap(out)
# Determinant
def slogdet(a):
    """
    Compute the sign and (natural) logarithm of the determinant of an array.

    For arrays with very small or very large determinants, `det` may
    underflow or overflow; this routine is more robust because it works
    with the logarithm of the determinant instead.  The determinant is
    computed via LU factorization using the LAPACK routine ``z/dgetrf``.

    Parameters
    ----------
    a : (..., M, M) array_like
        Input array, has to be a square 2-D array.

    Returns
    -------
    sign : (...) array_like
        Sign of the determinant: 1, 0 or -1 for real input; a complex
        number on the unit circle (or 0) for complex input.
    logdet : (...) array_like
        Natural log of the absolute value of the determinant.

    If the determinant is zero, `sign` is 0 and `logdet` is -Inf.  In all
    cases ``sign * np.exp(logdet)`` equals the determinant.

    See Also
    --------
    det

    Notes
    -----
    .. versionadded:: 1.8.0
       Broadcasting rules apply, see the `numpy.linalg` documentation for
       details.

    Examples
    --------
    >>> a = np.array([[1, 2], [3, 4]])
    >>> (sign, logdet) = np.linalg.slogdet(a)
    >>> (sign, logdet)
    (-1, 0.69314718055994529)
    >>> sign * np.exp(logdet)
    -2.0
    >>> np.linalg.slogdet(np.eye(500) * 0.1)  # where det() underflows to 0
    (1, -1151.2925464970228)
    """
    a = asarray(a)
    _assertRankAtLeast2(a)
    _assertNdSquareness(a)
    t, result_t = _commonType(a)
    real_t = _realType(result_t)
    if _isEmpty2d(a):
        # The determinant of an empty matrix is 1: sign 1, log-det 0.
        return (ones(a.shape[:-2], dtype=result_t),
                zeros(a.shape[:-2], dtype=real_t))
    signature = 'D->Dd' if isComplexType(t) else 'd->dd'
    sign, logdet = _umath_linalg.slogdet(a, signature=signature)

    def _cast(value, dtype):
        # 0-d results come back as numpy scalars whose astype() does not
        # accept the copy keyword; handle both cases uniformly.
        if isscalar(value):
            return value.astype(dtype)
        return value.astype(dtype, copy=False)

    return _cast(sign, result_t), _cast(logdet, real_t)
def det(a):
    """
    Compute the determinant of an array.

    The determinant is computed via LU factorization using the LAPACK
    routine ``z/dgetrf``.

    Parameters
    ----------
    a : (..., M, M) array_like
        Input array to compute determinants for.

    Returns
    -------
    det : (...) array_like
        Determinant of `a`.

    See Also
    --------
    slogdet : Another way to representing the determinant, more suitable
      for large matrices where underflow/overflow may occur.

    Notes
    -----
    .. versionadded:: 1.8.0
       Broadcasting rules apply, see the `numpy.linalg` documentation for
       details.

    Examples
    --------
    The determinant of a 2-D array [[a, b], [c, d]] is ad - bc:

    >>> a = np.array([[1, 2], [3, 4]])
    >>> np.linalg.det(a)
    -2.0

    Computing determinants for a stack of matrices:

    >>> a = np.array([ [[1, 2], [3, 4]], [[1, 2], [2, 1]], [[1, 3], [3, 1]] ])
    >>> np.linalg.det(a)
    array([-2., -3., -8.])
    """
    a = asarray(a)
    _assertRankAtLeast2(a)
    _assertNdSquareness(a)
    t, result_t = _commonType(a)
    if _isEmpty2d(a):
        # 0x0 matrices have determinant 1 (the empty product).
        return ones(a.shape[:-2], dtype=result_t)
    signature = 'D->D' if isComplexType(t) else 'd->d'
    result = _umath_linalg.det(a, signature=signature)
    # 0-d results come back as numpy scalars whose astype() rejects the
    # copy keyword; handle both cases.
    if isscalar(result):
        return result.astype(result_t)
    return result.astype(result_t, copy=False)
# Linear Least Squares
def lstsq(a, b, rcond=-1):
    """
    Return the least-squares solution to a linear matrix equation.

    Solves the equation `a x = b` by computing a vector `x` that
    minimizes the Euclidean 2-norm `|| b - a x ||^2`.  The equation may
    be under-, well-, or over- determined (i.e., the number of
    linearly independent rows of `a` can be less than, equal to, or
    greater than its number of linearly independent columns).  If `a`
    is square and of full rank, then `x` (but for round-off error) is
    the "exact" solution of the equation.

    Parameters
    ----------
    a : (M, N) array_like
        "Coefficient" matrix.
    b : {(M,), (M, K)} array_like
        Ordinate or "dependent variable" values. If `b` is two-dimensional,
        the least-squares solution is calculated for each of the `K` columns
        of `b`.
    rcond : float, optional
        Cut-off ratio for small singular values of `a`.
        For the purposes of rank determination, singular values are treated
        as zero if they are smaller than `rcond` times the largest singular
        value of `a`.

    Returns
    -------
    x : {(N,), (N, K)} ndarray
        Least-squares solution. If `b` is two-dimensional,
        the solutions are in the `K` columns of `x`.
    residuals : {(), (1,), (K,)} ndarray
        Sums of residuals; squared Euclidean 2-norm for each column in
        ``b - a*x``.
        If the rank of `a` is < N or M <= N, this is an empty array.
        If `b` is 1-dimensional, this is a (1,) shape array.
        Otherwise the shape is (K,).
    rank : int
        Rank of matrix `a`.
    s : (min(M, N),) ndarray
        Singular values of `a`.

    Raises
    ------
    LinAlgError
        If computation does not converge.

    Notes
    -----
    If `b` is a matrix, then all array results are returned as matrices.

    Examples
    --------
    Fit a line, ``y = mx + c``, through some noisy data-points:

    >>> x = np.array([0, 1, 2, 3])
    >>> y = np.array([-1, 0.2, 0.9, 2.1])

    We can rewrite the line equation as ``y = Ap``, where ``A = [[x 1]]``
    and ``p = [[m], [c]]``.  Now use `lstsq` to solve for `p`:

    >>> A = np.vstack([x, np.ones(len(x))]).T
    >>> m, c = np.linalg.lstsq(A, y)[0]
    >>> print(m, c)
    1.0 -0.95

    Plot the data along with the fitted line:

    >>> import matplotlib.pyplot as plt
    >>> plt.plot(x, y, 'o', label='Original data', markersize=10)
    >>> plt.plot(x, m*x + c, 'r', label='Fitted line')
    >>> plt.legend()
    >>> plt.show()
    """
    import math
    a, _ = _makearray(a)
    b, wrap = _makearray(b)
    is_1d = b.ndim == 1
    if is_1d:
        # LAPACK wants a 2-D right-hand side; the 1-D shape is restored below.
        b = b[:, newaxis]
    _assertRank2(a, b)
    _assertNoEmpty2d(a, b)  # TODO: relax this constraint
    m = a.shape[0]
    n = a.shape[1]
    n_rhs = b.shape[1]
    # xGELSD overwrites b with the solution, so b must have room for the
    # larger of m (input rows) and n (solution rows).
    ldb = max(n, m)
    if m != b.shape[0]:
        raise LinAlgError('Incompatible dimensions')
    t, result_t = _commonType(a, b)
    result_real_t = _realType(result_t)
    real_t = _linalgRealType(t)
    bstar = zeros((ldb, n_rhs), t)
    bstar[:b.shape[0], :n_rhs] = b.copy()
    # LAPACK expects column-major, native-endian data.
    a, bstar = _fastCopyAndTranspose(t, a, bstar)
    a, bstar = _to_native_byte_order(a, bstar)
    s = zeros((min(m, n),), real_t)  # receives the singular values of a
    # This line:
    # * is incorrect, according to the LAPACK documentation
    # * raises a ValueError if min(m,n) == 0
    # * should not be calculated here anyway, as LAPACK should calculate
    #   `liwork` for us. But that only works if our version of lapack does
    #   not have this bug:
    #       http://icl.cs.utk.edu/lapack-forum/archives/lapack/msg00899.html
    #   Lapack_lite does have that bug...
    nlvl = max( 0, int( math.log( float(min(m, n))/2. ) ) + 1 )
    iwork = zeros((3*min(m, n)*nlvl+11*min(m, n),), fortran_int)
    if isComplexType(t):
        lapack_routine = lapack_lite.zgelsd
        # Workspace query: call with lwork=-1 so LAPACK reports the optimal
        # complex workspace size in work[0].
        lwork = 1
        rwork = zeros((lwork,), real_t)
        work = zeros((lwork,), t)
        results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
                                 0, work, -1, rwork, iwork, 0)
        lwork = int(abs(work[0]))
        rwork = zeros((lwork,), real_t)
        # Second query (via dgelsd on dummy real arrays) to size the real
        # workspace `rwork` needed by zgelsd.
        a_real = zeros((m, n), real_t)
        bstar_real = zeros((ldb, n_rhs,), real_t)
        results = lapack_lite.dgelsd(m, n, n_rhs, a_real, m,
                                     bstar_real, ldb, s, rcond,
                                     0, rwork, -1, iwork, 0)
        lrwork = int(rwork[0])
        work = zeros((lwork,), t)
        rwork = zeros((lrwork,), real_t)
        # Actual solve with properly sized workspaces.
        results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
                                 0, work, lwork, rwork, iwork, 0)
    else:
        lapack_routine = lapack_lite.dgelsd
        # Workspace query (lwork=-1), then the actual solve.
        lwork = 1
        work = zeros((lwork,), t)
        results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
                                 0, work, -1, iwork, 0)
        lwork = int(work[0])
        work = zeros((lwork,), t)
        results = lapack_routine(m, n, n_rhs, a, m, bstar, ldb, s, rcond,
                                 0, work, lwork, iwork, 0)
    if results['info'] > 0:
        raise LinAlgError('SVD did not converge in Linear Least Squares')
    # Residuals are only well-defined for full-rank, overdetermined systems;
    # otherwise an empty array is returned (see docstring).
    resids = array([], result_real_t)
    if is_1d:
        x = array(ravel(bstar)[:n], dtype=result_t, copy=True)
        if results['rank'] == n and m > n:
            if isComplexType(t):
                resids = array([sum(abs(ravel(bstar)[n:])**2)],
                               dtype=result_real_t)
            else:
                resids = array([sum((ravel(bstar)[n:])**2)],
                               dtype=result_real_t)
    else:
        x = array(transpose(bstar)[:n,:], dtype=result_t, copy=True)
        if results['rank'] == n and m > n:
            if isComplexType(t):
                resids = sum(abs(transpose(bstar)[n:,:])**2, axis=0).astype(
                    result_real_t, copy=False)
            else:
                resids = sum((transpose(bstar)[n:,:])**2, axis=0).astype(
                    result_real_t, copy=False)
    st = s[:min(n, m)].astype(result_real_t, copy=True)
    return wrap(x), wrap(resids), results['rank'], st
def _multi_svd_norm(x, row_axis, col_axis, op):
"""Compute a function of the singular values of the 2-D matrices in `x`.
This is a private utility function used by numpy.linalg.norm().
Parameters
----------
x : ndarray
row_axis, col_axis : int
The axes of `x` that hold the 2-D matrices.
op : callable
This should be either numpy.amin or numpy.amax or numpy.sum.
Returns
-------
result : float or ndarray
If `x` is 2-D, the return values is a float.
Otherwise, it is an array with ``x.ndim - 2`` dimensions.
The return values are either the minimum or maximum or sum of the
singular values of the matrices, depending on whether `op`
is `numpy.amin` or `numpy.amax` or `numpy.sum`.
"""
if row_axis > col_axis:
row_axis -= 1
y = rollaxis(rollaxis(x, col_axis, x.ndim), row_axis, -1)
result = op(svd(y, compute_uv=0), axis=-1)
return result
def norm(x, ord=None, axis=None, keepdims=False):
    """
    Matrix or vector norm.

    This function is able to return one of eight different matrix norms,
    or one of an infinite number of vector norms (described below), depending
    on the value of the ``ord`` parameter.

    Parameters
    ----------
    x : array_like
        Input array.  If `axis` is None, `x` must be 1-D or 2-D.
    ord : {non-zero int, inf, -inf, 'fro', 'nuc'}, optional
        Order of the norm (see table under ``Notes``). inf means numpy's
        `inf` object.
    axis : {int, 2-tuple of ints, None}, optional
        If `axis` is an integer, it specifies the axis of `x` along which to
        compute the vector norms.  If `axis` is a 2-tuple, it specifies the
        axes that hold 2-D matrices, and the matrix norms of these matrices
        are computed.  If `axis` is None then either a vector norm (when `x`
        is 1-D) or a matrix norm (when `x` is 2-D) is returned.
    keepdims : bool, optional
        If this is set to True, the axes which are normed over are left in the
        result as dimensions with size one.  With this option the result will
        broadcast correctly against the original `x`.

        .. versionadded:: 1.10.0

    Returns
    -------
    n : float or ndarray
        Norm of the matrix or vector(s).

    Notes
    -----
    For values of ``ord <= 0``, the result is, strictly speaking, not a
    mathematical 'norm', but it may still be useful for various numerical
    purposes.

    The following norms can be calculated:

    =====  ============================  ==========================
    ord    norm for matrices             norm for vectors
    =====  ============================  ==========================
    None   Frobenius norm                2-norm
    'fro'  Frobenius norm                --
    'nuc'  nuclear norm                  --
    inf    max(sum(abs(x), axis=1))      max(abs(x))
    -inf   min(sum(abs(x), axis=1))      min(abs(x))
    0      --                            sum(x != 0)
    1      max(sum(abs(x), axis=0))      as below
    -1     min(sum(abs(x), axis=0))      as below
    2      2-norm (largest sing. value)  as below
    -2     smallest singular value       as below
    other  --                            sum(abs(x)**ord)**(1./ord)
    =====  ============================  ==========================

    The Frobenius norm is given by [1]_:

        :math:`||A||_F = [\\sum_{i,j} abs(a_{i,j})^2]^{1/2}`

    The nuclear norm is the sum of the singular values.

    References
    ----------
    .. [1] G. H. Golub and C. F. Van Loan, *Matrix Computations*,
           Baltimore, MD, Johns Hopkins University Press, 1985, pg. 15

    Examples
    --------
    >>> from numpy import linalg as LA
    >>> a = np.arange(9) - 4
    >>> b = a.reshape((3, 3))

    >>> LA.norm(a)
    7.745966692414834
    >>> LA.norm(b)
    7.745966692414834
    >>> LA.norm(b, 'fro')
    7.745966692414834
    >>> LA.norm(a, np.inf)
    4.0
    >>> LA.norm(b, np.inf)
    9.0
    >>> LA.norm(a, 1)
    20.0
    >>> LA.norm(b, 1)
    7.0
    >>> LA.norm(b, 2)
    7.3484692283495345
    >>> LA.norm(a, -2)
    nan
    >>> LA.norm(a, 3)
    5.8480354764257312

    Using the `axis` argument to compute vector norms:

    >>> c = np.array([[ 1, 2, 3],
    ...               [-1, 1, 4]])
    >>> LA.norm(c, axis=0)
    array([ 1.41421356,  2.23606798,  5.        ])
    >>> LA.norm(c, axis=1)
    array([ 3.74165739,  4.24264069])
    >>> LA.norm(c, ord=1, axis=1)
    array([ 6.,  6.])

    Using the `axis` argument to compute matrix norms:

    >>> m = np.arange(8).reshape(2,2,2)
    >>> LA.norm(m, axis=(1,2))
    array([  3.74165739,  11.22497216])
    >>> LA.norm(m[0, :, :]), LA.norm(m[1, :, :])
    (3.7416573867739413, 11.224972160321824)
    """
    x = asarray(x)
    # Integer (and bool) input is promoted to float so the reductions below
    # do not overflow or truncate.
    if not issubclass(x.dtype.type, (inexact, object_)):
        x = x.astype(float)
    # Immediately handle some default, simple, fast, and common cases.
    if axis is None:
        ndim = x.ndim
        if ((ord is None) or
            (ord in ('f', 'fro') and ndim == 2) or
            (ord == 2 and ndim == 1)):
            # All three cases reduce to the root-of-sum-of-squares of the
            # flattened array, which dot() computes fastest.
            x = x.ravel(order='K')
            if isComplexType(x.dtype.type):
                sqnorm = dot(x.real, x.real) + dot(x.imag, x.imag)
            else:
                sqnorm = dot(x, x)
            ret = sqrt(sqnorm)
            if keepdims:
                ret = ret.reshape(ndim*[1])
            return ret
    # Normalize the `axis` argument to a tuple.
    nd = x.ndim
    if axis is None:
        axis = tuple(range(nd))
    elif not isinstance(axis, tuple):
        try:
            axis = int(axis)
        except:
            raise TypeError("'axis' must be None, an integer or a tuple of integers")
        axis = (axis,)
    if len(axis) == 1:
        # --- Vector norms along a single axis ---
        if ord == Inf:
            return abs(x).max(axis=axis, keepdims=keepdims)
        elif ord == -Inf:
            return abs(x).min(axis=axis, keepdims=keepdims)
        elif ord == 0:
            # Zero norm
            return (x != 0).astype(float).sum(axis=axis, keepdims=keepdims)
        elif ord == 1:
            # special case for speedup
            return add.reduce(abs(x), axis=axis, keepdims=keepdims)
        elif ord is None or ord == 2:
            # special case for speedup
            s = (x.conj() * x).real
            return sqrt(add.reduce(s, axis=axis, keepdims=keepdims))
        else:
            try:
                # Reject non-numeric orders (e.g. 'fro' on a vector).
                ord + 1
            except TypeError:
                raise ValueError("Invalid norm order for vectors.")
            if x.dtype.type is longdouble:
                # Convert to a float type, so integer arrays give
                # float results.  Don't apply asfarray to longdouble arrays,
                # because it will downcast to float64.
                absx = abs(x)
            else:
                absx = x if isComplexType(x.dtype.type) else asfarray(x)
                if absx.dtype is x.dtype:
                    absx = abs(absx)
                else:
                    # if the type changed, we can safely overwrite absx
                    abs(absx, out=absx)
            absx **= ord
            return add.reduce(absx, axis=axis, keepdims=keepdims) ** (1.0 / ord)
    elif len(axis) == 2:
        # --- Matrix norms over a pair of axes ---
        row_axis, col_axis = axis
        row_axis = normalize_axis_index(row_axis, nd)
        col_axis = normalize_axis_index(col_axis, nd)
        if row_axis == col_axis:
            raise ValueError('Duplicate axes given.')
        if ord == 2:
            ret = _multi_svd_norm(x, row_axis, col_axis, amax)
        elif ord == -2:
            ret = _multi_svd_norm(x, row_axis, col_axis, amin)
        elif ord == 1:
            # Max/min column or row sums.  The first reduction removes an
            # axis, so the second axis index must be shifted when it comes
            # after the reduced one.
            if col_axis > row_axis:
                col_axis -= 1
            ret = add.reduce(abs(x), axis=row_axis).max(axis=col_axis)
        elif ord == Inf:
            if row_axis > col_axis:
                row_axis -= 1
            ret = add.reduce(abs(x), axis=col_axis).max(axis=row_axis)
        elif ord == -1:
            if col_axis > row_axis:
                col_axis -= 1
            ret = add.reduce(abs(x), axis=row_axis).min(axis=col_axis)
        elif ord == -Inf:
            if row_axis > col_axis:
                row_axis -= 1
            ret = add.reduce(abs(x), axis=col_axis).min(axis=row_axis)
        elif ord in [None, 'fro', 'f']:
            ret = sqrt(add.reduce((x.conj() * x).real, axis=axis))
        elif ord == 'nuc':
            ret = _multi_svd_norm(x, row_axis, col_axis, sum)
        else:
            raise ValueError("Invalid norm order for matrices.")
        if keepdims:
            ret_shape = list(x.shape)
            ret_shape[axis[0]] = 1
            ret_shape[axis[1]] = 1
            ret = ret.reshape(ret_shape)
        return ret
    else:
        raise ValueError("Improper number of dimensions to norm.")
# multi_dot
def multi_dot(arrays):
    """
    Compute the dot product of two or more arrays in a single function call,
    while automatically selecting the fastest evaluation order.

    `multi_dot` chains `numpy.dot` and uses optimal parenthesization
    of the matrices [1]_ [2]_.  Depending on the shapes of the matrices,
    this can speed up the multiplication a lot.  Think of `multi_dot` as::

        def multi_dot(arrays): return functools.reduce(np.dot, arrays)

    Parameters
    ----------
    arrays : sequence of array_like
        If the first argument is 1-D it is treated as a row vector.
        If the last argument is 1-D it is treated as a column vector.
        The other arguments must be 2-D.

    Returns
    -------
    output : ndarray
        Returns the dot product of the supplied arrays.

    See Also
    --------
    dot : dot multiplication with two arguments.

    References
    ----------
    .. [1] Cormen, "Introduction to Algorithms", Chapter 15.2, p. 370-378
    .. [2] http://en.wikipedia.org/wiki/Matrix_chain_multiplication

    Examples
    --------
    `multi_dot` allows you to write::

    >>> from numpy.linalg import multi_dot
    >>> # Prepare some data
    >>> A = np.random.random(10000, 100)
    >>> B = np.random.random(100, 1000)
    >>> C = np.random.random(1000, 5)
    >>> D = np.random.random(5, 333)
    >>> multi_dot([A, B, C, D])

    instead of ``np.dot(np.dot(np.dot(A, B), C), D)``.

    With :math:`A_{10x100}, B_{100x5}, C_{5x50}` the two parenthesizations
    cost (``cost(A, B) = A.shape[0] * A.shape[1] * B.shape[1]``)::

        cost((AB)C) = 10*100*5 + 10*5*50   = 5000 + 2500   = 7500
        cost(A(BC)) = 10*100*50 + 100*5*50 = 50000 + 25000 = 75000
    """
    num = len(arrays)
    if num < 2:
        raise ValueError("Expecting at least two arrays.")
    if num == 2:
        # Nothing to optimize for exactly two operands.
        return dot(arrays[0], arrays[1])
    arrays = [asanyarray(entry) for entry in arrays]
    # Remember the original dimensionality so vector results can be
    # restored to 1-D / scalar at the end.
    first_ndim = arrays[0].ndim
    last_ndim = arrays[-1].ndim
    # Promote boundary vectors to 2-D so the internal _multi_dot_* helpers
    # only ever deal with matrices.
    if arrays[0].ndim == 1:
        arrays[0] = atleast_2d(arrays[0])       # row vector
    if arrays[-1].ndim == 1:
        arrays[-1] = atleast_2d(arrays[-1]).T   # column vector
    _assertRank2(*arrays)
    if num == 3:
        # Dedicated three-operand path: much faster than the DP ordering.
        result = _multi_dot_three(arrays[0], arrays[1], arrays[2])
    else:
        order = _multi_dot_matrix_chain_order(arrays)
        result = _multi_dot(arrays, order, 0, num - 1)
    # Undo the vector promotion.
    if first_ndim == 1 and last_ndim == 1:
        return result[0, 0]  # scalar
    if first_ndim == 1 or last_ndim == 1:
        return result.ravel()  # 1-D
    return result
def _multi_dot_three(A, B, C):
"""
Find the best order for three arrays and do the multiplication.
For three arguments `_multi_dot_three` is approximately 15 times faster
than `_multi_dot_matrix_chain_order`
"""
a0, a1b0 = A.shape
b1c0, c1 = C.shape
# cost1 = cost((AB)C) = a0*a1b0*b1c0 + a0*b1c0*c1
cost1 = a0 * b1c0 * (a1b0 + c1)
# cost2 = cost(A(BC)) = a1b0*b1c0*c1 + a0*a1b0*c1
cost2 = a1b0 * c1 * (a0 + b1c0)
if cost1 < cost2:
return dot(dot(A, B), C)
else:
return dot(A, dot(B, C))
def _multi_dot_matrix_chain_order(arrays, return_costs=False):
"""
Return a np.array that encodes the optimal order of mutiplications.
The optimal order array is then used by `_multi_dot()` to do the
multiplication.
Also return the cost matrix if `return_costs` is `True`
The implementation CLOSELY follows Cormen, "Introduction to Algorithms",
Chapter 15.2, p. 370-378. Note that Cormen uses 1-based indices.
cost[i, j] = min([
cost[prefix] + cost[suffix] + cost_mult(prefix, suffix)
for k in range(i, j)])
"""
n = len(arrays)
# p stores the dimensions of the matrices
# Example for p: A_{10x100}, B_{100x5}, C_{5x50} --> p = [10, 100, 5, 50]
p = [a.shape[0] for a in arrays] + [arrays[-1].shape[1]]
# m is a matrix of costs of the subproblems
# m[i,j]: min number of scalar multiplications needed to compute A_{i..j}
m = zeros((n, n), dtype=double)
# s is the actual ordering
# s[i, j] is the value of k at which we split the product A_i..A_j
s = empty((n, n), dtype=intp)
for l in range(1, n):
for i in range(n - l):
j = i + l
m[i, j] = Inf
for k in range(i, j):
q = m[i, k] + m[k+1, j] + p[i]*p[k+1]*p[j+1]
if q < m[i, j]:
m[i, j] = q
s[i, j] = k # Note that Cormen uses 1-based index
return (s, m) if return_costs else s
def _multi_dot(arrays, order, i, j):
"""Actually do the multiplication with the given order."""
if i == j:
return arrays[i]
else:
return dot(_multi_dot(arrays, order, i, order[i, j]),
_multi_dot(arrays, order, order[i, j] + 1, j))
| ssanderson/numpy | numpy/linalg/linalg.py | Python | bsd-3-clause | 78,991 |
import os
from setuptools import setup

# Long description is taken from the README shipped next to this file.
# Use a context manager so the file handle is closed deterministically
# (the original left it open for the life of the process).
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme:
    README = readme.read()

# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))

setup(
    name='django-HTMLtoPDF',
    version='0.1',
    packages=['django_htmlToPDF'],
    include_package_data=True,
    license='BSD License',  # example license
    description='A simple Django app to create PDF files from Html using xhtml2pdf.',
    long_description=README,
    url='http://github.com/Daiech',
    author='Mauricio Aizaga',
    author_email='mauricioaizaga@gmail.com',
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',  # example license
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
    ],
)
) | Daiech/django-HTMLtoPDF | setup.py | Python | bsd-3-clause | 1,128 |
import zeit.cms.generation
import zeit.cms.generation.install
import zeit.calendar.calendar
import zeit.calendar.interfaces
@zeit.cms.generation.get_root
def evolve(root):
    """Install the calendar as a site-local utility on the root object.

    One-off schema-generation step: registers a ``Calendar`` instance
    under the name 'calendar', provided as ``ICalendar``.
    """
    zeit.cms.generation.install.installLocalUtility(
        root, zeit.calendar.calendar.Calendar,
        'calendar', zeit.calendar.interfaces.ICalendar)
| ZeitOnline/zeit.calendar | src/zeit/calendar/generation/install.py | Python | bsd-3-clause | 331 |
__author__ = 'mdavid'
| netkicorp/wns-api-server | netki/util/__init__.py | Python | bsd-3-clause | 23 |
from __future__ import print_function
from math import pi
from bokeh.client import push_session
from bokeh.document import Document
from bokeh.models.glyphs import Line, Quad
from bokeh.models import (
Plot, ColumnDataSource, DataRange1d, FactorRange,
LinearAxis, CategoricalAxis, Grid, Legend,
SingleIntervalTicker
)
from bokeh.sampledata.population import load_population
from bokeh.models.widgets import Select
from bokeh.models.layouts import WidgetBox, Column
# Bokeh document and a client session pushing it to a running server.
document = Document()
session = push_session(document)
# UN population sample data; columns include Location, Year, Sex, Value,
# AgeGrpStart (see bokeh.sampledata.population).
df = load_population()
# Current selection state, mutated by the widget callbacks below.
revision = 2012
year = 2010
location = "World"
# Option lists for the Select widgets.
years = [str(x) for x in sorted(df.Year.unique())]
locations = sorted(df.Location.unique())
# Data source backing the age-pyramid quads; filled by update_pyramid().
source_pyramid = ColumnDataSource(data=dict(female=[], male=[], groups=[], shifted=[]))
def pyramid():
    """Build the age-pyramid figure driven by ``source_pyramid``."""
    plot = Plot(x_range=DataRange1d(), y_range=DataRange1d(),
                plot_width=600, plot_height=500, toolbar_location=None)

    xaxis = LinearAxis()
    yaxis = LinearAxis(ticker=SingleIntervalTicker(interval=5))
    plot.add_layout(xaxis, 'below')
    plot.add_layout(yaxis, 'left')
    plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
    plot.add_layout(Grid(dimension=1, ticker=yaxis.ticker))

    # Male shares are stored as negative values, so their quads extend to
    # the left of zero while female quads extend to the right.
    males = plot.add_glyph(
        source_pyramid,
        Quad(left="male", right=0, bottom="groups", top="shifted",
             fill_color="#3B8686"))
    females = plot.add_glyph(
        source_pyramid,
        Quad(left=0, right="female", bottom="groups", top="shifted",
             fill_color="#CFF09E"))

    plot.add_layout(Legend(items=[
        ("Male", [males]),
        ("Female", [females]),
    ]))
    return plot
# Timeline sources: historically known totals vs. projected totals;
# both are filled by update_population().
source_known = ColumnDataSource(data=dict(x=[], y=[]))
source_predicted = ColumnDataSource(data=dict(x=[], y=[]))
def population():
    """Build the total-population timeline fed by the known/predicted sources."""
    plot = Plot(x_range=FactorRange(factors=years), y_range=DataRange1d(),
                plot_width=600, plot_height=150, toolbar_location=None)

    # Rotate year labels so the categorical axis stays readable.
    plot.add_layout(CategoricalAxis(major_label_orientation=pi / 4), 'below')

    known = plot.add_glyph(
        source_known,
        Line(x="x", y="y", line_color="violet", line_width=2))
    predicted = plot.add_glyph(
        source_predicted,
        Line(x="x", y="y", line_color="violet", line_width=2,
             line_dash="dashed"))

    plot.add_layout(
        Legend(
            location="bottom_right",
            items=[("known", [known]), ("predicted", [predicted])],
        )
    )
    return plot
def update_pyramid():
    """Recompute the pyramid source for the current `location` and `year`."""
    snapshot = df[(df.Location == location) & (df.Year == year)]
    male = snapshot[snapshot.Sex == "Male"]
    female = snapshot[snapshot.Sex == "Female"]

    total = male.Value.sum() + female.Value.sum()
    groups = male.AgeGrpStart.tolist()
    # Upper bound of each 5-year age bracket (quad tops).
    shifted = groups[1:] + [groups[-1] + 5]

    source_pyramid.data = dict(
        groups=groups,
        shifted=shifted,
        # Male share is negated so its quads extend left of zero.
        male=-male.Value / total,
        female=female.Value / total,
    )
def update_population():
    """Recompute known/predicted totals for the current `location`."""
    totals = df[df.Location == location].groupby(df.Year).Value.sum()
    # Years up to the revision decade are "known"; from there on "predicted".
    pivot = revision // 10 * 10
    known = totals[totals.index <= pivot]
    predicted = totals[totals.index >= pivot]
    source_known.data = dict(x=known.index.map(str), y=known.values)
    source_predicted.data = dict(x=predicted.index.map(str), y=predicted.values)
def update_data():
    """Refresh both the timeline and the pyramid for the current selection."""
    update_population()
    update_pyramid()
def on_year_change(attr, old, new):
    """Select-widget callback: switch the displayed year and redraw."""
    global year
    year = int(new)  # widget delivers the option as a string
    update_data()
def on_location_change(attr, old, new):
    """Select-widget callback: switch the displayed location and redraw."""
    global location
    location = new
    update_data()
def create_layout():
    """Assemble the selector widgets and both plots into a column layout."""
    year_select = Select(title="Year:", value="2010", options=years)
    location_select = Select(title="Location:", value="World", options=locations)
    # Wire the widgets to the module-level update callbacks.
    year_select.on_change('value', on_year_change)
    location_select.on_change('value', on_location_change)
    controls = WidgetBox(children=[year_select, location_select],
                         height=150, width=600)
    return Column(children=[controls, pyramid(), population()])
# Build the layout, populate the sources once, and register the document
# root with the bokeh document/session created earlier in the script.
layout = create_layout()
update_data()
document.add_root(layout)
session.show(layout)
if __name__ == "__main__":
    document.validate()
    print("\npress ctrl-C to exit")
    session.loop_until_closed()
| azjps/bokeh | examples/models/population_server.py | Python | bsd-3-clause | 4,476 |
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# Recipe modules this recipe depends on; prefixed names ('depot_tools/...',
# 'recipe_engine/...') are resolved from external repos by the recipe engine.
DEPS = [
  'depot_tools/bot_update',
  'chromium',
  'depot_tools/gclient',
  'recipe_engine/json',
  'recipe_engine/properties',
  'recipe_engine/python',
  'recipe_engine/step',
]
def _android_debug_steps(api):
  """Common steps shared by all 'Android Debug (Nexus *)' builders.

  The three autogenerated per-builder functions were byte-identical, so
  they now all delegate here.  Sequence: bot_update checkout, gclient
  revert, temp cleanup, then the legacy buildbot slave-steps driver.
  """
  # update scripts step; implicitly run by recipe engine.
  # bot_update step: sync a Chromium checkout configured for Android.
  src_cfg = api.gclient.make_config(GIT_MODE=True)
  soln = src_cfg.solutions.add()
  soln.name = "src"
  soln.url = "https://chromium.googlesource.com/chromium/src.git"
  # LayoutTests are not needed by these builders; skip the large checkout.
  soln.custom_deps = {'src/third_party/WebKit/LayoutTests': None}
  soln.custom_vars = {'webkit_trunk': 'http://src.chromium.org/blink/trunk',
                      'googlecode_url': 'http://%s.googlecode.com/svn',
                      'nacl_trunk': 'http://src.chromium.org/native_client/trunk',
                      'sourceforge_url': 'https://svn.code.sf.net/p/%(repo)s/code',
                      'llvm_url': 'http://llvm.org/svn/llvm-project'}
  src_cfg.target_os = set(['android'])
  # Map checked-out paths to the build properties reporting their revisions.
  src_cfg.got_revision_mapping.update({'src': 'got_revision',
    'src/third_party/WebKit': 'got_webkit_revision',
    'src/tools/swarming_client': 'got_swarming_client_revision',
    'src/v8': 'got_v8_revision'})
  api.gclient.c = src_cfg
  api.bot_update.ensure_checkout(force=True)
  # gclient revert step
  api.gclient.revert()
  # cleanup_temp step
  api.chromium.cleanup_temp()
  # slave_steps step: hand off to the buildbot driver with the build and
  # factory properties serialized on the command line.
  api.python("slave_steps", "src/build/android/buildbot/bb_run_bot.py",
      args=['--build-properties=%s' % api.json.dumps(api.properties.legacy(),
            separators=(',', ':')),
        '--factory-properties={"GYP_DEFINES":" component=shared_library",'+\
        '"android_bot_id":"gpu-builder-tests-dbg","clobber":false,'+\
        '"gclient_env":{},"gclient_timeout":3600,"target":"Debug",'+\
        '"target_os":"android"}'], allow_subannotations=True)


def Android_Debug__Nexus_9__steps(api):
  # Thin wrapper kept so dispatch_directory keys remain unchanged.
  _android_debug_steps(api)


def Android_Debug__Nexus_5__steps(api):
  # Thin wrapper kept so dispatch_directory keys remain unchanged.
  _android_debug_steps(api)


def Android_Debug__Nexus_6__steps(api):
  # Thin wrapper kept so dispatch_directory keys remain unchanged.
  _android_debug_steps(api)
# Maps buildbot builder names to their step implementations; consulted by
# RunSteps() below.
dispatch_directory = {
  'Android Debug (Nexus 9)': Android_Debug__Nexus_9__steps,
  'Android Debug (Nexus 5)': Android_Debug__Nexus_5__steps,
  'Android Debug (Nexus 6)': Android_Debug__Nexus_6__steps,
}
def RunSteps(api):
  """Recipe entry point: dispatch to the builder-specific step function."""
  builder = api.properties["buildername"]
  if builder in dispatch_directory:
    dispatch_directory[builder](api)
  else:
    raise api.step.StepFailure("Builder unsupported by recipe.")
def GenTests(api):
  """Simulation tests: one per supported builder, plus the unsupported path."""
  cases = [
      ('Android_Debug__Nexus_9_', 'Android Debug (Nexus 9)'),
      ('Android_Debug__Nexus_5_', 'Android Debug (Nexus 5)'),
      ('Android_Debug__Nexus_6_', 'Android Debug (Nexus 6)'),
      ('builder_not_in_dispatch_directory', 'nonexistent_builder'),
  ]
  for test_name, builder in cases:
    yield (api.test(test_name) +
           api.properties(mastername='chromium.gpu') +
           api.properties(buildername=builder) +
           api.properties(slavename='TestSlave')
          )
| eunchong/build | scripts/slave/recipes/chromium.gpu.recipe_autogen.py | Python | bsd-3-clause | 6,588 |
from scrapy.http import Response
from scrapy.selector import Selector
def xmliter_lxml(obj, nodename, namespace=None, prefix='x'):
    """Yield a Selector for every <nodename> element streamed out of *obj*.

    *obj* may be a Response or a raw str/unicode body; parsing is
    incremental via lxml's iterparse, so large documents are not held in
    memory all at once.
    """
    from lxml import etree
    reader = _StreamReader(obj)
    if namespace:
        tag = '{%s}%s' % (namespace, nodename)
        selxpath = '//%s:%s' % (prefix, nodename)
    else:
        tag = nodename
        selxpath = '//' + nodename
    for _, node in etree.iterparse(reader, tag=tag, encoding=reader.encoding):
        nodetext = etree.tostring(node)
        node.clear()  # release already-parsed subtree memory
        sel = Selector(text=nodetext, type='xml')
        if namespace:
            sel.register_namespace(prefix, namespace)
        yield sel.xpath(selxpath)[0]
class _StreamReader(object):
    """File-like adapter feeding a Response body (or raw str/unicode text)
    to lxml's iterparse in fixed-size chunks, always as UTF-8 bytes."""
    def __init__(self, obj):
        self._ptr = 0  # read cursor into self._text
        if isinstance(obj, Response):
            self._text, self.encoding = obj.body, obj.encoding
        else:
            self._text, self.encoding = obj, 'utf-8'
        # Python 2 `unicode`: decides which _read_* variant read() binds to.
        self._is_unicode = isinstance(self._text, unicode)
    def read(self, n=65535):
        # First call only: rebind self.read to the concrete reader so the
        # type dispatch (and the lstrip) happen exactly once; leading
        # whitespace is stripped because the XML parser rejects it.
        self.read = self._read_unicode if self._is_unicode else self._read_string
        return self.read(n).lstrip()
    def _read_string(self, n=65535):
        # Plain byte-string slice; returns b'' once exhausted.
        s, e = self._ptr, self._ptr + n
        self._ptr = e
        return self._text[s:e]
    def _read_unicode(self, n=65535):
        # Unicode input is re-encoded so the parser always receives UTF-8.
        s, e = self._ptr, self._ptr + n
        self._ptr = e
        return self._text[s:e].encode('utf-8')
| Partoo/scrapy | scrapy/contrib_exp/iterators.py | Python | bsd-3-clause | 1,404 |
import pyaf.Bench.TS_datasets as tsds
import tests.artificial.process_artificial_dataset as art
# Generated benchmark case: 32-point daily series with a moving-average
# trend, cycle length 5, AR order 12, RelativeDifference transform,
# 20 exogenous variables and no noise (sigma=0).
art.process_dataset(N = 32 , FREQ = 'D', seed = 0, trendtype = "MovingAverage", cycle_length = 5, transform = "RelativeDifference", sigma = 0.0, exog_count = 20, ar_order = 12);
# This file is part of h5py, a Python interface to the HDF5 library.
#
# http://www.h5py.org
#
# Copyright 2008-2013 Andrew Collette and contributors
#
# License: Standard 3-clause BSD; see "license.txt" for full license terms
# and contributor agreement.
import unittest as ut
from h5py import h5p, h5f, version
from .common import TestCase
class TestLibver(TestCase):
    """
    Feature: Setting/getting lib ver bounds
    """

    def _check_bounds(self, low, high):
        # Round-trip the bounds pair through a fresh file-access plist.
        fapl = h5p.create(h5p.FILE_ACCESS)
        fapl.set_libver_bounds(low, high)
        self.assertEqual((low, high), fapl.get_libver_bounds())

    def test_libver(self):
        """ Test libver bounds set/get """
        self._check_bounds(h5f.LIBVER_EARLIEST, h5f.LIBVER_LATEST)

    @ut.skipIf(version.hdf5_version_tuple < (1, 10, 2),
               'Requires HDF5 1.10.2 or later')
    def test_libver_v18(self):
        """ Test libver bounds set/get for H5F_LIBVER_V18"""
        self._check_bounds(h5f.LIBVER_EARLIEST, h5f.LIBVER_V18)

    @ut.skipIf(version.hdf5_version_tuple < (1, 10, 2),
               'Requires HDF5 1.10.2 or later')
    def test_libver_v110(self):
        """ Test libver bounds set/get for H5F_LIBVER_V110"""
        self._check_bounds(h5f.LIBVER_V18, h5f.LIBVER_V110)

    @ut.skipIf(version.hdf5_version_tuple < (1, 11, 4),
               'Requires HDF5 1.11.4 or later')
    def test_libver_v112(self):
        """ Test libver bounds set/get for H5F_LIBVER_V112"""
        self._check_bounds(h5f.LIBVER_V18, h5f.LIBVER_V112)
class TestDA(TestCase):
    '''
    Feature: setting/getting chunk cache size on a dataset access property list
    '''
    def test_chunk_cache(self):
        '''test get/set chunk cache '''
        dapl = h5p.create(h5p.DATASET_ACCESS)
        # nslots (40kb hash table), nbytes (1MB cache), w0 (even eviction blend)
        params = (10000, 1000000, .5)
        dapl.set_chunk_cache(*params)
        self.assertEqual(params, dapl.get_chunk_cache())
class TestFA(TestCase):
    '''
    Feature: setting/getting mdc config on a file access property list
    '''
    def test_mdc_config(self):
        '''test get/set mdc config '''
        # Round-trip the default metadata-cache configuration.
        fapl = h5p.create(h5p.FILE_ACCESS)
        fapl.set_mdc_config(fapl.get_mdc_config())

    def test_set_alignment(self):
        '''test get/set chunk cache '''
        fapl = h5p.create(h5p.FILE_ACCESS)
        threshold = 10 * 1024    # 10 KiB threshold
        alignment = 1024 * 1024  # 1 MiB alignment
        fapl.set_alignment(threshold, alignment)
        self.assertEqual((threshold, alignment), fapl.get_alignment())

    @ut.skipUnless(
        version.hdf5_version_tuple >= (1, 12, 1) or
        (version.hdf5_version_tuple[:2] == (1, 10) and version.hdf5_version_tuple[2] >= 7),
        'Requires HDF5 1.12.1 or later or 1.10.x >= 1.10.7')
    def test_set_file_locking(self):
        '''test get/set file locking'''
        fapl = h5p.create(h5p.FILE_ACCESS)
        flags = (False, False)  # (use_file_locking, ignore_when_disabled)
        fapl.set_file_locking(*flags)
        self.assertEqual(flags, fapl.get_file_locking())
class TestPL(TestCase):
    def test_obj_track_times(self):
        """
        tests if the object track times set/get
        """
        # Identical round-trip for group, dataset and generic-object
        # creation property lists (same order as before).
        for plist_cls in (h5p.GROUP_CREATE, h5p.DATASET_CREATE, h5p.OBJECT_CREATE):
            ocpl = h5p.create(plist_cls)
            ocpl.set_obj_track_times(False)
            self.assertEqual(False, ocpl.get_obj_track_times())
            ocpl.set_obj_track_times(True)
            self.assertEqual(True, ocpl.get_obj_track_times())

    def test_link_creation_tracking(self):
        """
        tests the link creation order set/get
        """
        gcpl = h5p.create(h5p.GROUP_CREATE)
        gcpl.set_link_creation_order(0)
        self.assertEqual(0, gcpl.get_link_creation_order())
        flags = h5p.CRT_ORDER_TRACKED | h5p.CRT_ORDER_INDEXED
        gcpl.set_link_creation_order(flags)
        self.assertEqual(flags, gcpl.get_link_creation_order())
        # The same flags also apply to file creation property lists.
        fcpl = h5p.create(h5p.FILE_CREATE)
        fcpl.set_link_creation_order(flags)
        self.assertEqual(flags, fcpl.get_link_creation_order())

    def test_attr_phase_change(self):
        """
        test the attribute phase change
        """
        ocpl = h5p.create(h5p.OBJECT_CREATE)
        # Library defaults: max_compact=8, min_dense=6.
        self.assertEqual((8, 6), ocpl.get_attr_phase_change())
        # max_compact must be < 65536 (64kb)
        with self.assertRaises(ValueError):
            ocpl.set_attr_phase_change(65536, 6)
        # (0, 0) forces dense attribute storage, avoiding the 64kb size
        # limit for a single attribute in compact storage.
        ocpl.set_attr_phase_change(0, 0)
        self.assertEqual((0, 0), ocpl.get_attr_phase_change())
| h5py/h5py | h5py/tests/test_h5p.py | Python | bsd-3-clause | 6,042 |
import math
import ctypes
from parse import *
class Matrix(object):
    """2D affine transform stored SVG-style as [a, b, c, d, e, f],
    i.e. the matrix [[a, c, e], [b, d, f]] applied to points (x, y, 1)."""

    def __init__(self, string=None):
        # Identity transform unless an SVG transform string or a 6-value
        # sequence is supplied.
        self.values = [1, 0, 0, 1, 0, 0]
        if isinstance(string, str):
            if string.startswith('matrix('):
                self.values = [float(v) for v in parse_list(string[7:-1])]
            elif string.startswith('translate('):
                tx, ty = [float(v) for v in parse_list(string[10:-1])]
                self.values = [1, 0, 0, 1, tx, ty]
            elif string.startswith('scale('):
                sx, sy = [float(v) for v in parse_list(string[6:-1])]
                self.values = [sx, 0, 0, sy, 0, 0]
        elif string is not None:
            self.values = list(string)

    def __call__(self, other):
        """Apply the transform to a 2-sequence point; returns a tuple."""
        a, b, c, d, e, f = self.values
        return (a * other[0] + c * other[1] + e,
                b * other[0] + d * other[1] + f)

    def __str__(self):
        return str(self.values)

    def to_mat4(self):
        """Expand to a column-major 4x4 matrix (z axis untouched)."""
        a, b, c, d, e, f = self.values
        return [a, b, 0.0, 0.0,
                c, d, 0.0, 0.0,
                0.0, 0.0, 1.0, 0.0,
                e, f, 0.0, 1.0]

    def inverse(self):
        """Return the inverse affine transform as a new Matrix."""
        a, b, c, d, e, f = self.values
        det = float(a * d - b * c)
        return Matrix([d / det, -b / det, -c / det, a / det,
                       (c * f - d * e) / det,
                       (b * e - a * f) / det])

    def __mul__(self, other):
        """Compose transforms: (self * other)(p) == self(other(p))."""
        a, b, c, d, e, f = self.values
        u, v, w, x, y, z = other.values
        return Matrix([a * u + c * v, b * u + d * v,
                       a * w + c * x, b * w + d * x,
                       a * y + c * z + e, b * y + d * z + f])
def svg_matrix_to_gl_matrix(matrix):
    """Flatten a Matrix into a column-major 3x3 list for OpenGL."""
    a, b, c, d, e, f = matrix.values
    return [a, b, 0.0, c, d, 0.0, e, f, 1.0]
def as_c_matrix(values):
    """Copy *values* into a ctypes float array and return it as a float*."""
    arr = (ctypes.c_float * len(values))(*values)
    # ctypes.cast keeps a reference to arr, so the buffer stays alive.
    return ctypes.cast(arr, ctypes.POINTER(ctypes.c_float))
# MAEC Behavior Class
# Copyright (c) 2018, The MITRE Corporation
# All rights reserved
from mixbox import fields
from mixbox import idgen
import maec
from . import _namespace
import maec.bindings.maec_bundle as bundle_binding
from cybox.core.action_reference import ActionReference
from cybox.common.measuresource import MeasureSource
from cybox.common.platform_specification import PlatformSpecification
from cybox.objects.code_object import Code
# Reference (by idref) to an ActionEquivalence, with an optional ordering
# position within the behavior.
class BehavioralActionEquivalenceReference(maec.Entity):
    _binding = bundle_binding
    _binding_class = bundle_binding.BehavioralActionEquivalenceReferenceType
    _namespace = _namespace
    action_equivalence_idref = fields.TypedField('action_equivalence_idref')
    behavioral_ordering = fields.TypedField('behavioral_ordering')
# CybOX ActionReference extended with a behavioral ordering position.
class BehavioralActionReference(ActionReference):
    _binding = bundle_binding
    _binding_class = bundle_binding.BehavioralActionReferenceType
    _namespace = _namespace
    behavioral_ordering = fields.TypedField('behavioral_ordering')
# An inline action with its ordering position within the behavior.
class BehavioralAction(maec.Entity):
    _binding = bundle_binding
    _binding_class = bundle_binding.BehavioralActionType
    _namespace = _namespace
    behavioral_ordering = fields.TypedField('behavioral_ordering')
# Container for the actions composing a behavior: inline actions,
# references, collections and equivalence references.
class BehavioralActions(maec.Entity):
    _binding = bundle_binding
    _binding_class = bundle_binding.BehavioralActionsType
    _namespace = _namespace
    #TODO: action_collection.type_ is set below to avoid circular import.
    action_collection = fields.TypedField('Action_Collection', None, multiple=True)
    action = fields.TypedField('Action', BehavioralAction, multiple=True)
    action_reference = fields.TypedField('Action_Reference', BehavioralActionReference, multiple=True)
    action_equivalence_reference = fields.TypedField('Action_Equivalence_Reference', BehavioralActionEquivalenceReference, multiple=True)
# List of CybOX PlatformSpecification entries.
class PlatformList(maec.EntityList):
    _binding = bundle_binding
    _binding_class = bundle_binding.PlatformListType
    _namespace = _namespace
    platform = fields.TypedField("Platform", PlatformSpecification, multiple=True)
# A CVE identifier plus free-text description of the vulnerability.
class CVEVulnerability(maec.Entity):
    _binding = bundle_binding
    _binding_class = bundle_binding.CVEVulnerabilityType
    _namespace = _namespace
    cve_id = fields.TypedField('cve_id')
    description = fields.TypedField('Description')
# Exploit details: whether the vulnerability is known, its CVE/CWE ids
# and the platforms it targets.
class Exploit(maec.Entity):
    _binding = bundle_binding
    _binding_class = bundle_binding.ExploitType
    _namespace = _namespace
    known_vulnerability = fields.TypedField('known_vulnerability')
    cve = fields.TypedField('CVE', CVEVulnerability)
    cwe_id = fields.TypedField('CWE_ID', multiple=True)
    targeted_platforms = fields.TypedField('Targeted_Platforms', PlatformList)
# Why the behavior exists, optionally tied to a vulnerability exploit.
class BehaviorPurpose(maec.Entity):
    _binding = bundle_binding
    _binding_class = bundle_binding.BehaviorPurposeType
    _namespace = _namespace
    description = fields.TypedField('Description')
    vulnerability_exploit = fields.TypedField('Vulnerability_Exploit', Exploit)
# Code snippets associated with a behavior.
class AssociatedCode(maec.EntityList):
    _binding = bundle_binding
    _binding_class = bundle_binding.AssociatedCodeType
    _namespace = _namespace
    code_snippet = fields.TypedField("Code_Snippet", Code, multiple=True)
# A MAEC Behavior: a purposeful grouping of actions, with optional
# discovery method, associated code and exploit/purpose metadata.
class Behavior(maec.Entity):
    _binding = bundle_binding
    _binding_class = bundle_binding.BehaviorType
    _namespace = _namespace
    id_ = fields.TypedField('id')
    ordinal_position = fields.TypedField('ordinal_position')
    status = fields.TypedField('status')
    duration = fields.TypedField('duration')
    purpose = fields.TypedField('Purpose', BehaviorPurpose)
    description = fields.TypedField('Description')
    discovery_method = fields.TypedField('Discovery_Method', MeasureSource)
    action_composition = fields.TypedField('Action_Composition', BehavioralActions)
    associated_code = fields.TypedField('Associated_Code', AssociatedCode)
    #relationships = fields.TypedField('Relationships', BehaviorRelationshipList) # TODO: implement
    def __init__(self, id = None, description = None):
        # NOTE: `id` shadows the builtin; kept for API compatibility with
        # existing callers that pass id= by keyword.
        super(Behavior, self).__init__()
        if id:
            self.id_ = id
        else:
            # Auto-assign a namespaced id like "...behavior-N" when omitted.
            self.id_ = idgen.create_id(prefix="behavior")
        self.description = description
# Deferred import: bundle.py imports this module, so ActionCollection is
# bound onto the field here to break the circular dependency (see TODO above).
from maec.bundle.bundle import ActionCollection
BehavioralActions.action_collection.type_ = ActionCollection
| MAECProject/python-maec | maec/bundle/behavior.py | Python | bsd-3-clause | 4,501 |
#!/usr/bin/env python
from Crypto import Random
from M2Crypto import EVP
from io_helper import stream
from padding import pad_pkcs5, unpad_pkcs5
from chunk_buffer import ChunkBuffer
# Default cipher spec understood by M2Crypto's EVP.Cipher.
ALGORITHM = 'aes_256_cbc'
# AES has a fixed block size of 16 bytes regardless of key size
BLOCK_SIZE = 16
# M2Crypto EVP.Cipher `op` codes: 1 = encrypt, 0 = decrypt.
ENC=1
DEC=0
def encrypt(in_file, out_file, key, iv, pad=True, chunk_size=stream.DEFAULT_CHUNK_SIZE, alg=ALGORITHM):
    """Encrypt in_file to out_file with `alg` (AES-256-CBC by default),
    appending PKCS#5 padding unless pad=False.

    NOTE(review): chunk_size is accepted but never forwarded to
    stream.chunk_iter, which therefore always uses its own default —
    confirm whether chunk_iter should receive it.
    """
    cipher = EVP.Cipher(alg=alg, key=key, iv=iv, op=ENC)
    size = 0  # total plaintext bytes seen; needed to size the padding
    for chunk in stream.chunk_iter(in_file):
        out_file.write(cipher.update(chunk))
        size += len(chunk)
    if pad:
        padding = pad_pkcs5(size, BLOCK_SIZE)
        out_file.write(cipher.update(padding))
    out_file.write(cipher.final())
def decrypt(in_file, out_file, key, iv, unpad=True, chunk_size=stream.DEFAULT_CHUNK_SIZE, alg=ALGORITHM):
    """Decrypt in_file to out_file, stripping PKCS#5 padding unless
    unpad=False.

    `alg` mirrors encrypt()'s parameter; previously decrypt was hard-wired
    to ALGORITHM, so data encrypted with a non-default algorithm could not
    be decrypted here.  The default preserves the old behavior.

    NOTE(review): chunk_size is accepted but never forwarded to
    stream.chunk_iter — confirm whether chunk_iter should receive it.
    """
    cipher = EVP.Cipher(alg=alg, key=key, iv=iv, op=DEC)
    # Hold back at least one block so the final (padded) block is still in
    # the buffer after cipher.final(); earlier bytes are flushed eagerly.
    buf = ChunkBuffer(
        min_size=BLOCK_SIZE,
        evict_fn=lambda chunk: out_file.write(chunk)
    )
    for chunk in stream.chunk_iter(in_file):
        buf.append(cipher.update(chunk))
    buf.append(cipher.final())
    remainder = buf.getvalue()
    if unpad:
        out_file.write(unpad_pkcs5(remainder))
    else:
        out_file.write(remainder)
| zulu7/pylib | crypto/stream_crypto.py | Python | bsd-3-clause | 1,198 |
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from integration_tests import chrome_proxy_measurements as measurements
from integration_tests import chrome_proxy_pagesets as pagesets
from telemetry import benchmark
from telemetry.core.backends.chrome import android_browser_finder
# All Android Chrome browser targets except WebView (used by the
# safebrowsing enable/disable decorators below).
ANDROID_CHROME_BROWSERS = [
  browser for browser in android_browser_finder.CHROME_PACKAGE_NAMES
  if 'webview' not in browser]
# Each benchmark below couples a measurement class (test), a page set and a
# unique telemetry name.  "*Direct" variants run the same measurement
# without the proxy for comparison; "*Synthetic" variants swap in the
# synthetic page set.
class ChromeProxyLatency(benchmark.Benchmark):
  tag = 'latency'
  test = measurements.ChromeProxyLatency
  page_set = pagesets.Top20PageSet
  @classmethod
  def Name(cls):
    return 'chrome_proxy_benchmark.latency.top_20'
class ChromeProxyLatencyDirect(benchmark.Benchmark):
  tag = 'latency_direct'
  test = measurements.ChromeProxyLatency
  page_set = pagesets.Top20PageSet
  @classmethod
  def Name(cls):
    return 'chrome_proxy_benchmark.latency_direct.top_20'
class ChromeProxyLatencySynthetic(ChromeProxyLatency):
  page_set = pagesets.SyntheticPageSet
  @classmethod
  def Name(cls):
    return 'chrome_proxy_benchmark.latency.synthetic'
class ChromeProxyLatencySyntheticDirect(ChromeProxyLatencyDirect):
  page_set = pagesets.SyntheticPageSet
  @classmethod
  def Name(cls):
    return 'chrome_proxy_benchmark.latency_direct.synthetic'
class ChromeProxyDataSaving(benchmark.Benchmark):
  tag = 'data_saving'
  test = measurements.ChromeProxyDataSaving
  page_set = pagesets.Top20PageSet
  @classmethod
  def Name(cls):
    return 'chrome_proxy_benchmark.data_saving.top_20'
class ChromeProxyDataSavingDirect(benchmark.Benchmark):
  tag = 'data_saving_direct'
  test = measurements.ChromeProxyDataSaving
  page_set = pagesets.Top20PageSet
  @classmethod
  def Name(cls):
    return 'chrome_proxy_benchmark.data_saving_direct.top_20'
class ChromeProxyDataSavingSynthetic(ChromeProxyDataSaving):
  page_set = pagesets.SyntheticPageSet
  @classmethod
  def Name(cls):
    return 'chrome_proxy_benchmark.data_saving.synthetic'
class ChromeProxyDataSavingSyntheticDirect(ChromeProxyDataSavingDirect):
  page_set = pagesets.SyntheticPageSet
  @classmethod
  def Name(cls):
    return 'chrome_proxy_benchmark.data_saving_direct.synthetic'
# Proxy protocol/behavior benchmarks: each pairs one measurement with the
# page set that exercises it (headers, client identification, Lo-Fi mode,
# experiment directives and the various bypass paths).
class ChromeProxyHeaderValidation(benchmark.Benchmark):
  tag = 'header_validation'
  test = measurements.ChromeProxyHeaders
  page_set = pagesets.Top20PageSet
  @classmethod
  def Name(cls):
    return 'chrome_proxy_benchmark.header_validation.top_20'
class ChromeProxyClientVersion(benchmark.Benchmark):
  tag = 'client_version'
  test = measurements.ChromeProxyClientVersion
  page_set = pagesets.SyntheticPageSet
  @classmethod
  def Name(cls):
    return 'chrome_proxy_benchmark.client_version.synthetic'
class ChromeProxyClientType(benchmark.Benchmark):
  tag = 'client_type'
  test = measurements.ChromeProxyClientType
  page_set = pagesets.ClientTypePageSet
  @classmethod
  def Name(cls):
    return 'chrome_proxy_benchmark.client_type.client_type'
class ChromeProxyLoFi(benchmark.Benchmark):
  tag = 'lo_fi'
  test = measurements.ChromeProxyLoFi
  page_set = pagesets.LoFiPageSet
  @classmethod
  def Name(cls):
    return 'chrome_proxy_benchmark.lo_fi.lo_fi'
class ChromeProxyExpDirective(benchmark.Benchmark):
  tag = 'exp_directive'
  test = measurements.ChromeProxyExpDirective
  page_set = pagesets.ExpDirectivePageSet
  @classmethod
  def Name(cls):
    return 'chrome_proxy_benchmark.exp_directive.exp_directive'
class ChromeProxyBypass(benchmark.Benchmark):
  tag = 'bypass'
  test = measurements.ChromeProxyBypass
  page_set = pagesets.BypassPageSet
  @classmethod
  def Name(cls):
    return 'chrome_proxy_benchmark.bypass.bypass'
class ChromeProxyCorsBypass(benchmark.Benchmark):
  tag = 'bypass'
  test = measurements.ChromeProxyCorsBypass
  page_set = pagesets.CorsBypassPageSet
  @classmethod
  def Name(cls):
    return 'chrome_proxy_benchmark.bypass.corsbypass'
class ChromeProxyBlockOnce(benchmark.Benchmark):
  tag = 'block_once'
  test = measurements.ChromeProxyBlockOnce
  page_set = pagesets.BlockOncePageSet
  @classmethod
  def Name(cls):
    return 'chrome_proxy_benchmark.block_once.block_once'
@benchmark.Enabled(*ANDROID_CHROME_BROWSERS)
# Safebrowsing is enabled for Android and iOS.
class ChromeProxySafeBrowsingOn(benchmark.Benchmark):
  tag = 'safebrowsing_on'
  test = measurements.ChromeProxySafebrowsingOn
  page_set = pagesets.SafebrowsingPageSet
  @classmethod
  def Name(cls):
    return 'chrome_proxy_benchmark.safebrowsing_on.safebrowsing'
@benchmark.Disabled(*ANDROID_CHROME_BROWSERS)
# Safebrowsing is switched off for Android Webview and all desktop platforms.
class ChromeProxySafeBrowsingOff(benchmark.Benchmark):
  tag = 'safebrowsing_off'
  test = measurements.ChromeProxySafebrowsingOff
  page_set = pagesets.SafebrowsingPageSet
  @classmethod
  def Name(cls):
    return 'chrome_proxy_benchmark.safebrowsing_off.safebrowsing'
# Fallback benchmarks: verify the client falls back from the proxy (via
# probe URL, Via header or direct connection) and re-enables it afterwards.
class ChromeProxyHTTPFallbackProbeURL(benchmark.Benchmark):
  tag = 'fallback_probe'
  test = measurements.ChromeProxyHTTPFallbackProbeURL
  page_set = pagesets.SyntheticPageSet
  @classmethod
  def Name(cls):
    return 'chrome_proxy_benchmark.fallback_probe.synthetic'
class ChromeProxyHTTPFallbackViaHeader(benchmark.Benchmark):
  tag = 'fallback_viaheader'
  test = measurements.ChromeProxyHTTPFallbackViaHeader
  page_set = pagesets.FallbackViaHeaderPageSet
  @classmethod
  def Name(cls):
    return 'chrome_proxy_benchmark.fallback_viaheader.fallback_viaheader'
class ChromeProxyHTTPToDirectFallback(benchmark.Benchmark):
  tag = 'http_to_direct_fallback'
  test = measurements.ChromeProxyHTTPToDirectFallback
  page_set = pagesets.HTTPToDirectFallbackPageSet
  @classmethod
  def Name(cls):
    return ('chrome_proxy_benchmark.http_to_direct_fallback.'
            'http_to_direct_fallback')
class ChromeProxyReenableAfterBypass(benchmark.Benchmark):
  tag = 'reenable_after_bypass'
  test = measurements.ChromeProxyReenableAfterBypass
  page_set = pagesets.ReenableAfterBypassPageSet
  @classmethod
  def Name(cls):
    return 'chrome_proxy_benchmark.reenable_after_bypass.reenable_after_bypass'
# Quick end-to-end sanity check over a small page set.
class ChromeProxySmoke(benchmark.Benchmark):
  tag = 'smoke'
  test = measurements.ChromeProxySmoke
  page_set = pagesets.SmokePageSet
  @classmethod
  def Name(cls):
    return 'chrome_proxy_benchmark.smoke.smoke'
| ltilve/chromium | tools/chrome_proxy/integration_tests/chrome_proxy_benchmark.py | Python | bsd-3-clause | 6,442 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from setuptools import setup
import re
import os
import sys
def get_version(package):
    """
    Return package version as listed in `__version__` in `init.py`.
    """
    # Use a context manager so the file handle is closed deterministically
    # (the original left the handle to the garbage collector).
    with open(os.path.join(package, '__init__.py')) as init_file:
        init_py = init_file.read()
    # re.match anchors at the start: __version__ must be the first line.
    return re.match("__version__ = ['\"]([^'\"]+)['\"]", init_py).group(1)
def get_packages(package):
    """
    Return root package and all sub-packages.
    """
    packages = []
    for dirpath, dirnames, filenames in os.walk(package):
        # A directory is a package iff it contains an __init__.py.
        if os.path.exists(os.path.join(dirpath, '__init__.py')):
            packages.append(dirpath)
    return packages
def get_package_data(package):
    """
    Return all files under the root package, that are not in a
    package themselves.
    """
    filepaths = []
    for dirpath, dirnames, filenames in os.walk(package):
        if os.path.exists(os.path.join(dirpath, '__init__.py')):
            continue  # real sub-packages are covered by get_packages()
        # Path of this directory relative to the package root.
        base = dirpath.replace(package + os.sep, '', 1)
        filepaths.extend(os.path.join(base, filename)
                         for filename in filenames)
    return {package: filepaths}
version = get_version('blog')
# `python setup.py publish` shortcut: upload to PyPI and remind the
# maintainer to tag the release.
if sys.argv[-1] == 'publish':
    os.system("python setup.py sdist upload")
    print("You probably want to also tag the version now:")
    print("  git tag -a %s -m 'version %s'" % (version, version))
    print("  git push --tags")
    sys.exit()
# Package metadata; packages and data files are discovered from the
# 'blog' directory by the helpers above.
setup(
    name='django-blog-sl',
    version=version,
    url='http://github.com/simonluijk/django-blog',
    license='BSD',
    description='Yet another django blog.',
    author='Simon Luijk',
    author_email='simon@simonluijk.com',  # SEE NOTE BELOW (*)
    packages=get_packages('blog'),
    package_data=get_package_data('blog'),
    test_suite='blog.runtests.runtests.main',
    install_requires=['django-mptt', 'django-markdown-deux'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Topic :: Internet :: WWW/HTTP',
    ]
)
# (*) Please direct queries to the discussion group, rather than to me directly
# Doing so helps ensure your question is helpful to other users.
# Queries directly to my email are likely to receive a canned response.
#
# Many thanks for your understanding.
| simonluijk/django-blog | setup.py | Python | bsd-3-clause | 2,546 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from six import with_metaclass
from decimal import Decimal
from django.core.exceptions import ImproperlyConfigured, PermissionDenied
from django.db import models, transaction
from django.db.models.aggregates import Sum
from django.utils.encoding import python_2_unicode_compatible
from django.utils.module_loading import import_string
from django.utils.functional import cached_property
from django.utils.translation import ugettext_lazy as _, pgettext_lazy, get_language_from_request
from django.utils.six.moves.urllib.parse import urljoin
from jsonfield.fields import JSONField
from ipware.ip import get_ip
from django_fsm import FSMField, transition
from cms.models import Page
from shop import settings as shop_settings
from shop.models.cart import CartItemModel
from shop.money.fields import MoneyField, MoneyMaker
from .product import BaseProduct
from . import deferred
class OrderManager(models.Manager):
@transaction.atomic
def create_from_cart(self, cart, request):
"""
This creates a new Order object with all its OrderItems using the current Cart object
with its Cart Items. Whenever on Order Item is created from a Cart Item, that item is
removed from the Cart.
"""
cart.update(request)
order = self.model(customer=cart.customer, currency=cart.total.currency,
_subtotal=Decimal(0), _total=Decimal(0), stored_request=self.stored_request(request))
order.get_or_assign_number()
order.save()
order.customer.get_or_assign_number()
for cart_item in cart.items.all():
cart_item.update(request)
order_item = OrderItemModel(order=order)
try:
order_item.populate_from_cart_item(cart_item, request)
order_item.save()
cart_item.delete()
except CartItemModel.DoesNotExist:
pass
order.populate_from_cart(cart, request)
order.save()
return order
def stored_request(self, request):
"""
Extract useful information about the request to be used for emulating a Django request
during offline rendering.
"""
return {
'language': get_language_from_request(request),
'absolute_base_uri': request.build_absolute_uri('/'),
'remote_ip': get_ip(request),
'user_agent': request.META.get('HTTP_USER_AGENT'),
}
def filter_from_request(self, request):
"""
Return a queryset containing the orders for the customer associated with the given
request object.
"""
if request.customer.is_visitor():
msg = _("Only signed in customers can view their orders")
raise PermissionDenied(msg)
return self.get_queryset().filter(customer=request.customer).order_by('-updated_at',)
def get_summary_url(self):
"""
Returns the URL of the page with the list view for all orders related to the current customer
"""
if not hasattr(self, '_summary_url'):
try:
page = Page.objects.public().get(reverse_id='shop-order')
except Page.DoesNotExist:
page = Page.objects.public().filter(application_urls='OrderApp').first()
finally:
self._summary_url = page and page.get_absolute_url() or 'cms-page-with--reverse_id=shop-order--does-not-exist/'
return self._summary_url
    def get_latest_url(self):
        """
        Returns the URL of the page with the detail view for the latest order related to the
        current customer. This normally is the thank-you view.
        """
        try:
            return Page.objects.public().get(reverse_id='shop-order-last').get_absolute_url()
        except Page.DoesNotExist:
            pass  # TODO: could be retrieved by last order
        # Deliberately return a self-describing placeholder instead of raising,
        # so templates render a visibly broken link rather than a 500.
        return 'cms-page-with--reverse_id=shop-order-last--does-not-exist/'
class WorkflowMixinMetaclass(deferred.ForeignKeyBuilder):
    """
    Add configured Workflow mixin classes to `OrderModel` and `OrderPayment` to customize
    all kinds of state transitions in a pluggable manner.

    Each mixin may declare a ``TRANSITION_TARGETS`` dict mapping a transition target
    name to its human readable description; the dicts of all bases are merged into
    ``_transition_targets`` on the materialized class.
    """
    def __new__(cls, name, bases, attrs):
        if 'BaseOrder' in (b.__name__ for b in bases):
            bases = tuple(import_string(mc) for mc in shop_settings.ORDER_WORKFLOWS) + bases
            # merge the dicts of TRANSITION_TARGETS
            attrs.update(_transition_targets={}, _auto_transitions={})
            for b in reversed(bases):
                TRANSITION_TARGETS = getattr(b, 'TRANSITION_TARGETS', {})
                # Bugfix: guard the delattr — a base which merely inherits (or never
                # declares) TRANSITION_TARGETS has no own attribute to delete, and the
                # unguarded call raised AttributeError.
                try:
                    delattr(b, 'TRANSITION_TARGETS')
                except AttributeError:
                    pass
                duplicates = set(TRANSITION_TARGETS.keys()).intersection(attrs['_transition_targets'])
                if duplicates:
                    # Bugfix: report only the conflicting transition names, not every
                    # key the mixin declares.
                    msg = "Mixin class {} already contains a transition named '{}'"
                    raise ImproperlyConfigured(msg.format(b.__name__, ', '.join(duplicates)))
                attrs['_transition_targets'].update(TRANSITION_TARGETS)
                attrs['_auto_transitions'].update(cls.add_to_auto_transitions(b))
        Model = super(WorkflowMixinMetaclass, cls).__new__(cls, name, bases, attrs)
        return Model

    @staticmethod
    def add_to_auto_transitions(base):
        """
        Collect FSM transition methods on ``base`` whose transitions are marked with
        ``custom={'auto': ...}``, mapping each transition name to its method.
        """
        result = {}
        for method in base.__dict__.values():
            if callable(method) and hasattr(method, '_django_fsm'):
                for name, transition in method._django_fsm.transitions.items():
                    if transition.custom.get('auto'):
                        result.update({name: method})
        return result
@python_2_unicode_compatible
class BaseOrder(with_metaclass(WorkflowMixinMetaclass, models.Model)):
    """
    An Order is the "in process" counterpart of the shopping cart, which freezes the state of the
    cart on the moment of purchase. It also holds stuff like the shipping and billing addresses,
    and keeps all the additional entities, as determined by the cart modifiers.
    """
    # Base transition targets; workflow mixins contribute more via the metaclass.
    TRANSITION_TARGETS = {
        'new': _("New order without content"),
        'created': _("Order freshly created"),
        'payment_confirmed': _("Payment confirmed"),
    }
    decimalfield_kwargs = {
        'max_digits': 30,
        'decimal_places': 2,
    }
    # Quantizer exponent for rounding, e.g. Decimal('.00') for 2 decimal places.
    decimal_exp = Decimal('.' + '0' * decimalfield_kwargs['decimal_places'])
    customer = deferred.ForeignKey('BaseCustomer', verbose_name=_("Customer"), related_name='orders')
    # protected=True: status may only change through declared FSM transitions.
    status = FSMField(default='new', protected=True, verbose_name=_("Status"))
    currency = models.CharField(max_length=7, editable=False,
        help_text=_("Currency in which this order was concluded"))
    # Raw decimal amounts; the `subtotal`/`total` properties wrap them as Money.
    _subtotal = models.DecimalField(_("Subtotal"), **decimalfield_kwargs)
    _total = models.DecimalField(_("Total"), **decimalfield_kwargs)
    created_at = models.DateTimeField(_("Created at"), auto_now_add=True)
    updated_at = models.DateTimeField(_("Updated at"), auto_now=True)
    extra = JSONField(verbose_name=_("Extra fields"), default={},
        help_text=_("Arbitrary information for this order object on the moment of purchase."))
    stored_request = JSONField(default={},
        help_text=_("Parts of the Request objects on the moment of purchase."))
    objects = OrderManager()

    class Meta:
        abstract = True

    def __str__(self):
        return self.get_number()

    def __repr__(self):
        return "<{}(pk={})>".format(self.__class__.__name__, self.pk)

    def get_or_assign_number(self):
        """
        Hook to get or to assign the order number. It shall be invoked, every time an Order
        object is created. If you prefer to use an order number which differs from the primary
        key, then override this method.
        """
        return self.get_number()

    def get_number(self):
        """
        Hook to get the order number. Defaults to the primary key as a string.
        """
        return str(self.id)

    @cached_property
    def subtotal(self):
        """
        The summed up amount for all ordered items excluding extra order lines.
        """
        return MoneyMaker(self.currency)(self._subtotal)

    @cached_property
    def total(self):
        """
        The final total to charge for this order.
        """
        return MoneyMaker(self.currency)(self._total)

    @classmethod
    def round_amount(cls, amount):
        # NOTE: implicitly returns None for non-finite amounts (e.g. NaN).
        if amount.is_finite():
            return Decimal(amount).quantize(cls.decimal_exp)

    def get_absolute_url(self):
        """
        Returns the URL for the detail view of this order
        """
        return urljoin(OrderModel.objects.get_summary_url(), str(self.id))

    @transition(field=status, source='new', target='created')
    def populate_from_cart(self, cart, request):
        """
        Populate the order object with the fields from the given cart. Override this method,
        in case a customized cart has some fields which have to be transfered to the cart.
        """
        self._subtotal = Decimal(cart.subtotal)
        self._total = Decimal(cart.total)
        self.extra = dict(cart.extra)
        self.extra.update(rows=[(modifier, extra_row.data) for modifier, extra_row in cart.extra_rows.items()])

    def save(self, **kwargs):
        """
        Before saving the Order object to the database, round the total to the given decimal_places
        """
        # Fire any auto-transition registered for the current status (see metaclass).
        auto_transition = self._auto_transitions.get(self.status)
        if callable(auto_transition):
            auto_transition(self)
        self._subtotal = BaseOrder.round_amount(self._subtotal)
        self._total = BaseOrder.round_amount(self._total)
        super(BaseOrder, self).save(**kwargs)

    @cached_property
    def amount_paid(self):
        """
        The amount paid is the sum of related orderpayments
        """
        amount = self.orderpayment_set.aggregate(amount=Sum('amount'))['amount']
        if amount is None:
            # No payments yet: zero in this order's currency.
            amount = MoneyMaker(self.currency)()
        return amount

    @property
    def outstanding_amount(self):
        """
        Return the outstanding amount paid for this order
        """
        return self.total - self.amount_paid

    def is_fully_paid(self):
        # Used as a condition on the `acknowledge_payment` transition below.
        return self.amount_paid >= self.total

    @transition(field='status', source='*', target='payment_confirmed', conditions=[is_fully_paid])
    def acknowledge_payment(self, by=None):
        """
        Change status to `payment_confirmed`. This status code is known globally and can be used
        by all external plugins to check, if an Order object has been fully paid.
        """

    @classmethod
    def get_transition_name(cls, target):
        """Return the human readable name for a given transition target"""
        return cls._transition_targets.get(target, target)

    def status_name(self):
        """Return the human readable name for the current transition state"""
        return self._transition_targets.get(self.status, self.status)

    status_name.short_description = pgettext_lazy('order_models', "State")

# Concrete (materialized) order model, resolved through the deferred machinery.
OrderModel = deferred.MaterializedModel(BaseOrder)
class OrderPayment(with_metaclass(deferred.ForeignKeyBuilder, models.Model)):
    """
    A model to hold received payments for a given order.

    Summed up by ``BaseOrder.amount_paid`` via the reverse ``orderpayment_set``.
    """
    order = deferred.ForeignKey(BaseOrder, verbose_name=_("Order"))
    amount = MoneyField(_("Amount paid"),
        help_text=_("How much was paid with this particular transfer."))
    transaction_id = models.CharField(_("Transaction ID"), max_length=255,
        help_text=_("The transaction processor's reference"))
    created_at = models.DateTimeField(_("Received at"), auto_now_add=True)
    payment_method = models.CharField(_("Payment method"), max_length=50,
        help_text=_("The payment backend used to process the purchase"))

    class Meta:
        verbose_name = pgettext_lazy('order_models', "Order payment")
        verbose_name_plural = pgettext_lazy('order_models', "Order payments")
@python_2_unicode_compatible
class BaseOrderItem(with_metaclass(deferred.ForeignKeyBuilder, models.Model)):
    """
    An item for an order. Snapshots the product's name, code and prices at the
    moment of purchase, so later product changes do not alter past invoices.
    """
    order = deferred.ForeignKey(BaseOrder, related_name='items', verbose_name=_("Order"))
    product_name = models.CharField(_("Product name"), max_length=255, null=True, blank=True,
        help_text=_("Product name at the moment of purchase."))
    product_code = models.CharField(_("Product code"), max_length=255, null=True, blank=True,
        help_text=_("Product code at the moment of purchase."))
    # SET_NULL keeps the order item (with its snapshot) if the product is deleted.
    product = deferred.ForeignKey(BaseProduct, null=True, blank=True, on_delete=models.SET_NULL,
        verbose_name=_("Product"))
    _unit_price = models.DecimalField(_("Unit price"), null=True,  # may be NaN
        help_text=_("Products unit price at the moment of purchase."), **BaseOrder.decimalfield_kwargs)
    _line_total = models.DecimalField(_("Line Total"), null=True,  # may be NaN
        help_text=_("Line total on the invoice at the moment of purchase."), **BaseOrder.decimalfield_kwargs)
    extra = JSONField(verbose_name=_("Extra fields"), default={},
        help_text=_("Arbitrary information for this order item"))

    class Meta:
        abstract = True
        verbose_name = _("Order item")
        verbose_name_plural = _("Order items")

    def __str__(self):
        return self.product_name

    @classmethod
    def perform_model_checks(cls):
        # Sanity check at startup: the concrete OrderItem must declare a `quantity`
        # field whose type matches the one on the CartItem model.
        try:
            cart_field = [f for f in CartItemModel._meta.fields if f.attname == 'quantity'][0]
            order_field = [f for f in cls._meta.fields if f.attname == 'quantity'][0]
            if order_field.get_internal_type() != cart_field.get_internal_type():
                msg = "Field `{}.quantity` must be of one same type `{}.quantity`."
                raise ImproperlyConfigured(msg.format(cls.__name__, CartItemModel.__name__))
        except IndexError:
            msg = "Class `{}` must implement a field named `quantity`."
            raise ImproperlyConfigured(msg.format(cls.__name__))

    @property
    def unit_price(self):
        # Money-typed view of the raw decimal, in the order's currency.
        return MoneyMaker(self.order.currency)(self._unit_price)

    @property
    def line_total(self):
        return MoneyMaker(self.order.currency)(self._line_total)

    def populate_from_cart_item(self, cart_item, request):
        """
        From a given cart item, populate the current order item.
        If the operation was successful, the given item shall be removed from the cart.
        If a CartItem.DoesNotExist exception is raised, discard the order item.
        """
        if cart_item.quantity == 0:
            raise CartItemModel.DoesNotExist("Cart Item is on the Wish List")
        self.product = cart_item.product
        # for historical integrity, store the product's name and price at the moment of purchase
        self.product_name = cart_item.product.product_name
        self._unit_price = Decimal(cart_item.product.get_price(request))
        self._line_total = Decimal(cart_item.line_total)
        self.quantity = cart_item.quantity
        self.extra = dict(cart_item.extra)
        extra_rows = [(modifier, extra_row.data) for modifier, extra_row in cart_item.extra_rows.items()]
        self.extra.update(rows=extra_rows)

    def save(self, *args, **kwargs):
        """
        Before saving the OrderItem object to the database, round the amounts to the given decimal places
        """
        # NOTE: round_amount returns None for non-finite values, so NaN prices
        # are stored as NULL (the fields are nullable for exactly this reason).
        self._unit_price = BaseOrder.round_amount(self._unit_price)
        self._line_total = BaseOrder.round_amount(self._line_total)
        super(BaseOrderItem, self).save(*args, **kwargs)

# Concrete (materialized) order item model, resolved through the deferred machinery.
OrderItemModel = deferred.MaterializedModel(BaseOrderItem)
| rfleschenberg/django-shop | shop/models/order.py | Python | bsd-3-clause | 15,637 |
from __future__ import absolute_import
import os
from pymongo import MongoClient
from test_helpers import bases, mixins, mongo
class WhenCreatingTemporaryDatabase(bases.BaseTest):
    """Arrange/act/assert style test: creating a TemporaryDatabase registers it
    in the running MongoDB instance."""

    @classmethod
    def configure(cls):
        super(WhenCreatingTemporaryDatabase, cls).configure()
        # NOTE(review): port is passed as a string here but asserted with int
        # 27017 below — presumably TemporaryDatabase coerces it; confirm.
        cls.database = mongo.TemporaryDatabase(host='localhost', port='27017')

    @classmethod
    def execute(cls):
        cls.database.create()

    def should_create_database(self):
        mongodb = MongoClient(host='localhost', port=27017)
        self.assertIn(self.database.database_name, mongodb.database_names())
class WhenDroppingTemporaryDatabase(bases.BaseTest):
    """Dropping a previously created TemporaryDatabase removes it from MongoDB."""

    @classmethod
    def configure(cls):
        super(WhenDroppingTemporaryDatabase, cls).configure()
        cls.database = mongo.TemporaryDatabase(host='localhost', port='27017')
        # Create first so `drop` in execute() has something to remove.
        cls.database.create()

    @classmethod
    def execute(cls):
        cls.database.drop()

    def should_drop_database(self):
        mongodb = MongoClient(host='localhost', port=27017)
        self.assertNotIn(self.database.database_name, mongodb.database_names())
class WhenCreatingTemporaryDatabaseAndExportingEnv(
        mixins.EnvironmentMixin, bases.BaseTest):
    """`set_environment` exports MONGOHOST/MONGOPORT/MONGODATABASE so child
    processes can find the temporary database."""

    @classmethod
    def configure(cls):
        super(WhenCreatingTemporaryDatabaseAndExportingEnv, cls).configure()
        # Clear any pre-existing values so the assertions test what we exported.
        cls.unset_environment_variable('MONGOHOST')
        cls.unset_environment_variable('MONGOPORT')
        cls.unset_environment_variable('MONGODATABASE')
        cls.database = mongo.TemporaryDatabase(host='localhost', port='27017')
        cls.database.create()

    @classmethod
    def execute(cls):
        cls.database.set_environment()

    def should_export_mongohost(self):
        self.assertEqual(os.environ['MONGOHOST'], self.database.host)

    def should_export_mongoport(self):
        self.assertEqual(os.environ['MONGOPORT'], str(self.database.port))

    def should_export_mongodatabase(self):
        self.assertEqual(os.environ['MONGODATABASE'], self.database.database_name)
| aweber/test-helpers | tests/integration/test_mongo.py | Python | bsd-3-clause | 2,050 |
from base import MediaFile
from fields import MediaFileField
from widgets import AdminMediaFileWidget
| aino/aino-convert | convert/__init__.py | Python | bsd-3-clause | 105 |
import os
from os.path import join, getctime, dirname
import glob
import subprocess
import argparse
def newest_file(root):
    """Return the path of the most recently created ``*tar.bz2`` archive in *root*."""
    pattern = join(root, "*tar.bz2")
    candidates = glob.iglob(pattern)
    # Newest by filesystem creation time.
    return max(candidates, key=getctime)
def upload(pkg, user):
    """Upload the built package *pkg* to binstar under *user*'s account.

    Raises subprocess.CalledProcessError if the upload command fails.
    """
    # --force overwrites an existing package with the same name/version.
    cmd = ["binstar", "upload", "--force","-u", user, pkg]
    subprocess.check_call(cmd)
def build(recipe, build_path, pythons=[], platforms=[], binstar_user=None):
    """Build *recipe* with conda-build for each Python version, optionally
    upload each build, then convert it for each extra platform.

    NOTE(review): mutable default arguments (``pythons=[]``/``platforms=[]``)
    are shared across calls — harmless here since they are never mutated.
    """
    print (recipe, pythons, platforms)
    for p in pythons:
        cmd = ["conda", "build", recipe, "--python", p]
        subprocess.check_call(cmd)
        # conda-build writes into build_path; pick up the artifact it just made.
        pkg = newest_file(build_path)
        if binstar_user:
            upload(pkg, binstar_user)
        # Cross-convert the freshly built package for the other platforms.
        for plat in platforms:
            cmd = ["conda", "convert", "-p", plat, pkg]
            subprocess.check_call(cmd)
            if binstar_user:
                # `conda convert` writes into a subdirectory named after the platform.
                to_upload = newest_file(plat)
                upload(to_upload, binstar_user)
if __name__ == "__main__":
p = argparse.ArgumentParser()
p.add_argument("build_dir")
p.add_argument("--py", action="append", default=[])
p.add_argument("--plat", action="append", default=[])
p.add_argument("-u", "--binstar-user", help="binstar user")
args = p.parse_args()
build_dir = p.add_argument("build_dir")
build("conda.recipe", args.build_dir, args.py, args.plat, args.binstar_user)
#build("../into/conda.recipe", args.build_dir, args.py, args.plat, args.binstar_user)
"""
python build.py /opt/anaconda/conda-bld/linux-64 --py 27 --py 34 --plat osx-64 --plat win-64 -u hugo
"""
| ContinuumIO/multiuserblazeserver | build.py | Python | bsd-3-clause | 1,553 |
""" Database-related functionality for Minos. """
from flask_sqlalchemy import SQLAlchemy
from .app import app, cache
db = SQLAlchemy()
class SonosConfig(db.Model):
    """ Database-class that contains the configuration for Sonos funcionality. """
    __tablename__ = 'sonos_config'

    # Simple key/value store; `type` records how `value` should be interpreted.
    key = db.Column(db.String, nullable=False, primary_key=True)
    value = db.Column(db.String)
    _type = db.Column('type', db.String)
class OAuthConfig(db.Model):
    """ Configuration of OAuth providers.

    One row per (provider, key) pair; the unique constraint below prevents
    duplicate settings for the same provider.
    """
    __tablename__ = 'oauth_settings'

    id = db.Column(db.Integer, primary_key=True)
    provider_name = db.Column(db.String, nullable=False, index=True)
    key = db.Column(db.String, nullable=False)
    value = db.Column(db.String, nullable=False)
    __table_args__ = (db.UniqueConstraint('provider_name', 'key', name='oauth_settings_provider_key_uq'),)
# Track user roles in a table.
# Association table for the many-to-many User <-> Role relationship;
# rows are removed automatically when either side is deleted (CASCADE).
user_roles = db.Table(
    'user_roles',
    db.Column(
        'user_id',
        db.Integer,
        db.ForeignKey('users.id', ondelete='CASCADE'),
        primary_key=True
    ),
    db.Column(
        'role_id',
        db.Integer,
        db.ForeignKey('roles.id', ondelete='CASCADE'),
        primary_key=True
    )
)
class Role(db.Model):
    """ A role represents a type of user in the system.

    Roles do no support inheritence and are simply flat permission classes
    instead of a hierarchy.
    """
    __tablename__ = 'roles'

    # Columns.
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String, unique=True, nullable=False)

    # Relationships and constraints.
    users = db.relationship(
        'User',
        secondary=user_roles,
        back_populates='roles'
    )
class User(db.Model):
    """A user account authenticated through an external OAuth provider."""
    __tablename__ = 'users'

    # Columns.
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(length=50), unique=True, nullable=False)
    provider = db.Column(db.String, nullable=False)
    provider_token = db.Column(db.String, nullable=False)
    provider_token_secret = db.Column(db.String, nullable=False)

    # Relationships and constraints.
    roles = db.relationship(
        'Role',
        secondary=user_roles,
        back_populates='users'
    )

    @app.cache.memoize(timeout=300)
    def has_role(self, role_name):
        """ Check if a user has a role. """
        try:
            from flask import session
            # If the user is not logged in, bail out right away.
            if not session.get('logged_in', False):
                return False
        except Exception:
            # Bugfix: was a bare `except:` which also swallowed SystemExit and
            # KeyboardInterrupt. Outside a request context the session lookup
            # fails and is intentionally skipped.
            pass
        # If any of the role names match ours then we have that role.
        return any(map(lambda r: r.name == role_name, self.roles))
class UserVote(db.Model):
    """A single up/down vote by a user on a track (identified by its URI)."""
    __tablename__ = 'votes'
    # One vote per (user, track URI).
    __table_args__ = (db.PrimaryKeyConstraint('uid', 'uri', name='uservotes_pk'),)

    uid = db.Column(db.ForeignKey('users.id'))
    uri = db.Column(db.String(), nullable=False, index=True)
    speaker = db.Column(db.String, nullable=False)
    # NOTE(review): presumably +1/-1 for up/down vote — confirm against callers.
    direction = db.Column(db.Integer, nullable=False)
class Sessions(db.Model):
    """ Session object for Flask-Session. """
    __tablename__ = 'sessions'

    id = db.Column(db.Integer, primary_key=True)
    session_id = db.Column(db.String(256), unique=True)
    # Pickled/serialized session payload.
    data = db.Column(db.LargeBinary)
    expiry = db.Column(db.DateTime)
# -*- coding: utf-8 -*-
import contextlib
import logging
import os
import os.path
import yaml
from bravado_core.spec import is_yaml
from six.moves import urllib
from six.moves.urllib import parse as urlparse
from bravado.compat import json
from bravado.requests_client import RequestsClient
log = logging.getLogger(__name__)
def is_file_scheme_uri(url):
    """Return True when *url* uses the ``file:`` scheme."""
    scheme = urlparse.urlparse(url).scheme
    return scheme == u'file'
class FileEventual(object):
    """Adaptor which supports the :class:`crochet.EventualResult`
    interface for retrieving api docs from a local file.
    """

    class FileResponse(object):
        """Minimal stand-in for an HTTP response (text, headers, json())."""

        def __init__(self, data):
            self.text = data
            self.headers = {}

        def json(self):
            return json.loads(self.text)

    def __init__(self, path):
        self.path = path
        self.is_yaml = is_yaml(path)

    def get_path(self):
        # Default to a .json extension unless the path already has one or is YAML.
        if not self.path.endswith('.json') and not self.is_yaml:
            return self.path + '.json'
        return self.path

    def wait(self, timeout=None):
        # `timeout` is accepted only for interface compatibility; reads are synchronous.
        with contextlib.closing(urllib.request.urlopen(self.get_path())) as fp:
            content = fp.read()
        return self.FileResponse(content)

    def result(self, *args, **kwargs):
        return self.wait(*args, **kwargs)

    def cancel(self):
        # Nothing to cancel for a synchronous local file read.
        pass
def request(http_client, url, headers):
    """Download and parse JSON from a URL.

    :param http_client: a :class:`bravado.http_client.HttpClient`
    :param url: url for api docs
    :param headers: dict of request headers to send
    :return: an object with a :func`wait` method which returns the api docs
    """
    # file:// URLs bypass the HTTP client and are read from disk.
    if is_file_scheme_uri(url):
        return FileEventual(url)
    request_params = {
        'method': 'GET',
        'url': url,
        'headers': headers,
    }
    return http_client.request(request_params)
class Loader(object):
    """Abstraction for loading Swagger API's.

    :param http_client: HTTP client interface.
    :type http_client: http_client.HttpClient
    :param request_headers: dict of request headers
    """

    def __init__(self, http_client, request_headers=None):
        self.http_client = http_client
        self.request_headers = request_headers or {}

    def load_spec(self, spec_url, base_url=None):
        """Load a Swagger Spec from the given URL

        :param spec_url: URL to swagger.json
        :param base_url: TODO: need this?
        :returns: json spec in dict form
        """
        response = request(
            self.http_client,
            spec_url,
            self.request_headers,
        ).result()
        # Decide JSON vs YAML from the URL extension and the Content-Type header.
        content_type = response.headers.get('content-type', '').lower()
        if is_yaml(spec_url, content_type):
            return self.load_yaml(response.text)
        else:
            return response.json()

    def load_yaml(self, text):
        """Load a YAML Swagger spec from the given string, transforming
        integer response status codes to strings. This is to keep
        compatibility with the existing YAML spec examples in
        https://github.com/OAI/OpenAPI-Specification/tree/master/examples/v2.0/yaml

        :param text: String from which to parse the YAML.
        :type text: basestring
        :return: Python dictionary representing the spec.
        :raise: yaml.parser.ParserError: If the text is not valid YAML.
        """
        data = yaml.safe_load(text)
        # YAML parses bare `200:` keys as ints; Swagger expects string keys.
        for path, methods in iter(data.get('paths', {}).items()):
            for method, operation in iter(methods.items()):
                if 'responses' in operation:
                    operation['responses'] = dict(
                        (str(code), response)
                        for code, response in iter(
                            operation['responses'].items()
                        )
                    )
        return data
# TODO: Adding the file scheme here just adds complexity to request()
# Is there a better way to handle this?
def load_file(spec_file, http_client=None):
    """Loads a spec file

    :param spec_file: Path to swagger.json.
    :param http_client: HTTP client interface.
    :return: validated json spec in dict form
    :raise: IOError: On error reading swagger.json.
    """
    file_path = os.path.abspath(spec_file)
    url = urlparse.urljoin(u'file:', urllib.request.pathname2url(file_path))
    # When loading from files, everything is relative to the spec file
    dir_path = os.path.dirname(file_path)
    base_url = urlparse.urljoin(u'file:', urllib.request.pathname2url(dir_path))
    return load_url(url, http_client=http_client, base_url=base_url)
def load_url(spec_url, http_client=None, base_url=None):
    """Loads a Swagger spec.

    :param spec_url: URL for swagger.json.
    :param http_client: HTTP client interface.
    :param base_url: Optional URL to be the base URL for finding API
        declarations. If not specified, 'basePath' from the
        resource listing is used.
    :return: validated spec in dict form
    :raise: IOError, URLError: On error reading api-docs.
    """
    # Default to a synchronous requests-based client.
    if http_client is None:
        http_client = RequestsClient()
    loader = Loader(http_client=http_client)
    return loader.load_spec(spec_url, base_url=base_url)
| analogue/bravado | bravado/swagger_model.py | Python | bsd-3-clause | 5,223 |
from django.conf.urls.defaults import *
from .views import *
# URL routes for signup/login. Each page exists both as a standalone view and
# as an embeddable iframe variant; the "featured" mixin controls which of the
# two forms (signup vs login) is presented prominently.
urlpatterns = patterns('',
    url(r'^signup/$',
        view=SignupLoginView.as_view(
            featured_form_mixin_class=SignupMultipleFormMixin),
        name='accounts_signup'
    ),
    url(r'^login/$',
        view=SignupLoginView.as_view(
            featured_form_mixin_class=LoginMultipleFormMixin),
        name='accounts_login'
    ),
    # Combined page with neither form featured.
    url(r'^signup-login/$',
        view=SignupLoginView.as_view(),
        name='accounts_signup_login'
    ),
    # Iframe-embeddable variants of the three pages above.
    url(r'^iframes/signup/$',
        view=SignupLoginIframeView.as_view(
            featured_form_mixin_class=SignupIframeMultipleFormMixin),
        name='accounts_signup_iframe'
    ),
    url(r'^iframes/login/$',
        view=SignupLoginIframeView.as_view(
            featured_form_mixin_class=LoginIframeMultipleFormMixin),
        name='accounts_login_iframe'
    ),
    url(r'^iframes/signup-login/$',
        view=SignupLoginIframeView.as_view(),
        name='accounts_signup_login_iframe'
    ),
    url(r'^iframes/signup-login/success/$',
        view=SignupLoginSuccessIframeView.as_view(),
        name='accounts_signup_login_success_iframe'
    ),
    url(r'^logout/$',
        view=LogoutView.as_view(),
        name='accounts_logout'
    ),
)
| mfogel/django-signup-login | signup_login/urls.py | Python | bsd-3-clause | 1,287 |
import urllib
import telnetlib
import logging
import cjson
from models import BinaryResource
from django.http import HttpResponse
from django.shortcuts import render_to_response
from django import forms
from django.contrib.auth import authenticate, login
from sana.mrs.openmrs import sendToOpenMRS
from sana.mrs.util import enable_logging
from sana.mrs.models import Notification
from sana.mrs.util import enable_logging
def chunk(seq, size, pad=None):
    """Slice a list into consecutive disjoint 'chunks' of
    length equal to size. The last chunk is padded if necessary.

    Example: ::

        >>> list(chunk([1, 2, 3, 4, 5, 6, 7, 8, 9], 3))
        [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
        >>> list(chunk([1, 2, 3, 4, 5, 6, 7, 8], 3))
        [[1, 2, 3], [4, 5, 6], [7, 8, None]]
        >>> list(chunk([1, 2, 3, 4, 5, 6, 7], 3))
        [[1, 2, 3], [4, 5, 6], [7, None, None]]
        >>> for X in chunk([], 3): print X
        >>>

    Parameters:
        seq
            The sequence to slice
        size
            The size of each chunk
        pad
            The value used to fill the final chunk up to `size`
    """
    n = len(seq)
    mod = n % size
    # Bugfix/portability: use range() instead of the Python-2-only xrange();
    # iteration behavior is identical on both Python 2 and 3.
    for i in xrange(0, n - mod, size) if False else range(0, n - mod, size):
        yield seq[i:i + size]
    if mod:
        # Final partial chunk: top it up with `pad` so every chunk has `size` items.
        padding = [pad] * (size - mod)
        yield seq[-mod:] + padding
class FakeProcedureSubmitForm(forms.Form):
    """Encounter form for testing"""
    # Flat comma-separated question/answer pairs, decoded by `chunk(...,2)` in the view.
    responses = forms.CharField(required=True,
        help_text='question,answer,question,answer,..')
    procedure_id = forms.IntegerField(required=True, help_text="integers only")
    phone_id = forms.CharField(max_length=255)
    patient_id = forms.CharField(max_length=255)
    # The uploaded file is read straight from request.FILES in the view instead.
    #data = forms.FileField(required=True)
def procedure_submit(request):
    """For testing encounter submission

    Renders the fake submission form; on a valid POST with an uploaded file,
    stores the upload as a BinaryResource and forwards the encounter to OpenMRS.
    """
    upload = request.FILES.get('data', None)
    print upload
    if request.method == 'POST' and upload is not None:
        form = FakeProcedureSubmitForm(request.POST)
    else:
        form = FakeProcedureSubmitForm()
    if form.is_valid():
        print "valid"
        print form.cleaned_data
        phoneId = form.cleaned_data['phone_id']
        patientId = form.cleaned_data['patient_id']
        procedureId = form.cleaned_data['procedure_id']
        responses = form.cleaned_data['responses']
        binary = BinaryResource(element_id='test',
            content_type='',
            procedure=procedureId)
        binary.data.save(upload.name, upload)
        binary.save()
        # Decode the flat "q,a,q,a,..." string into a question->answer dict.
        qas = {}
        for q,a in chunk(responses.split(','),2, pad=''):
            qas[q] = a
        # Hard-coded test mapping from numeric id to the OpenMRS procedure name.
        if procedureId == 1:
            procedureId = "Diagnose Cervical Cancer"
        sendToOpenMRS(patientId, phoneId, procedureId, str(binary.data.path), qas)
    return render_to_response("procedure_submit.html",
        {'form': form})
def notification_submit(request):
    """Render the (static) notification submission test page."""
    return render_to_response("notification_submit.html")
@enable_logging
def list_notifications(request):
    """For synching notifications with mobile clients.

    Request Params
        username
            A valid username.
        password
            A valid password.

    Parameters:
        request
            A client request for the notification list

    Returns a JSON response with 'status' (SUCCESS/FAILURE) and 'data'.
    """
    logging.info("entering notification list proc")
    username = request.REQUEST.get('username',None)
    password = request.REQUEST.get('password',None)
    user = authenticate(username=username, password=password)
    if user is not None:
        try:
            data = Notification.objects.all()
            logging.info("we finished getting the notification list")
            response = {'status': 'SUCCESS',
                        'data': [cjson.decode(d.to_json()) for d in data],
                        }
        except Exception as e:
            # Bugfix: `sys` and `traceback` were referenced here but never
            # imported at module level, so this handler itself raised NameError;
            # import them locally. Also use the `except ... as` syntax which is
            # valid on both Python 2.6+ and Python 3.
            import sys
            import traceback
            et, val, tb = sys.exc_info()
            trace = traceback.format_tb(tb)
            for tbm in trace:
                logging.error(tbm)
            logging.error("Got exception while fetching notification list: %s" % e)
            response = {
                'status': 'FAILURE',
                'data': "Problem while getting notification list: %s" % e,
            }
    else:
        logging.error('User not authenticated')
        response = {
            'status': 'FAILURE',
            'data': 'User not authenticated',
        }
    return HttpResponse(cjson.encode(response), content_type=("application/json; charset=utf-8"))
def home(request):
    """Top level url

    Responds with a plain status banner so clients can verify the
    dispatch server is reachable.
    """
    status_banner = 'Sanamobile MDS : Online'
    return HttpResponse(status_banner)
| addisclinic/mobile-dispatch-server | sana/mrs/views.py | Python | bsd-3-clause | 4,834 |
from django import template
from system.models import Configuration
register = template.Library()
@register.assignment_tag
def get_config(conf_name=None):
    """Template tag: look up a Configuration entry by name and expose its
    fields as a dict, or None when no such entry exists."""
    if conf_name is None:
        raise Exception("Invalid config name")
    entry = Configuration.get_by_name_all_fields(conf_name)
    if not entry:
        return None
    return dict(
        name=entry.name,
        value=entry.value,
        description=entry.description,
        hash=entry.hash,
    )
| globocom/database-as-a-service | dbaas/admin/templatetags/config_tags.py | Python | bsd-3-clause | 453 |
from corehq.util.spreadsheets.excel import WorkbookJSONReader
from soil import DownloadBase
class UnknownFileRefException(Exception):
    """Raised when a soil download reference id cannot be resolved
    (e.g. because the upload expired)."""
    pass
class ExcelImporter(object):
    """
    Base class for `SingleExcelImporter` and `MultiExcelImporter`.
    This is not meant to be used directly.
    """

    def __init__(self, task, file_ref_id):
        """
        :param task: optional celery-style task used for progress reporting.
        :param file_ref_id: soil download reference id of the uploaded workbook.
        :raises UnknownFileRefException: if the reference cannot be resolved.
        """
        self.task = task
        self.progress = 0
        if self.task:
            DownloadBase.set_progress(self.task, 0, 100)
        download_ref = DownloadBase.get(file_ref_id)
        if download_ref is None:
            raise UnknownFileRefException("Could not find file wih ref %s. It may have expired" % file_ref_id)
        self.workbook = WorkbookJSONReader(download_ref.get_filename())

    def mark_complete(self):
        # Force progress to 100% regardless of how many rows were counted.
        if self.task:
            DownloadBase.set_progress(self.task, 100, 100)

    def add_progress(self, count=1):
        # `total_rows` is defined by subclasses once the worksheet(s) are known.
        self.progress += count
        if self.task:
            DownloadBase.set_progress(self.task, self.progress, self.total_rows)
class SingleExcelImporter(ExcelImporter):
    """
    Manage importing from an excel file with only one
    worksheet.
    """

    def __init__(self, task, file_ref_id):
        super(SingleExcelImporter, self).__init__(task, file_ref_id)
        # Only the first worksheet is considered.
        self.worksheet = self.workbook.worksheets[0]
        self.total_rows = self.worksheet.worksheet.get_highest_row()
class MultiExcelImporter(ExcelImporter):
    """
    Manage importing from an excel file with multiple
    relevant worksheets.
    """

    def __init__(self, task, file_ref_id):
        super(MultiExcelImporter, self).__init__(task, file_ref_id)
        self.worksheets = self.workbook.worksheets
        # Progress is tracked across the combined row count of all sheets.
        self.total_rows = sum(ws.worksheet.get_highest_row() for ws in self.worksheets)
| qedsoftware/commcare-hq | corehq/util/spreadsheets/excel_importer.py | Python | bsd-3-clause | 1,783 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
## Based on learner.js (by Blake Allen and Michael Becker)
import itertools
import collections
from collections import defaultdict
import pdb
import phoment
class Change(object):
    """One elementary alignment edit: an insertion, deletion, or mutation of
    segment material at a given position within an aligned word pair."""

    def __init__(self, change_type, position, input_material, output_material):
        self.change_type = change_type
        self.position = position
        self.input_material = input_material
        self.output_material = output_material

    def __repr__(self):
        # Human-readable summary, e.g. "mutate ['a'] to ['b'] at 3".
        parts = (self.change_type, self.input_material,
                 self.output_material, self.position)
        return '{0} {1} to {2} at {3}'.format(*parts)

    def __str__(self):
        return repr(self)
class Sublexicon(object):
    """Starts off as a hypothesis; will grow and compete with others, potentially
    becoming a sublexicon of the final grammar.

    :param changes: iterable of Change objects; stored in canonical (str-sorted)
        order so identical change sets compare equal across hypotheses.
    :param associated_forms: list of dicts describing the base/derivative pairs
        this hypothesis accounts for.
    """

    def __init__(self, changes, associated_forms):
        self.changes = tuple(sorted(changes, key=lambda c: str(c)))
        self.associated_forms = associated_forms
        # Filled in later during grammar fitting.
        self.constraint_names = None
        self.weights = None
        self.megatableau = None
        self.relative_size = 0.0

    def __repr__(self):
        # Bugfix: removed the dead `example_count = min(5, ...)` computation —
        # its result was never used.
        return str(self.changes)

    def __str__(self):
        return self.__repr__()
def create_and_reduce_hypotheses(alignments, pre_reduction_cutoff, orientation='product'):
    """Build every candidate Sublexicon hypothesis from the aligned word pairs,
    merge duplicates, optionally drop rare ones, then reduce the remainder.

    NOTE(review): relies on `linearize_word` and `reduce_hypotheses`, which are
    defined elsewhere in this module — confirm their contracts there.
    """
    unfiltered_hypotheses = []
    all_pairs = []
    for alignment in alignments:
        base = linearize_word([column['elem1'] for column in alignment['alignment']])
        derivative = linearize_word([column['elem2'] for column in alignment['alignment']])
        basic_changes = find_basic_changes(alignment['alignment'])
        grouped_changes = group_changes(basic_changes)
        # Each change can anchor to the left or right edge; take the cartesian
        # product so every anchoring combination becomes its own hypothesis.
        possibilities_for_all_changes = [create_change_possibilities(c, base) for c in grouped_changes]
        product = list(itertools.product(*possibilities_for_all_changes))
        for cp in product:
            unfiltered_hypotheses.append(Sublexicon(cp, [{'base':base, 'derivative':derivative, 'probability':alignment['probability'], 'lexeme':alignment['lexeme']}]))
        all_pairs.append({'base':base, 'derivative':derivative, 'probability':alignment['probability'], 'lexeme':alignment['lexeme']})
    # Merge hypotheses sharing the same change tuple, biggest first.
    combined_hypotheses = combine_identical_hypotheses(unfiltered_hypotheses)
    combined_hypotheses.sort(key=lambda h: len(h.associated_forms))
    combined_hypotheses.reverse()
    if pre_reduction_cutoff:
        # Discard hypotheses supported by too few forms before the costly reduction.
        combined_hypotheses = [h for h in combined_hypotheses if len(h.associated_forms) >= pre_reduction_cutoff]
    print('Hypotheses ready for reduction. Pre-reduction hypothesis count: {}'.format(str(len(combined_hypotheses))))
    reduced_hypotheses = reduce_hypotheses(combined_hypotheses, all_pairs, orientation)
    # Normalize sublexicon sizes into relative probabilities.
    sublexicon_sizes = [sum([af['probability'] for af in h.associated_forms]) for h in reduced_hypotheses]
    size_total = sum(sublexicon_sizes)
    for h, size in zip(reduced_hypotheses, sublexicon_sizes):
        h.relative_size = size / size_total
        h.total_probability = sum([af['probability'] for af in h.associated_forms])
    return reduced_hypotheses
def find_basic_changes(alignment):
    """Find the differences between the aligned base and derivative.
    Return differences as Changes with positive indices as positions.

    Positions use a doubled index scheme over the base: even indices
    (surface_i*2) are the slots BETWEEN base segments (insertions land there),
    odd indices (surface_i*2+1) are the segment slots themselves
    (deletions/mutations).
    """
    changes = []
    surface_i = 0  # index into the base's surface segments
    for column in alignment:
        if column['elem1'] != column['elem2']:
            if column['elem1'] == None:
                # Base has no segment here: an insertion in the derivative.
                changes.append(Change('insert', surface_i*2, [column['elem1']], [column['elem2']]))
                # surface_i does not increment
            elif column['elem2'] == None:
                # Derivative lacks this base segment: a deletion.
                changes.append(Change('delete', surface_i*2+1, [column['elem1']], [column['elem2']]))
                surface_i += 1
            else:
                changes.append(Change('mutate', surface_i*2+1, [column['elem1']], [column['elem2']]))
                surface_i += 1
        else:
            surface_i += 1
    return changes
def create_change_possibilities(change, base, side='both'):
    """Return positional variants of *change*.

    The left-anchored variant keeps the original (positive) position.
    The right-anchored variant expresses the same slot as a negative
    offset measured from the end of the noned base.
    """
    variants = []
    if side in ('left', 'both'):
        variants.append(change)
    if side in ('right', 'both'):
        offset_from_end = len(add_nones(base)) - change.position
        variants.append(Change(change.change_type, -offset_from_end,
                               change.input_material, change.output_material))
    return variants
def group_changes(changes):
    """Consolidate same-position insertions and deletions into single changes.

    NOTE: mutates the Change objects in *changes* in place -- when a run
    of changes is merged, the first change's input/output material list
    is extended with the material of the later ones.
    """
    # Partition by change type; only insertions and deletions can merge.
    insertions = [c for c in changes if c.change_type == 'insert']
    deletions = [c for c in changes if c.change_type == 'delete']
    mutations = [c for c in changes if c.change_type == 'mutate']
    inserted_locations = [ins.position for ins in insertions]
    grouped_insertions = []
    for i, ins in enumerate(insertions):
        if i > 0:
            # Consecutive insertions at the same (even) slot form one
            # multi-segment insertion; extend the previous group in place.
            if ins.position == insertions[i-1].position:
                grouped_insertions[-1].output_material += ins.output_material
                continue
        grouped_insertions.append(ins)
    grouped_deletions = []
    for i, dlt in enumerate(deletions):
        if i > 0:
            # Deletions of adjacent base segments (odd positions 2 apart)
            # merge, unless an insertion occupies the slot between them.
            if dlt.position == deletions[i-1].position+2 and dlt.position-1 not in inserted_locations:
                grouped_deletions[-1].input_material += dlt.input_material
                continue
        grouped_deletions.append(dlt)
    # Re-interleave all surviving changes in surface order.
    return sorted(grouped_insertions + grouped_deletions + mutations, key=lambda x: x.position)
def combine_identical_hypotheses(hypotheses):
    """Merge hypotheses whose Change lists are identical.

    Each merged hypothesis keeps one copy of the shared changes and the
    concatenation of the component hypotheses' (single) associated forms.
    """
    by_changes = defaultdict(list)
    for hypothesis in hypotheses:
        by_changes[str(hypothesis.changes)].append(hypothesis)
    merged = []
    for group in by_changes.values():
        forms = [member.associated_forms[0] for member in group]
        merged.append(Sublexicon(group[0].changes, forms))
    return merged
def add_nones(word):
    """Return *word* as a list with None interleaved around every segment.

    'a b' or ['a', 'b'] -> [None, 'a', None, 'b', None].  A space-spaced
    string is split into segments first; any other sequence is used
    as-is.

    An empty sequence yields [None] (the single boundary slot).  The
    original generator did ``yield next(it)`` unconditionally, which for
    an empty iterable raises StopIteration inside a generator and hence
    RuntimeError under PEP 479 (Python 3.7+).
    """
    def yield_it(segments):
        yield None
        for x in segments:
            yield x
            yield None
    if isinstance(word, str):
        return list(yield_it(word.split(' ')))
    return list(yield_it(word))
def apply_change(current_base, current_derivative, change, orientation):
    """Use the given set of changes to derive a new form from the base word.
    May be only one intermediate step in the application of multiple
    changes associated with a single hypothesis/sublexicon.

    Both words are in "noned" list form (see add_nones); a slot may hold
    a nested list, which linearize_word flattens later.

    Raises:
        Exception: if orientation == 'source' and the change's expected
            input material is not actually present in the base.
    """
    change_position = make_index_positive(current_base, change.position)
    # Work on shallow copies so the caller's lists are not modified.
    changed_base = current_base[:]
    changed_derivative = current_derivative[:]
    if change.change_type == 'insert':
        # A multi-segment insertion is stored nested inside one slot;
        # the base gets matching None placeholders.
        changed_base[change_position] = [None for s in change.output_material]
        changed_derivative[change_position] = change.output_material
    if change.change_type == 'delete':
        # Segments sit at every other index, hence the (i*2) stride.
        for i, s in enumerate(change.input_material):
            if orientation == 'source' and current_base[change_position+(i*2)] != s:
                raise Exception('Deletion incompatible with base: no {} to delete.'.format(s))
            changed_derivative[change_position+(i*2)] = None
    if change.change_type == 'mutate':
        for i, s in enumerate(change.output_material):
            # NOTE(review): the guard compares against input_material[i],
            # but the message reports the output segment s -- confirm intent.
            if orientation == 'source' and current_base[change_position+(i*2)] != change.input_material[i]:
                raise Exception('Mutation incompatible with base: no {} to mutate.'.format(s))
            changed_derivative[change_position+(i*2)] = s
    return (changed_base, changed_derivative)
def apply_hypothesis(word, hypothesis, orientation='product'):
    """Apply the changes in a hypothesis to a (base) word.

    The base word can be either a list of segments (no Nones) or a
    space-spaced string.

    Returns:
        The derived word as a space-spaced string, or the string
        'incompatible' if any change cannot apply to this base.
    """
    current_base = list(add_nones(word))
    current_derivative = list(add_nones(word))
    try:
        for change in hypothesis.changes:
            current_base, current_derivative = apply_change(
                current_base, current_derivative, change, orientation)
    except Exception:
        # apply_change signals an inapplicable change by raising; the
        # original bare 'except:' would also have swallowed
        # KeyboardInterrupt and SystemExit.
        return 'incompatible'
    return linearize_word(current_derivative)
def apply_operation(word, operation, orientation='product'):
    """Apply the changes in a psublexicon's operation to a (base) word.

    The base word can be either a list of segments (no Nones) or a
    space-spaced string.

    Returns:
        The derived word as a space-spaced string, or the string
        'incompatible' if any change cannot apply to this base.
    """
    current_base = list(add_nones(word))
    current_derivative = list(add_nones(word))
    try:
        for change in operation:
            current_base, current_derivative = apply_change(
                current_base, current_derivative, change, orientation)
    except Exception:  # inapplicable change signalled by apply_change
        return 'incompatible'
    return linearize_word(current_derivative)
def make_index_positive(word, index):
    """Return *index* normalized to a non-negative offset into *word*."""
    return index if index >= 0 else len(word) + index
def linearize_word(word):
    """Create a space-spaced string from a list-formatted word (even one
    with Nones or nested lists).
    """
    # collections.Iterable was removed in Python 3.10; the abc module is
    # the supported location.  Imported locally to keep the fix
    # self-contained.
    from collections.abc import Iterable

    def flatten(elements):
        for el in elements:
            if isinstance(el, Iterable) and not isinstance(el, str):
                for sub in flatten(el):
                    yield sub
            else:
                yield el

    return ' '.join(s for s in flatten(word) if s is not None)
def account_for_all(hypotheses, all_pairs, orientation):
    """Return True iff every base-derivative pair in *all_pairs* is derived
    correctly by at least one hypothesis in *hypotheses*.

    Uses any()/all() so evaluation short-circuits on the first pair that
    no hypothesis accounts for (the original built a full list per pair).
    """
    return all(
        any(apply_hypothesis(pair['base'], h, orientation) == pair['derivative']
            for h in hypotheses)
        for pair in all_pairs
    )
def reduce_hypotheses(hypotheses, all_pairs, orientation='product'):
    """Condenses the list of hypotheses about the entire dataset into the
    minimum number required to account for all base-derivative pairs.

    Expects *hypotheses* sorted largest-first (by associated form count).
    Mutates the surviving hypotheses' associated_forms lists in place.
    """
    # Parallel reversed view of the same list.  Entries in BOTH views are
    # overwritten with the sentinel string 'purgeable' once consumed.
    reversed_hypotheses = hypotheses[::-1]
    print('Carrying out single reduction...')
    # First step: check to see if any small hypotheses can be consumed by any single larger one
    for j, large in enumerate(hypotheses): # j = potential consumer, will be at least as large as consumed (i)
        # print('Checking large hypothesis {} out of {}...'.format(j, len(hypotheses)))
        for i, small in enumerate(reversed_hypotheses): # can be consumed
            if small != 'purgeable' and large != 'purgeable' and small != large:
                consumabilities = [] # could probably be refactored so as not to need to determine all consumabilities (only until failure)
                for associated_form in small.associated_forms:
                    small_base = associated_form['base']
                    small_derivative = associated_form['derivative']
                    large_predicted_derivative = apply_hypothesis(small_base, large, orientation)
                    consumabilities.append(small_derivative == large_predicted_derivative)
                if False not in consumabilities: # if there are no forms in small that large cannot account for
                    for bd in small.associated_forms:
                        if bd not in large.associated_forms:
                            large.associated_forms.append(bd)
                    # Mark the same hypothesis purgeable in both views:
                    # reversed index i corresponds to forward index -(i+1).
                    hypotheses[-(i+1)] = 'purgeable'
                    # print('Purging small hypothesis {} out of {} (reversed)...'.format(i, len(hypotheses)))
                    reversed_hypotheses[i] = 'purgeable'
    hypotheses = [h for h in hypotheses if h != 'purgeable']
    # Second step: check for smallest number of adequate hypotheses
    print('Moving to multiple reduction...')
    # Combinations are generated smallest-size-first, so the first
    # adequate combination found is a minimum-size cover.
    combinations = itertools.chain.from_iterable([itertools.combinations(hypotheses, n) for n in range(1,len(hypotheses))])
    for combo in combinations:
        if account_for_all(combo, all_pairs, orientation):
            # winner found! Add missing contexts to their respective winners
            for pair in all_pairs:
                for hypothesis in combo:
                    if apply_hypothesis(pair['base'], hypothesis, orientation) == pair['derivative']:
                        form = pair
                        if form not in hypothesis.associated_forms:
                            hypothesis.associated_forms.append(form) # does combo actually get modified here? Double-check!
                        break
            return combo
    return [h for h in hypotheses if h != 'purgeable']
def add_zero_probability_forms(hypotheses):
    """Add every form from every hypothesis A to every other hypothesis B
    with a probability of 0 if the form is not already in hypothesis B.

    Mutates and returns *hypotheses*.
    """
    all_lexemes_and_bases = [(af['lexeme'], af['base'])
                             for hypothesis in hypotheses
                             for af in hypothesis.associated_forms]
    for hypothesis in hypotheses:
        # Set instead of a list: O(1) membership tests in the inner loop.
        # NOTE(review): deliberately not updated while appending, matching
        # the original list-based behavior exactly.
        these_lexemes = {af['lexeme'] for af in hypothesis.associated_forms}
        for lexeme, base in all_lexemes_and_bases:
            if lexeme not in these_lexemes:
                hypothesis.associated_forms.append(
                    {'base': base,
                     'derivative': apply_hypothesis(base, hypothesis),
                     'probability': 0.0,
                     'lexeme': lexeme})
    return hypotheses
def add_grammar(sublexicon, constraints, l1_mult=0.0, l2_mult=0.001):
    """Fit a MaxEnt grammar for *sublexicon* over *constraints*.

    Attaches the learned weights, the constraint names and the fitted
    MegaTableau to the sublexicon, then returns it.

    Args:
        sublexicon: the Sublexicon to fit (mutated in place).
        constraints: sequence of constraint names.
        l1_mult: L1 regularization multiplier.
        l2_mult: L2 regularization multiplier.
    """
    # (The original also computed a weight/name ranking that fed only
    # commented-out debug prints; that dead code has been removed.)
    mt = phoment.MegaTableau(sublexicon, constraints)
    sublexicon.weights = phoment.learn_weights(mt, l1_mult, l2_mult)
    sublexicon.constraint_names = constraints
    sublexicon.megatableau = mt
    return sublexicon
# def create_mapping_tableau(sublexicons, megatableaux):
# new_tableau = {}
# for s,m in zip(sublexicons, megatableaux):
# for af in s.associated_forms:
# if af['lexeme'] in new_tableau:
# if af['derivative'] in new_tableau[af['lexeme']]:
# new_tableau[af['lexeme']][af['derivative']] += m.tableau[''][af['lexeme']][2]
# else:
# if af['derivative'] != 'incompatible':
# new_tableau[af['lexeme']][af['derivative']] = m.tableau[''][af['lexeme']][2]
# else:
# new_tableau[af['lexeme']] = {}
# if af['derivative'] != 'incompatible':
# new_tableau[af['lexeme']][af['derivative']] = m.tableau[''][af['lexeme']][2]
# for lexeme in new_tableau:
# total = 0
# ordered_derivatives = sorted([d for d in new_tableau[lexeme]])
# for derivative in ordered_derivatives:
# total += new_tableau[lexeme][derivative]
# for derivative in new_tableau[lexeme]:
# new_tableau[lexeme][derivative] /= total
# return new_tableau
| bhallen/pyparadigms | hypothesize.py | Python | bsd-3-clause | 15,896 |
# -*-coding:Utf-8 -*
# Copyright (c) 2010 LE GOFF Vincent
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le contexte éditeur EdtStats"""
from primaires.interpreteur.editeur import Editeur
from primaires.format.fonctions import contient
class EdtStats(Editeur):

    """Editor context 'stats'.

    Lets the user edit the default stats of a race.  Input is of the
    form '<stat name> / <value>'; '/' alone returns to the parent
    window.
    """

    def __init__(self, pere, objet=None, attribut=None):
        """Editor constructor."""
        Editeur.__init__(self, pere, objet, attribut)

    def accueil(self):
        """Build the welcome message: a table of the editable stats.

        Stats whose 'max' flag is set are read-only and omitted.
        """
        msg = \
            "Entrez le |ent|nom|ff| de la stat, un signe |ent|/|ff| " \
            "et la valeur pour modifier une stat.\nExemple : |cmd|force / " \
            "45|ff|\n\nEntrez |ent|/|ff| pour revenir à la fenêtre parente\n\n"
        stats = self.objet
        msg += "+-" + "-" * 20 + "-+-" + "-" * 6 + "-+\n"
        msg += "| " + "Nom".ljust(20) + " | " + "Valeur".ljust(6) + " |\n"
        msg += "| " + " ".ljust(20) + " | " + " ".ljust(6) + " |"
        for stat in stats:
            if not stat.max:
                msg += "\n| |ent|" + stat.nom.ljust(20) + "|ff| | "
                msg += str(stat.defaut).rjust(6) + " |"
        return msg

    def interpreter(self, msg):
        """Interpret the user's message ('<name> / <value>')."""
        try:
            nom_stat, valeur = msg.split(" / ")
        except ValueError:
            self.pere << "|err|Syntaxe invalide.|ff|"
        else:
            # Look the stat up by (partial) name among editable stats.
            stat = None
            for t_stat in self.objet:
                if not t_stat.max and contient(t_stat.nom, nom_stat):
                    stat = t_stat
                    break
            if not stat:
                self.pere << "|err|Cette stat est introuvable.|ff|"
            else:
                # Conversion and range check.  Explicit conditions
                # instead of assert: asserts are stripped under
                # 'python -O', which would silently disable validation.
                try:
                    valeur = int(valeur)
                except ValueError:
                    self.pere << "|err|Valeur invalide.|ff|"
                else:
                    if valeur <= 0 or valeur < stat.marge_min \
                            or valeur > stat.marge_max:
                        self.pere << "|err|Valeur invalide.|ff|"
                    else:
                        stat.defaut = valeur
                        stat.courante = valeur
                        self.actualiser()
| stormi/tsunami | src/primaires/pnj/editeurs/pedit/edt_stats.py | Python | bsd-3-clause | 3,848 |
# Copyright (c) 2012 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Unittests for the stage results."""
from __future__ import print_function
import mock
import os
import signal
import StringIO
import time
from chromite.cbuildbot import cbuildbot_config
from chromite.cbuildbot import failures_lib
from chromite.cbuildbot import results_lib
from chromite.cbuildbot import cbuildbot_run
from chromite.cbuildbot.builders import simple_builders
from chromite.cbuildbot.stages import generic_stages
from chromite.cbuildbot.stages import sync_stages
from chromite.lib import cros_build_lib
from chromite.lib import cros_test_lib
from chromite.lib import parallel
class PassStage(generic_stages.BuilderStage):
    """PassStage always works (no overrides; a trivially passing stage)."""
class Pass2Stage(generic_stages.BuilderStage):
    """Pass2Stage always works (second passing stage with a distinct name)."""
class FailStage(generic_stages.BuilderStage):
    """FailStage always throws an exception"""

    # Single shared exception instance, so tests can compare identity/repr.
    FAIL_EXCEPTION = failures_lib.StepFailure("Fail stage needs to fail.")

    def PerformStage(self):
        """Throw the exception to make us fail."""
        raise self.FAIL_EXCEPTION
class SkipStage(generic_stages.BuilderStage):
    """SkipStage is skipped."""

    # Borrows an existing config entry; presumably one disabled in the
    # test's build config so the stage records as SKIPPED -- TODO confirm.
    config_name = 'signer_tests'
class SneakyFailStage(generic_stages.BuilderStage):
    """SneakyFailStage exits with an error."""

    def PerformStage(self):
        """Exit without reporting back."""
        # os._exit skips interpreter cleanup, so no result is recorded.
        # pylint: disable=protected-access
        os._exit(1)
class SuicideStage(generic_stages.BuilderStage):
    """SuicideStage kills itself with kill -9."""

    def PerformStage(self):
        """Exit without reporting back."""
        # SIGKILL cannot be caught, so the stage dies without recording.
        os.kill(os.getpid(), signal.SIGKILL)
class SetAttrStage(generic_stages.BuilderStage):
    """Stage that sets requested run attribute to a value."""

    DEFAULT_ATTR = 'unittest_value'  # attribute name used when none is given
    VALUE = 'HereTakeThis'           # value written to the run attribute

    def __init__(self, builder_run, delay=2, attr=DEFAULT_ATTR, *args, **kwargs):
        """Args:
            builder_run: BuilderRun this stage operates on.
            delay: seconds to sleep before setting the attribute.
            attr: name of the parallel run attribute to set.
        """
        super(SetAttrStage, self).__init__(builder_run, *args, **kwargs)
        self.delay = delay
        self.attr = attr

    def PerformStage(self):
        """Wait self.delay seconds then set requested run attribute."""
        time.sleep(self.delay)
        self._run.attrs.SetParallel(self.attr, self.VALUE)

    def QueueableException(self):
        """Exception expected when self.attr is not a queueable attribute."""
        return cbuildbot_run.ParallelAttributeError(self.attr)
class GetAttrStage(generic_stages.BuilderStage):
    """Stage that accesses requested run attribute and confirms value."""

    DEFAULT_ATTR = 'unittest_value'  # attribute name used when none is given

    def __init__(self, builder_run, tester=None, timeout=5, attr=DEFAULT_ATTR,
                 *args, **kwargs):
        """Args:
            builder_run: BuilderRun this stage operates on.
            tester: optional callable invoked with the retrieved value.
            timeout: seconds to wait for the attribute to appear.
            attr: name of the parallel run attribute to read.
        """
        super(GetAttrStage, self).__init__(builder_run, *args, **kwargs)
        self.tester = tester
        self.timeout = timeout
        self.attr = attr

    def PerformStage(self):
        """Wait for attrs.test value to show up."""
        # The attribute must not be set yet when this stage starts.
        assert not self._run.attrs.HasParallel(self.attr)
        value = self._run.attrs.GetParallel(self.attr, self.timeout)
        if self.tester:
            self.tester(value)

    def QueueableException(self):
        """Exception expected when self.attr is not a queueable attribute."""
        return cbuildbot_run.ParallelAttributeError(self.attr)

    def TimeoutException(self):
        """Exception expected when the attribute never appears in time."""
        return cbuildbot_run.AttrTimeoutError(self.attr)
class BuildStagesResultsTest(cros_test_lib.TestCase):
    """Tests for stage results and reporting."""

    def setUp(self):
        """Build a BuilderRun around a dummy options object for each test."""
        # Always stub RunCommmand out as we use it in every method.
        self._bot_id = 'x86-generic-paladin'
        build_config = cbuildbot_config.GetConfig()[self._bot_id]
        self.build_root = '/fake_root'

        # Create a class to hold the option values BuilderRun reads.
        class Options(object):
            """Dummy class to hold option values."""

        options = Options()
        options.archive_base = 'gs://dontcare'
        options.buildroot = self.build_root
        options.debug = False
        options.prebuilts = False
        options.clobber = False
        options.nosdk = False
        options.remote_trybot = False
        options.latest_toolchain = False
        options.buildnumber = 1234
        options.chrome_rev = None
        options.branch = 'dontcare'
        options.chrome_root = False

        # The manager is entered manually here; tearDown mirrors the exit.
        self._manager = parallel.Manager()
        self._manager.__enter__()

        self._run = cbuildbot_run.BuilderRun(options, build_config, self._manager)

        # Start every test from a clean results store.
        results_lib.Results.Clear()

    def tearDown(self):
        # Mimic exiting with statement for self._manager.
        self._manager.__exit__(None, None, None)

    def _runStages(self):
        """Run a couple of stages so we can capture the results"""
        # Run two pass stages, and one fail stage.
        PassStage(self._run).Run()
        Pass2Stage(self._run).Run()
        self.assertRaises(
            failures_lib.StepFailure,
            FailStage(self._run).Run)

    def _verifyRunResults(self, expectedResults, max_time=2.0):
        """Assert recorded results match (name, result) pairs, in order.

        Args:
            expectedResults: list of (stage name, expected result) tuples.
            max_time: upper bound (seconds) allowed for each stage's time.
        """
        actualResults = results_lib.Results.Get()

        # Break out the asserts to be per item to make debugging easier
        self.assertEqual(len(expectedResults), len(actualResults))
        for i in xrange(len(expectedResults)):
            entry = actualResults[i]
            xname, xresult = expectedResults[i]

            if entry.result not in results_lib.Results.NON_FAILURE_TYPES:
                self.assertTrue(isinstance(entry.result, BaseException))
                if isinstance(entry.result, failures_lib.StepFailure):
                    self.assertEqual(str(entry.result), entry.description)

            # Every entry must have recorded a sane elapsed time.
            self.assertTrue(entry.time >= 0 and entry.time < max_time)
            self.assertEqual(xname, entry.name)
            self.assertEqual(type(xresult), type(entry.result))
            self.assertEqual(repr(xresult), repr(entry.result))

    def _PassString(self):
        """Return the serialized form of one successful 'Pass' record."""
        record = results_lib.Result('Pass', results_lib.Results.SUCCESS, 'None',
                                    'Pass', '', '0')
        return results_lib.Results.SPLIT_TOKEN.join(record) + '\n'

    def testRunStages(self):
        """Run some stages and verify the captured results"""

        self.assertEqual(results_lib.Results.Get(), [])

        self._runStages()

        # Verify that the results are what we expect.
        expectedResults = [
            ('Pass', results_lib.Results.SUCCESS),
            ('Pass2', results_lib.Results.SUCCESS),
            ('Fail', FailStage.FAIL_EXCEPTION),
        ]
        self._verifyRunResults(expectedResults)

    def testSuccessTest(self):
        """Run some stages and verify the captured results"""

        results_lib.Results.Record('Pass', results_lib.Results.SUCCESS)

        self.assertTrue(results_lib.Results.BuildSucceededSoFar())

        results_lib.Results.Record('Fail', FailStage.FAIL_EXCEPTION, time=1)

        # One failure is enough to mark the build failed from then on.
        self.assertFalse(results_lib.Results.BuildSucceededSoFar())

        results_lib.Results.Record('Pass2', results_lib.Results.SUCCESS)

        self.assertFalse(results_lib.Results.BuildSucceededSoFar())

    def _TestParallelStages(self, stage_objs):
        """Run stage_objs in parallel; return the BackgroundFailure, if any."""
        builder = simple_builders.SimpleBuilder(self._run)
        error = None
        # pylint: disable=protected-access
        with mock.patch.multiple(parallel._BackgroundTask, PRINT_INTERVAL=0.01):
            try:
                builder._RunParallelStages(stage_objs)
            except parallel.BackgroundFailure as ex:
                error = ex

        return error

    def testParallelStages(self):
        """Run a mix of passing and dying stages in parallel."""
        stage_objs = [stage(self._run) for stage in
                      (PassStage, SneakyFailStage, FailStage, SuicideStage,
                       Pass2Stage)]
        error = self._TestParallelStages(stage_objs)
        self.assertTrue(error)
        expectedResults = [
            ('Pass', results_lib.Results.SUCCESS),
            ('Fail', FailStage.FAIL_EXCEPTION),
            ('Pass2', results_lib.Results.SUCCESS),
            ('SneakyFail', error),
            ('Suicide', error),
        ]
        self._verifyRunResults(expectedResults)

    def testParallelStageCommunicationOK(self):
        """Test run attr communication betweeen parallel stages."""
        def assert_test(value):
            self.assertEqual(value, SetAttrStage.VALUE,
                             'Expected value %r to be passed between stages, but'
                             ' got %r.' % (SetAttrStage.VALUE, value))
        stage_objs = [
            SetAttrStage(self._run),
            GetAttrStage(self._run, assert_test, timeout=30),
            GetAttrStage(self._run, assert_test, timeout=30),
        ]
        error = self._TestParallelStages(stage_objs)
        self.assertFalse(error)
        expectedResults = [
            ('SetAttr', results_lib.Results.SUCCESS),
            ('GetAttr', results_lib.Results.SUCCESS),
            ('GetAttr', results_lib.Results.SUCCESS),
        ]
        self._verifyRunResults(expectedResults, max_time=30.0)

        # Make sure run attribute propagated up to the top, too.
        value = self._run.attrs.GetParallel('unittest_value')
        self.assertEqual(SetAttrStage.VALUE, value)

    def testParallelStageCommunicationTimeout(self):
        """Test run attr communication between parallel stages that times out."""
        def assert_test(value):
            self.assertEqual(value, SetAttrStage.VALUE,
                             'Expected value %r to be passed between stages, but'
                             ' got %r.' % (SetAttrStage.VALUE, value))
        # Setter sleeps longer (11s) than the getter is willing to wait (1s).
        stage_objs = [SetAttrStage(self._run, delay=11),
                      GetAttrStage(self._run, assert_test, timeout=1),
                      ]
        error = self._TestParallelStages(stage_objs)
        self.assertTrue(error)
        expectedResults = [
            ('SetAttr', results_lib.Results.SUCCESS),
            ('GetAttr', stage_objs[1].TimeoutException()),
        ]
        self._verifyRunResults(expectedResults, max_time=12.0)

    def testParallelStageCommunicationNotQueueable(self):
        """Test setting non-queueable run attr in parallel stage."""
        stage_objs = [SetAttrStage(self._run, attr='release_tag'),
                      GetAttrStage(self._run, timeout=2),
                      ]
        error = self._TestParallelStages(stage_objs)
        self.assertTrue(error)
        expectedResults = [
            ('SetAttr', stage_objs[0].QueueableException()),
            ('GetAttr', stage_objs[1].TimeoutException()),
        ]
        self._verifyRunResults(expectedResults, max_time=12.0)

    def testStagesReportSuccess(self):
        """Tests Stage reporting."""

        sync_stages.ManifestVersionedSyncStage.manifest_manager = None

        # Store off a known set of results and generate a report
        results_lib.Results.Record('Sync', results_lib.Results.SUCCESS, time=1)
        results_lib.Results.Record('Build', results_lib.Results.SUCCESS, time=2)
        results_lib.Results.Record('Test', FailStage.FAIL_EXCEPTION, time=3)
        results_lib.Results.Record('SignerTests', results_lib.Results.SKIPPED)
        result = cros_build_lib.CommandResult(cmd=['/bin/false', '/nosuchdir'],
                                              returncode=2)
        results_lib.Results.Record(
            'Archive',
            cros_build_lib.RunCommandError(
                'Command "/bin/false /nosuchdir" failed.\n',
                result), time=4)

        results = StringIO.StringIO()

        results_lib.Results.Report(results)

        # Note: the SKIPPED SignerTests entry is omitted from the report.
        expectedResults = (
            "************************************************************\n"
            "** Stage Results\n"
            "************************************************************\n"
            "** PASS Sync (0:00:01)\n"
            "************************************************************\n"
            "** PASS Build (0:00:02)\n"
            "************************************************************\n"
            "** FAIL Test (0:00:03) with StepFailure\n"
            "************************************************************\n"
            "** FAIL Archive (0:00:04) in /bin/false\n"
            "************************************************************\n"
        )

        expectedLines = expectedResults.split('\n')
        actualLines = results.getvalue().split('\n')

        # Break out the asserts to be per item to make debugging easier
        for i in xrange(min(len(actualLines), len(expectedLines))):
            self.assertEqual(expectedLines[i], actualLines[i])
        self.assertEqual(len(expectedLines), len(actualLines))

    def testStagesReportError(self):
        """Tests Stage reporting with exceptions."""

        sync_stages.ManifestVersionedSyncStage.manifest_manager = None

        # Store off a known set of results and generate a report
        results_lib.Results.Record('Sync', results_lib.Results.SUCCESS, time=1)
        results_lib.Results.Record('Build', results_lib.Results.SUCCESS, time=2)
        results_lib.Results.Record('Test', FailStage.FAIL_EXCEPTION,
                                   'failException Msg\nLine 2', time=3)
        result = cros_build_lib.CommandResult(cmd=['/bin/false', '/nosuchdir'],
                                              returncode=2)
        results_lib.Results.Record(
            'Archive',
            cros_build_lib.RunCommandError(
                'Command "/bin/false /nosuchdir" failed.\n',
                result),
            'FailRunCommand msg', time=4)

        results = StringIO.StringIO()

        results_lib.Results.Report(results)

        # Failure descriptions are appended after the summary table.
        expectedResults = (
            "************************************************************\n"
            "** Stage Results\n"
            "************************************************************\n"
            "** PASS Sync (0:00:01)\n"
            "************************************************************\n"
            "** PASS Build (0:00:02)\n"
            "************************************************************\n"
            "** FAIL Test (0:00:03) with StepFailure\n"
            "************************************************************\n"
            "** FAIL Archive (0:00:04) in /bin/false\n"
            "************************************************************\n"
            "\n"
            "Failed in stage Test:\n"
            "\n"
            "failException Msg\n"
            "Line 2\n"
            "\n"
            "Failed in stage Archive:\n"
            "\n"
            "FailRunCommand msg\n"
        )

        expectedLines = expectedResults.split('\n')
        actualLines = results.getvalue().split('\n')

        # Break out the asserts to be per item to make debugging easier
        for i in xrange(min(len(actualLines), len(expectedLines))):
            self.assertEqual(expectedLines[i], actualLines[i])
        self.assertEqual(len(expectedLines), len(actualLines))

    def testStagesReportReleaseTag(self):
        """Tests Release Tag entry in stages report."""

        current_version = "release_tag_string"
        archive_urls = {
            'board1': 'http://foo.com/bucket/bot-id1/version/index.html',
            'board2': 'http://foo.com/bucket/bot-id2/version/index.html',}

        # Store off a known set of results and generate a report
        results_lib.Results.Record('Pass', results_lib.Results.SUCCESS, time=1)

        results = StringIO.StringIO()

        results_lib.Results.Report(results, archive_urls, current_version)

        expectedResults = (
            "************************************************************\n"
            "** RELEASE VERSION: release_tag_string\n"
            "************************************************************\n"
            "** Stage Results\n"
            "************************************************************\n"
            "** PASS Pass (0:00:01)\n"
            "************************************************************\n"
            "** BUILD ARTIFACTS FOR THIS BUILD CAN BE FOUND AT:\n"
            "** board1: %s\n"
            "@@@STEP_LINK@Artifacts[board1]: bot-id1/version@%s@@@\n"
            "** board2: %s\n"
            "@@@STEP_LINK@Artifacts[board2]: bot-id2/version@%s@@@\n"
            "************************************************************\n"
            % (archive_urls['board1'], archive_urls['board1'],
               archive_urls['board2'], archive_urls['board2']))

        expectedLines = expectedResults.split('\n')
        actualLines = results.getvalue().split('\n')

        # Break out the asserts to be per item to make debugging easier
        for i in xrange(len(expectedLines)):
            self.assertEqual(expectedLines[i], actualLines[i])
        self.assertEqual(len(expectedLines), len(actualLines))

    def testSaveCompletedStages(self):
        """Tests that we can save out completed stages."""

        # Run this again to make sure we have the expected results stored
        results_lib.Results.Record('Pass', results_lib.Results.SUCCESS)
        results_lib.Results.Record('Fail', FailStage.FAIL_EXCEPTION)
        results_lib.Results.Record('Pass2', results_lib.Results.SUCCESS)

        saveFile = StringIO.StringIO()
        results_lib.Results.SaveCompletedStages(saveFile)
        # Only the leading successful stage is persisted.
        self.assertEqual(saveFile.getvalue(), self._PassString())

    def testRestoreCompletedStages(self):
        """Tests that we can read in completed stages."""

        results_lib.Results.RestoreCompletedStages(
            StringIO.StringIO(self._PassString()))

        previous = results_lib.Results.GetPrevious()
        self.assertEqual(previous.keys(), ['Pass'])

    def testRunAfterRestore(self):
        """Tests that we skip previously completed stages."""

        # Fake results_lib.Results.RestoreCompletedStages
        results_lib.Results.RestoreCompletedStages(
            StringIO.StringIO(self._PassString()))

        self._runStages()

        # Verify that the results are what we expect.
        expectedResults = [
            ('Pass', results_lib.Results.SUCCESS),
            ('Pass2', results_lib.Results.SUCCESS),
            ('Fail', FailStage.FAIL_EXCEPTION),
        ]
        self._verifyRunResults(expectedResults)

    def testFailedButForgiven(self):
        """Tests that warnings are flagged as such."""
        results_lib.Results.Record('Warn', results_lib.Results.FORGIVEN, time=1)
        results = StringIO.StringIO()
        results_lib.Results.Report(results)
        self.assertTrue('@@@STEP_WARNINGS@@@' in results.getvalue())
| guorendong/iridium-browser-ubuntu | third_party/chromite/cbuildbot/stages/stage_results_unittest.py | Python | bsd-3-clause | 17,223 |
#!/Users/keith.hamilton/Documents/GitHub/keithhamilton/blackmaas/bin/python
#
# The Python Imaging Library.
# $Id$
#
# convert image files
#
# History:
# 0.1 96-04-20 fl Created
# 0.2 96-10-04 fl Use draft mode when converting images
# 0.3 96-12-30 fl Optimize output (PNG, JPEG)
# 0.4 97-01-18 fl Made optimize an option (PNG, JPEG)
# 0.5 98-12-30 fl Fixed -f option (from Anthony Baxter)
#
from __future__ import print_function
import site
import getopt, string, sys
from PIL import Image
def usage():
    """Print the pilconvert help text, then exit with status 1."""
    lines = [
        "PIL Convert 0.5/1998-12-30 -- convert image files",
        "Usage: pilconvert [option] infile outfile",
        "",
        "Options:",
        "",
        " -c <format> convert to format (default is given by extension)",
        "",
        " -g convert to greyscale",
        " -p convert to palette image (using standard palette)",
        " -r convert to rgb",
        "",
        " -o optimize output (trade speed for size)",
        " -q <value> set compression quality (0-100, JPEG only)",
        "",
        " -f list supported file formats",
    ]
    print("\n".join(lines))
    sys.exit(1)
# --- Command-line handling and conversion (script body) -------------------

if len(sys.argv) == 1:
    usage()

try:
    opt, argv = getopt.getopt(sys.argv[1:], "c:dfgopq:r")
except getopt.error as v:
    print(v)
    sys.exit(1)

format = None    # output format name; None = infer from extension
convert = None   # target mode for Image.convert, if any
options = {}     # extra keyword options passed to Image.save

for o, a in opt:

    if o == "-f":
        # List the registered codecs and exit.
        Image.init()
        # Renamed from 'id' to avoid shadowing the builtin.
        ids = sorted(Image.ID)
        print("Supported formats (* indicates output format):")
        for i in ids:
            if i in Image.SAVE:
                print(i+"*", end=' ')
            else:
                print(i, end=' ')
        sys.exit(1)

    elif o == "-c":
        format = a

    elif o == "-g":
        convert = "L"
    elif o == "-p":
        convert = "P"
    elif o == "-r":
        convert = "RGB"

    elif o == "-o":
        options["optimize"] = 1
    elif o == "-q":
        # string.atoi does not exist on Python 3; int() is equivalent.
        options["quality"] = int(a)

if len(argv) != 2:
    usage()

try:
    im = Image.open(argv[0])
    if convert and im.mode != convert:
        # Draft mode lets the decoder do (part of) the conversion cheaply.
        im.draft(convert, im.size)
        im = im.convert(convert)
    if format:
        im.save(argv[1], format, **options)
    else:
        im.save(argv[1], **options)
except Exception:
    # Narrowed from a bare 'except:', which would also have trapped
    # SystemExit and KeyboardInterrupt.
    print("cannot convert image", end=' ')
    print("(%s:%s)" % (sys.exc_info()[0], sys.exc_info()[1]))
| keithhamilton/blackmaas | bin/pilconvert.py | Python | bsd-3-clause | 2,387 |
# -*- coding: utf-8 -*-
try:
import json
except ImportError:
import simplejson as json
import math
import pytz
import pytest
import time
import datetime
import calendar
import re
import decimal
import dateutil
from functools import partial
from pandas.compat import range, zip, StringIO, u
import pandas._libs.json as ujson
import pandas.compat as compat
import numpy as np
from pandas import DataFrame, Series, Index, NaT, DatetimeIndex
import pandas.util.testing as tm
# json.dumps handles unicode natively on Python 3; on Python 2 the
# encoding of str inputs must be requested explicitly.
json_unicode = (json.dumps if compat.PY3
                else partial(json.dumps, encoding="utf-8"))
class TestUltraJSONTests(object):
@pytest.mark.skipif(compat.is_platform_32bit(),
reason="not compliant on 32-bit, xref #15865")
def test_encodeDecimal(self):
sut = decimal.Decimal("1337.1337")
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert decoded == 1337.1337
sut = decimal.Decimal("0.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.94")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "0.9"
decoded = ujson.decode(encoded)
assert decoded == 0.9
sut = decimal.Decimal("1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "2.0"
decoded = ujson.decode(encoded)
assert decoded == 2.0
sut = decimal.Decimal("-1.95")
encoded = ujson.encode(sut, double_precision=1)
assert encoded == "-2.0"
decoded = ujson.decode(encoded)
assert decoded == -2.0
sut = decimal.Decimal("0.995")
encoded = ujson.encode(sut, double_precision=2)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.9995")
encoded = ujson.encode(sut, double_precision=3)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
sut = decimal.Decimal("0.99999999999999944")
encoded = ujson.encode(sut, double_precision=15)
assert encoded == "1.0"
decoded = ujson.decode(encoded)
assert decoded == 1.0
    def test_encodeStringConversion(self):
        """Check string escaping, with and without HTML-safe encoding.

        ``encode_html_chars=True`` must additionally escape ``<``, ``>``
        and ``&`` as \\uXXXX sequences; the default and explicit False
        must leave them literal.
        """
        input = "A string \\ / \b \f \n \r \t </script> &"
        # Expected output when HTML characters are NOT escaped.
        not_html_encoded = ('"A string \\\\ \\/ \\b \\f \\n '
                            '\\r \\t <\\/script> &"')
        # Expected output when HTML characters ARE escaped.
        html_encoded = ('"A string \\\\ \\f \\n \\r \\t '
                        '\\u003c\\/script\\u003e \\u0026"')
        # Closure over ``input``: encode with the given kwargs, then check
        # the exact encoding plus round-trips through both json and ujson.
        def helper(expected_output, **encode_kwargs):
            output = ujson.encode(input, **encode_kwargs)
            assert input == json.loads(output)
            assert output == expected_output
            assert input == ujson.decode(output)
        # Default behavior assumes encode_html_chars=False.
        helper(not_html_encoded, ensure_ascii=True)
        helper(not_html_encoded, ensure_ascii=False)
        # Make sure explicit encode_html_chars=False works.
        helper(not_html_encoded, ensure_ascii=True, encode_html_chars=False)
        helper(not_html_encoded, ensure_ascii=False, encode_html_chars=False)
        # Make sure explicit encode_html_chars=True does the encoding.
        helper(html_encoded, ensure_ascii=True, encode_html_chars=True)
        helper(html_encoded, ensure_ascii=False, encode_html_chars=True)
def test_doubleLongIssue(self):
sut = {u('a'): -4342969734183514}
encoded = json.dumps(sut)
decoded = json.loads(encoded)
assert sut == decoded
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert sut == decoded
def test_doubleLongDecimalIssue(self):
sut = {u('a'): -12345678901234.56789012}
encoded = json.dumps(sut)
decoded = json.loads(encoded)
assert sut == decoded
encoded = ujson.encode(sut, double_precision=15)
decoded = ujson.decode(encoded)
assert sut == decoded
def test_encodeNonCLocale(self):
import locale
savedlocale = locale.getlocale(locale.LC_NUMERIC)
try:
locale.setlocale(locale.LC_NUMERIC, 'it_IT.UTF-8')
except:
try:
locale.setlocale(locale.LC_NUMERIC, 'Italian_Italy')
except:
pytest.skip('Could not set locale for testing')
assert ujson.loads(ujson.dumps(4.78e60)) == 4.78e60
assert ujson.loads('4.78', precise_float=True) == 4.78
locale.setlocale(locale.LC_NUMERIC, savedlocale)
def test_encodeDecodeLongDecimal(self):
sut = {u('a'): -528656961.4399388}
encoded = ujson.dumps(sut, double_precision=15)
ujson.decode(encoded)
def test_decimalDecodeTestPrecise(self):
sut = {u('a'): 4.56}
encoded = ujson.encode(sut)
decoded = ujson.decode(encoded, precise_float=True)
assert sut == decoded
@pytest.mark.skipif(compat.is_platform_windows() and not compat.PY3,
reason="buggy on win-64 for py2")
def test_encodeDoubleTinyExponential(self):
num = 1e-40
assert num == ujson.decode(ujson.encode(num))
num = 1e-100
assert num == ujson.decode(ujson.encode(num))
num = -1e-45
assert num == ujson.decode(ujson.encode(num))
num = -1e-145
assert np.allclose(num, ujson.decode(ujson.encode(num)))
def test_encodeDictWithUnicodeKeys(self):
input = {u("key1"): u("value1"), u("key1"):
u("value1"), u("key1"): u("value1"),
u("key1"): u("value1"), u("key1"):
u("value1"), u("key1"): u("value1")}
output = ujson.encode(input)
input = {u("بن"): u("value1"), u("بن"): u("value1"),
u("بن"): u("value1"), u("بن"): u("value1"),
u("بن"): u("value1"), u("بن"): u("value1"),
u("بن"): u("value1")}
output = ujson.encode(input) # noqa
def test_encodeDoubleConversion(self):
input = math.pi
output = ujson.encode(input)
assert round(input, 5) == round(json.loads(output), 5)
assert round(input, 5) == round(ujson.decode(output), 5)
def test_encodeWithDecimal(self):
input = 1.0
output = ujson.encode(input)
assert output == "1.0"
def test_encodeDoubleNegConversion(self):
input = -math.pi
output = ujson.encode(input)
assert round(input, 5) == round(json.loads(output), 5)
assert round(input, 5) == round(ujson.decode(output), 5)
def test_encodeArrayOfNestedArrays(self):
input = [[[[]]]] * 20
output = ujson.encode(input)
assert input == json.loads(output)
# assert output == json.dumps(input)
assert input == ujson.decode(output)
input = np.array(input)
tm.assert_numpy_array_equal(input, ujson.decode(
output, numpy=True, dtype=input.dtype))
def test_encodeArrayOfDoubles(self):
input = [31337.31337, 31337.31337, 31337.31337, 31337.31337] * 10
output = ujson.encode(input)
assert input == json.loads(output)
# assert output == json.dumps(input)
assert input == ujson.decode(output)
tm.assert_numpy_array_equal(
np.array(input), ujson.decode(output, numpy=True))
def test_doublePrecisionTest(self):
input = 30.012345678901234
output = ujson.encode(input, double_precision=15)
assert input == json.loads(output)
assert input == ujson.decode(output)
output = ujson.encode(input, double_precision=9)
assert round(input, 9) == json.loads(output)
assert round(input, 9) == ujson.decode(output)
output = ujson.encode(input, double_precision=3)
assert round(input, 3) == json.loads(output)
assert round(input, 3) == ujson.decode(output)
def test_invalidDoublePrecision(self):
input = 30.12345678901234567890
pytest.raises(ValueError, ujson.encode, input, double_precision=20)
pytest.raises(ValueError, ujson.encode, input, double_precision=-1)
# will throw typeError
pytest.raises(TypeError, ujson.encode, input, double_precision='9')
# will throw typeError
pytest.raises(TypeError, ujson.encode,
input, double_precision=None)
def test_encodeStringConversion2(self):
input = "A string \\ / \b \f \n \r \t"
output = ujson.encode(input)
assert input == json.loads(output)
assert output == '"A string \\\\ \\/ \\b \\f \\n \\r \\t"'
assert input == ujson.decode(output)
pass
def test_decodeUnicodeConversion(self):
pass
def test_encodeUnicodeConversion1(self):
input = "Räksmörgås اسامة بن محمد بن عوض بن لادن"
enc = ujson.encode(input)
dec = ujson.decode(enc)
assert enc == json_unicode(input)
assert dec == json.loads(enc)
def test_encodeControlEscaping(self):
input = "\x19"
enc = ujson.encode(input)
dec = ujson.decode(enc)
assert input == dec
assert enc == json_unicode(input)
def test_encodeUnicodeConversion2(self):
input = "\xe6\x97\xa5\xd1\x88"
enc = ujson.encode(input)
dec = ujson.decode(enc)
assert enc == json_unicode(input)
assert dec == json.loads(enc)
def test_encodeUnicodeSurrogatePair(self):
input = "\xf0\x90\x8d\x86"
enc = ujson.encode(input)
dec = ujson.decode(enc)
assert enc == json_unicode(input)
assert dec == json.loads(enc)
def test_encodeUnicode4BytesUTF8(self):
input = "\xf0\x91\x80\xb0TRAILINGNORMAL"
enc = ujson.encode(input)
dec = ujson.decode(enc)
assert enc == json_unicode(input)
assert dec == json.loads(enc)
def test_encodeUnicode4BytesUTF8Highest(self):
input = "\xf3\xbf\xbf\xbfTRAILINGNORMAL"
enc = ujson.encode(input)
dec = ujson.decode(enc)
assert enc == json_unicode(input)
assert dec == json.loads(enc)
def test_encodeArrayInArray(self):
input = [[[[]]]]
output = ujson.encode(input)
assert input == json.loads(output)
assert output == json.dumps(input)
assert input == ujson.decode(output)
tm.assert_numpy_array_equal(
np.array(input), ujson.decode(output, numpy=True))
pass
def test_encodeIntConversion(self):
input = 31337
output = ujson.encode(input)
assert input == json.loads(output)
assert output == json.dumps(input)
assert input == ujson.decode(output)
pass
def test_encodeIntNegConversion(self):
input = -31337
output = ujson.encode(input)
assert input == json.loads(output)
assert output == json.dumps(input)
assert input == ujson.decode(output)
pass
def test_encodeLongNegConversion(self):
input = -9223372036854775808
output = ujson.encode(input)
assert input == json.loads(output)
assert output == json.dumps(input)
assert input == ujson.decode(output)
def test_encodeListConversion(self):
input = [1, 2, 3, 4]
output = ujson.encode(input)
assert input == json.loads(output)
assert input == ujson.decode(output)
tm.assert_numpy_array_equal(
np.array(input), ujson.decode(output, numpy=True))
pass
def test_encodeDictConversion(self):
input = {"k1": 1, "k2": 2, "k3": 3, "k4": 4}
output = ujson.encode(input) # noqa
assert input == json.loads(output)
assert input == ujson.decode(output)
assert input == ujson.decode(output)
pass
def test_encodeNoneConversion(self):
input = None
output = ujson.encode(input)
assert input == json.loads(output)
assert output == json.dumps(input)
assert input == ujson.decode(output)
pass
def test_encodeTrueConversion(self):
input = True
output = ujson.encode(input)
assert input == json.loads(output)
assert output == json.dumps(input)
assert input == ujson.decode(output)
pass
def test_encodeFalseConversion(self):
input = False
output = ujson.encode(input)
assert input == json.loads(output)
assert output == json.dumps(input)
assert input == ujson.decode(output)
def test_encodeDatetimeConversion(self):
ts = time.time()
input = datetime.datetime.fromtimestamp(ts)
output = ujson.encode(input, date_unit='s')
expected = calendar.timegm(input.utctimetuple())
assert int(expected) == json.loads(output)
assert int(expected) == ujson.decode(output)
def test_encodeDateConversion(self):
ts = time.time()
input = datetime.date.fromtimestamp(ts)
output = ujson.encode(input, date_unit='s')
tup = (input.year, input.month, input.day, 0, 0, 0)
expected = calendar.timegm(tup)
assert int(expected) == json.loads(output)
assert int(expected) == ujson.decode(output)
def test_encodeTimeConversion(self):
tests = [
datetime.time(),
datetime.time(1, 2, 3),
datetime.time(10, 12, 15, 343243),
]
for test in tests:
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
def test_encodeTimeConversion_pytz(self):
# see gh-11473: to_json segfaults with timezone-aware datetimes
test = datetime.time(10, 12, 15, 343243, pytz.utc)
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
def test_encodeTimeConversion_dateutil(self):
# see gh-11473: to_json segfaults with timezone-aware datetimes
test = datetime.time(10, 12, 15, 343243, dateutil.tz.tzutc())
output = ujson.encode(test)
expected = '"{iso}"'.format(iso=test.isoformat())
assert expected == output
def test_nat(self):
input = NaT
assert ujson.encode(input) == 'null', "Expected null"
def test_npy_nat(self):
from distutils.version import LooseVersion
if LooseVersion(np.__version__) < LooseVersion('1.7.0'):
pytest.skip("numpy version < 1.7.0, is "
"{0}".format(np.__version__))
input = np.datetime64('NaT')
assert ujson.encode(input) == 'null', "Expected null"
def test_datetime_units(self):
from pandas._libs.tslib import Timestamp
val = datetime.datetime(2013, 8, 17, 21, 17, 12, 215504)
stamp = Timestamp(val)
roundtrip = ujson.decode(ujson.encode(val, date_unit='s'))
assert roundtrip == stamp.value // 10**9
roundtrip = ujson.decode(ujson.encode(val, date_unit='ms'))
assert roundtrip == stamp.value // 10**6
roundtrip = ujson.decode(ujson.encode(val, date_unit='us'))
assert roundtrip == stamp.value // 10**3
roundtrip = ujson.decode(ujson.encode(val, date_unit='ns'))
assert roundtrip == stamp.value
pytest.raises(ValueError, ujson.encode, val, date_unit='foo')
def test_encodeToUTF8(self):
input = "\xe6\x97\xa5\xd1\x88"
enc = ujson.encode(input, ensure_ascii=False)
dec = ujson.decode(enc)
assert enc == json_unicode(input, ensure_ascii=False)
assert dec == json.loads(enc)
def test_decodeFromUnicode(self):
input = u("{\"obj\": 31337}")
dec1 = ujson.decode(input)
dec2 = ujson.decode(str(input))
assert dec1 == dec2
def test_encodeRecursionMax(self):
# 8 is the max recursion depth
class O2(object):
member = 0
pass
class O1(object):
member = 0
pass
input = O1()
input.member = O2()
input.member.member = input
try:
output = ujson.encode(input) # noqa
assert False, "Expected overflow exception"
except(OverflowError):
pass
def test_encodeDoubleNan(self):
input = np.nan
assert ujson.encode(input) == 'null', "Expected null"
def test_encodeDoubleInf(self):
input = np.inf
assert ujson.encode(input) == 'null', "Expected null"
def test_encodeDoubleNegInf(self):
input = -np.inf
assert ujson.encode(input) == 'null', "Expected null"
def test_decodeJibberish(self):
input = "fdsa sda v9sa fdsa"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeBrokenArrayStart(self):
input = "["
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeBrokenObjectStart(self):
input = "{"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeBrokenArrayEnd(self):
input = "]"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeArrayDepthTooBig(self):
input = '[' * (1024 * 1024)
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeBrokenObjectEnd(self):
input = "}"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeObjectDepthTooBig(self):
input = '{' * (1024 * 1024)
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeStringUnterminated(self):
input = "\"TESTING"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeStringUntermEscapeSequence(self):
input = "\"TESTING\\\""
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeStringBadEscape(self):
input = "\"TESTING\\\""
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeTrueBroken(self):
input = "tru"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeFalseBroken(self):
input = "fa"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeNullBroken(self):
input = "n"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeBrokenDictKeyTypeLeakTest(self):
input = '{{1337:""}}'
for x in range(1000):
try:
ujson.decode(input)
assert False, "Expected exception!"
except ValueError:
continue
assert False, "Wrong exception"
def test_decodeBrokenDictLeakTest(self):
input = '{{"key":"}'
for x in range(1000):
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
continue
assert False, "Wrong exception"
def test_decodeBrokenListLeakTest(self):
input = '[[[true'
for x in range(1000):
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
continue
assert False, "Wrong exception"
def test_decodeDictWithNoKey(self):
input = "{{{{31337}}}}"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeDictWithNoColonOrValue(self):
input = "{{{{\"key\"}}}}"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeDictWithNoValue(self):
input = "{{{{\"key\":}}}}"
try:
ujson.decode(input)
assert False, "Expected exception!"
except(ValueError):
return
assert False, "Wrong exception"
def test_decodeNumericIntPos(self):
input = "31337"
assert 31337 == ujson.decode(input)
def test_decodeNumericIntNeg(self):
input = "-31337"
assert -31337 == ujson.decode(input)
@pytest.mark.skipif(compat.PY3, reason="only PY2")
def test_encodeUnicode4BytesUTF8Fail(self):
input = "\xfd\xbf\xbf\xbf\xbf\xbf"
try:
enc = ujson.encode(input) # noqa
assert False, "Expected exception"
except OverflowError:
pass
def test_encodeNullCharacter(self):
input = "31337 \x00 1337"
output = ujson.encode(input)
assert input == json.loads(output)
assert output == json.dumps(input)
assert input == ujson.decode(output)
input = "\x00"
output = ujson.encode(input)
assert input == json.loads(output)
assert output == json.dumps(input)
assert input == ujson.decode(output)
assert '" \\u0000\\r\\n "' == ujson.dumps(u(" \u0000\r\n "))
pass
def test_decodeNullCharacter(self):
input = "\"31337 \\u0000 31337\""
assert ujson.decode(input) == json.loads(input)
def test_encodeListLongConversion(self):
input = [9223372036854775807, 9223372036854775807, 9223372036854775807,
9223372036854775807, 9223372036854775807, 9223372036854775807]
output = ujson.encode(input)
assert input == json.loads(output)
assert input == ujson.decode(output)
tm.assert_numpy_array_equal(np.array(input),
ujson.decode(output, numpy=True,
dtype=np.int64))
pass
def test_encodeLongConversion(self):
input = 9223372036854775807
output = ujson.encode(input)
assert input == json.loads(output)
assert output == json.dumps(input)
assert input == ujson.decode(output)
pass
def test_numericIntExp(self):
input = "1337E40"
output = ujson.decode(input)
assert output == json.loads(input)
def test_numericIntFrcExp(self):
input = "1.337E40"
output = ujson.decode(input)
tm.assert_almost_equal(output, json.loads(input))
def test_decodeNumericIntExpEPLUS(self):
input = "1337E+9"
output = ujson.decode(input)
tm.assert_almost_equal(output, json.loads(input))
def test_decodeNumericIntExpePLUS(self):
input = "1.337e+40"
output = ujson.decode(input)
tm.assert_almost_equal(output, json.loads(input))
def test_decodeNumericIntExpE(self):
input = "1337E40"
output = ujson.decode(input)
tm.assert_almost_equal(output, json.loads(input))
def test_decodeNumericIntExpe(self):
input = "1337e40"
output = ujson.decode(input)
tm.assert_almost_equal(output, json.loads(input))
def test_decodeNumericIntExpEMinus(self):
input = "1.337E-4"
output = ujson.decode(input)
tm.assert_almost_equal(output, json.loads(input))
def test_decodeNumericIntExpeMinus(self):
input = "1.337e-4"
output = ujson.decode(input)
tm.assert_almost_equal(output, json.loads(input))
def test_dumpToFile(self):
f = StringIO()
ujson.dump([1, 2, 3], f)
assert "[1,2,3]" == f.getvalue()
def test_dumpToFileLikeObject(self):
class FileLike(object):
def __init__(self):
self.bytes = ''
def write(self, bytes):
self.bytes += bytes
f = FileLike()
ujson.dump([1, 2, 3], f)
assert "[1,2,3]" == f.bytes
def test_dumpFileArgsError(self):
try:
ujson.dump([], '')
except TypeError:
pass
else:
assert False, 'expected TypeError'
def test_loadFile(self):
f = StringIO("[1,2,3,4]")
assert [1, 2, 3, 4] == ujson.load(f)
f = StringIO("[1,2,3,4]")
tm.assert_numpy_array_equal(
np.array([1, 2, 3, 4]), ujson.load(f, numpy=True))
    def test_loadFileLikeObject(self):
        """ujson.load accepts any object with a ``read`` method."""
        # Minimal file-like stub: returns the payload on the first call and
        # (implicitly) None on later calls, using the presence of the
        # ``end`` attribute as an "already read" flag (EAFP).
        class FileLike(object):
            def read(self):
                try:
                    self.end
                except AttributeError:
                    self.end = True
                    return "[1,2,3,4]"
        f = FileLike()
        assert [1, 2, 3, 4] == ujson.load(f)
        # Same payload through the numpy fast path.
        f = FileLike()
        tm.assert_numpy_array_equal(
            np.array([1, 2, 3, 4]), ujson.load(f, numpy=True))
def test_loadFileArgsError(self):
try:
ujson.load("[]")
except TypeError:
pass
else:
assert False, "expected TypeError"
def test_version(self):
assert re.match(r'^\d+\.\d+(\.\d+)?$', ujson.__version__), \
"ujson.__version__ must be a string like '1.4.0'"
def test_encodeNumericOverflow(self):
try:
ujson.encode(12839128391289382193812939)
except OverflowError:
pass
else:
assert False, "expected OverflowError"
def test_encodeNumericOverflowNested(self):
for n in range(0, 100):
class Nested(object):
x = 12839128391289382193812939
nested = Nested()
try:
ujson.encode(nested)
except OverflowError:
pass
else:
assert False, "expected OverflowError"
def test_decodeNumberWith32bitSignBit(self):
# Test that numbers that fit within 32 bits but would have the
# sign bit set (2**31 <= x < 2**32) are decoded properly.
boundary1 = 2**31 # noqa
boundary2 = 2**32 # noqa
docs = (
'{"id": 3590016419}',
'{{"id": {low}}}'.format(low=2**31),
'{{"id": {high}}}'.format(high=2**32),
'{{"id": {one_less}}}'.format(one_less=(2**32) - 1),
)
results = (3590016419, 2**31, 2**32, 2**32 - 1)
for doc, result in zip(docs, results):
assert ujson.decode(doc)['id'] == result
def test_encodeBigEscape(self):
for x in range(10):
if compat.PY3:
base = '\u00e5'.encode('utf-8')
else:
base = "\xc3\xa5"
input = base * 1024 * 1024 * 2
output = ujson.encode(input) # noqa
def test_decodeBigEscape(self):
for x in range(10):
if compat.PY3:
base = '\u00e5'.encode('utf-8')
else:
base = "\xc3\xa5"
quote = compat.str_to_bytes("\"")
input = quote + (base * 1024 * 1024 * 2) + quote
output = ujson.decode(input) # noqa
def test_toDict(self):
d = {u("key"): 31337}
class DictTest(object):
def toDict(self):
return d
o = DictTest()
output = ujson.encode(o)
dec = ujson.decode(output)
assert dec == d
    def test_defaultHandler(self):
        """Exercise the ``default_handler`` hook for unencodable objects."""
        # An object ujson cannot serialize directly; ``recursive_attr``
        # returns a fresh instance every access, so naive attribute
        # traversal would never terminate.
        class _TestObject(object):
            def __init__(self, val):
                self.val = val
            @property
            def recursive_attr(self):
                return _TestObject("recursive_attr")
            def __str__(self):
                return str(self.val)
        # Without a handler, the recursive property blows the depth limit.
        pytest.raises(OverflowError, ujson.encode, _TestObject("foo"))
        # ``str`` as handler serializes via __str__.
        assert '"foo"' == ujson.encode(_TestObject("foo"),
                                       default_handler=str)
        # A handler may return any encodable value (string here).
        def my_handler(obj):
            return "foobar"
        assert '"foobar"' == ujson.encode(_TestObject("foo"),
                                          default_handler=my_handler)
        # Exceptions raised inside the handler must propagate unchanged.
        def my_handler_raises(obj):
            raise TypeError("I raise for anything")
        with tm.assert_raises_regex(TypeError, "I raise for anything"):
            ujson.encode(_TestObject("foo"), default_handler=my_handler_raises)
        # Handlers may also return non-string encodable values (int).
        def my_int_handler(obj):
            return 42
        assert ujson.decode(ujson.encode(
            _TestObject("foo"), default_handler=my_int_handler)) == 42
        # A handler returning a datetime goes through ujson's own datetime
        # encoding, so it must match encoding the datetime directly.
        def my_obj_handler(obj):
            return datetime.datetime(2013, 2, 3)
        assert (ujson.decode(ujson.encode(datetime.datetime(2013, 2, 3))) ==
                ujson.decode(ujson.encode(_TestObject("foo"),
                                          default_handler=my_obj_handler)))
        # The handler is applied per element inside containers; compare
        # against the stdlib json equivalent (``default=str``).
        l = [_TestObject("foo"), _TestObject("bar")]
        assert (json.loads(json.dumps(l, default=str)) ==
                ujson.decode(ujson.encode(l, default_handler=str)))
class TestNumpyJSONTests(object):
def test_Bool(self):
b = np.bool(True)
assert ujson.decode(ujson.encode(b)) == b
def test_BoolArray(self):
inpt = np.array([True, False, True, True, False, True, False, False],
dtype=np.bool)
outp = np.array(ujson.decode(ujson.encode(inpt)), dtype=np.bool)
tm.assert_numpy_array_equal(inpt, outp)
def test_Int(self):
num = np.int(2562010)
assert np.int(ujson.decode(ujson.encode(num))) == num
num = np.int8(127)
assert np.int8(ujson.decode(ujson.encode(num))) == num
num = np.int16(2562010)
assert np.int16(ujson.decode(ujson.encode(num))) == num
num = np.int32(2562010)
assert np.int32(ujson.decode(ujson.encode(num))) == num
num = np.int64(2562010)
assert np.int64(ujson.decode(ujson.encode(num))) == num
num = np.uint8(255)
assert np.uint8(ujson.decode(ujson.encode(num))) == num
num = np.uint16(2562010)
assert np.uint16(ujson.decode(ujson.encode(num))) == num
num = np.uint32(2562010)
assert np.uint32(ujson.decode(ujson.encode(num))) == num
num = np.uint64(2562010)
assert np.uint64(ujson.decode(ujson.encode(num))) == num
def test_IntArray(self):
arr = np.arange(100, dtype=np.int)
dtypes = (np.int, np.int8, np.int16, np.int32, np.int64,
np.uint, np.uint8, np.uint16, np.uint32, np.uint64)
for dtype in dtypes:
inpt = arr.astype(dtype)
outp = np.array(ujson.decode(ujson.encode(inpt)), dtype=dtype)
tm.assert_numpy_array_equal(inpt, outp)
    def test_IntMax(self):
        """Round-trip the maximum value of each numpy integer dtype.

        int64/uint64 are only checked off 32-bit platforms, and uint64 is
        capped at int64's max because the encoder emits signed values (see
        comment below).
        """
        num = np.int(np.iinfo(np.int).max)
        assert np.int(ujson.decode(ujson.encode(num))) == num
        num = np.int8(np.iinfo(np.int8).max)
        assert np.int8(ujson.decode(ujson.encode(num))) == num
        num = np.int16(np.iinfo(np.int16).max)
        assert np.int16(ujson.decode(ujson.encode(num))) == num
        num = np.int32(np.iinfo(np.int32).max)
        assert np.int32(ujson.decode(ujson.encode(num))) == num
        num = np.uint8(np.iinfo(np.uint8).max)
        assert np.uint8(ujson.decode(ujson.encode(num))) == num
        num = np.uint16(np.iinfo(np.uint16).max)
        assert np.uint16(ujson.decode(ujson.encode(num))) == num
        num = np.uint32(np.iinfo(np.uint32).max)
        assert np.uint32(ujson.decode(ujson.encode(num))) == num
        if not compat.is_platform_32bit():
            num = np.int64(np.iinfo(np.int64).max)
            assert np.int64(ujson.decode(ujson.encode(num))) == num
            # uint64 max will always overflow as it's encoded to signed
            num = np.uint64(np.iinfo(np.int64).max)
            assert np.uint64(ujson.decode(ujson.encode(num))) == num
def test_Float(self):
num = np.float(256.2013)
assert np.float(ujson.decode(ujson.encode(num))) == num
num = np.float32(256.2013)
assert np.float32(ujson.decode(ujson.encode(num))) == num
num = np.float64(256.2013)
assert np.float64(ujson.decode(ujson.encode(num))) == num
def test_FloatArray(self):
arr = np.arange(12.5, 185.72, 1.7322, dtype=np.float)
dtypes = (np.float, np.float32, np.float64)
for dtype in dtypes:
inpt = arr.astype(dtype)
outp = np.array(ujson.decode(ujson.encode(
inpt, double_precision=15)), dtype=dtype)
tm.assert_almost_equal(inpt, outp)
def test_FloatMax(self):
num = np.float(np.finfo(np.float).max / 10)
tm.assert_almost_equal(np.float(ujson.decode(
ujson.encode(num, double_precision=15))), num, 15)
num = np.float32(np.finfo(np.float32).max / 10)
tm.assert_almost_equal(np.float32(ujson.decode(
ujson.encode(num, double_precision=15))), num, 15)
num = np.float64(np.finfo(np.float64).max / 10)
tm.assert_almost_equal(np.float64(ujson.decode(
ujson.encode(num, double_precision=15))), num, 15)
def test_Arrays(self):
arr = np.arange(100)
arr = arr.reshape((10, 10))
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
tm.assert_numpy_array_equal(ujson.decode(
ujson.encode(arr), numpy=True), arr)
arr = arr.reshape((5, 5, 4))
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
tm.assert_numpy_array_equal(ujson.decode(
ujson.encode(arr), numpy=True), arr)
arr = arr.reshape((100, 1))
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
tm.assert_numpy_array_equal(ujson.decode(
ujson.encode(arr), numpy=True), arr)
arr = np.arange(96)
arr = arr.reshape((2, 2, 2, 2, 3, 2))
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
tm.assert_numpy_array_equal(ujson.decode(
ujson.encode(arr), numpy=True), arr)
l = ['a', list(), dict(), dict(), list(),
42, 97.8, ['a', 'b'], {'key': 'val'}]
arr = np.array(l)
tm.assert_numpy_array_equal(
np.array(ujson.decode(ujson.encode(arr))), arr)
arr = np.arange(100.202, 200.202, 1, dtype=np.float32)
arr = arr.reshape((5, 5, 4))
outp = np.array(ujson.decode(ujson.encode(arr)), dtype=np.float32)
tm.assert_almost_equal(arr, outp)
outp = ujson.decode(ujson.encode(arr), numpy=True, dtype=np.float32)
tm.assert_almost_equal(arr, outp)
def test_OdArray(self):
def will_raise():
ujson.encode(np.array(1))
pytest.raises(TypeError, will_raise)
def test_ArrayNumpyExcept(self):
input = ujson.dumps([42, {}, 'a'])
try:
ujson.decode(input, numpy=True)
assert False, "Expected exception!"
except(TypeError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps(['a', 'b', [], 'c'])
try:
ujson.decode(input, numpy=True)
assert False, "Expected exception!"
except(ValueError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps([['a'], 42])
try:
ujson.decode(input, numpy=True)
assert False, "Expected exception!"
except(ValueError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps([42, ['a'], 42])
try:
ujson.decode(input, numpy=True)
assert False, "Expected exception!"
except(ValueError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps([{}, []])
try:
ujson.decode(input, numpy=True)
assert False, "Expected exception!"
except(ValueError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps([42, None])
try:
ujson.decode(input, numpy=True)
assert False, "Expected exception!"
except(TypeError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps([{'a': 'b'}])
try:
ujson.decode(input, numpy=True, labelled=True)
assert False, "Expected exception!"
except(ValueError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps({'a': {'b': {'c': 42}}})
try:
ujson.decode(input, numpy=True, labelled=True)
assert False, "Expected exception!"
except(ValueError):
pass
except:
assert False, "Wrong exception"
input = ujson.dumps([{'a': 42, 'b': 23}, {'c': 17}])
try:
ujson.decode(input, numpy=True, labelled=True)
assert False, "Expected exception!"
except(ValueError):
pass
except:
assert False, "Wrong exception"
def test_ArrayNumpyLabelled(self):
input = {'a': []}
output = ujson.loads(ujson.dumps(input), numpy=True, labelled=True)
assert (np.empty((1, 0)) == output[0]).all()
assert (np.array(['a']) == output[1]).all()
assert output[2] is None
input = [{'a': 42}]
output = ujson.loads(ujson.dumps(input), numpy=True, labelled=True)
assert (np.array([42]) == output[0]).all()
assert output[1] is None
assert (np.array([u('a')]) == output[2]).all()
# Write out the dump explicitly so there is no dependency on iteration
# order GH10837
input_dumps = ('[{"a": 42, "b":31}, {"a": 24, "c": 99}, '
'{"a": 2.4, "b": 78}]')
output = ujson.loads(input_dumps, numpy=True, labelled=True)
expectedvals = np.array(
[42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
assert (expectedvals == output[0]).all()
assert output[1] is None
assert (np.array([u('a'), 'b']) == output[2]).all()
input_dumps = ('{"1": {"a": 42, "b":31}, "2": {"a": 24, "c": 99}, '
'"3": {"a": 2.4, "b": 78}}')
output = ujson.loads(input_dumps, numpy=True, labelled=True)
expectedvals = np.array(
[42, 31, 24, 99, 2.4, 78], dtype=int).reshape((3, 2))
assert (expectedvals == output[0]).all()
assert (np.array(['1', '2', '3']) == output[1]).all()
assert (np.array(['a', 'b']) == output[2]).all()
class TestPandasJSONTests(object):
    def test_DataFrame(self):
        """Round-trip a small DataFrame through every ujson orient."""
        df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
            'a', 'b'], columns=['x', 'y', 'z'])
        # column indexed (default orient): {column -> {index -> value}}
        outp = DataFrame(ujson.decode(ujson.encode(df)))
        assert (df == outp).values.all()
        tm.assert_index_equal(df.columns, outp.columns)
        tm.assert_index_equal(df.index, outp.index)
        # orient="split": decoded dict feeds straight into the DataFrame
        # constructor (after _clean_dict normalizes the keys).
        dec = _clean_dict(ujson.decode(ujson.encode(df, orient="split")))
        outp = DataFrame(**dec)
        assert (df == outp).values.all()
        tm.assert_index_equal(df.columns, outp.columns)
        tm.assert_index_equal(df.index, outp.index)
        # orient="records": row labels are lost, so reattach them.
        outp = DataFrame(ujson.decode(ujson.encode(df, orient="records")))
        outp.index = df.index
        assert (df == outp).values.all()
        tm.assert_index_equal(df.columns, outp.columns)
        # orient="values": both axes' labels are lost; only data compares.
        outp = DataFrame(ujson.decode(ujson.encode(df, orient="values")))
        outp.index = df.index
        assert (df.values == outp.values).all()
        # orient="index": {index -> {column -> value}}, i.e. the transpose.
        outp = DataFrame(ujson.decode(ujson.encode(df, orient="index")))
        assert (df.transpose() == outp).values.all()
        tm.assert_index_equal(df.transpose().columns, outp.columns)
        tm.assert_index_equal(df.transpose().index, outp.index)
def test_DataFrameNumpy(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
'a', 'b'], columns=['x', 'y', 'z'])
# column indexed
outp = DataFrame(ujson.decode(ujson.encode(df), numpy=True))
assert (df == outp).values.all()
tm.assert_index_equal(df.columns, outp.columns)
tm.assert_index_equal(df.index, outp.index)
dec = _clean_dict(ujson.decode(ujson.encode(df, orient="split"),
numpy=True))
outp = DataFrame(**dec)
assert (df == outp).values.all()
tm.assert_index_equal(df.columns, outp.columns)
tm.assert_index_equal(df.index, outp.index)
outp = DataFrame(ujson.decode(ujson.encode(df, orient="index"),
numpy=True))
assert (df.transpose() == outp).values.all()
tm.assert_index_equal(df.transpose().columns, outp.columns)
tm.assert_index_equal(df.transpose().index, outp.index)
def test_DataFrameNested(self):
df = DataFrame([[1, 2, 3], [4, 5, 6]], index=[
'a', 'b'], columns=['x', 'y', 'z'])
nested = {'df1': df, 'df2': df.copy()}
exp = {'df1': ujson.decode(ujson.encode(df)),
'df2': ujson.decode(ujson.encode(df))}
assert ujson.decode(ujson.encode(nested)) == exp
exp = {'df1': ujson.decode(ujson.encode(df, orient="index")),
'df2': ujson.decode(ujson.encode(df, orient="index"))}
assert ujson.decode(ujson.encode(nested, orient="index")) == exp
exp = {'df1': ujson.decode(ujson.encode(df, orient="records")),
'df2': ujson.decode(ujson.encode(df, orient="records"))}
assert ujson.decode(ujson.encode(nested, orient="records")) == exp
exp = {'df1': ujson.decode(ujson.encode(df, orient="values")),
'df2': ujson.decode(ujson.encode(df, orient="values"))}
assert ujson.decode(ujson.encode(nested, orient="values")) == exp
exp = {'df1': ujson.decode(ujson.encode(df, orient="split")),
'df2': ujson.decode(ujson.encode(df, orient="split"))}
assert ujson.decode(ujson.encode(nested, orient="split")) == exp
def test_DataFrameNumpyLabelled(self):
    """Decode with numpy=True, labelled=True; the payload unpacks into
    positional DataFrame constructor arguments."""
    frame = DataFrame([[1, 2, 3], [4, 5, 6]], index=['a', 'b'],
                      columns=['x', 'y', 'z'])

    # Default orient is column indexed, so the decoded frame is transposed.
    decoded = DataFrame(*ujson.decode(ujson.encode(frame),
                                      numpy=True, labelled=True))
    assert (frame.T == decoded).values.all()
    tm.assert_index_equal(frame.T.columns, decoded.columns)
    tm.assert_index_equal(frame.T.index, decoded.index)

    # orient="records" loses the index; restore it before comparing.
    decoded = DataFrame(*ujson.decode(ujson.encode(frame, orient="records"),
                                      numpy=True, labelled=True))
    decoded.index = frame.index
    assert (frame == decoded).values.all()
    tm.assert_index_equal(frame.columns, decoded.columns)

    # orient="index" round-trips completely.
    decoded = DataFrame(*ujson.decode(ujson.encode(frame, orient="index"),
                                      numpy=True, labelled=True))
    assert (frame == decoded).values.all()
    tm.assert_index_equal(frame.columns, decoded.columns)
    tm.assert_index_equal(frame.index, decoded.index)
def test_Series(self):
    """Round-trip a Series through every supported orient."""
    s = Series([10, 20, 30, 40, 50, 60], name="series",
               index=[6, 7, 8, 9, 10, 15]).sort_values()

    # Default orient: index keys come back as strings, the name is lost.
    outp = Series(ujson.decode(ujson.encode(s))).sort_values()
    exp = Series([10, 20, 30, 40, 50, 60],
                 index=['6', '7', '8', '9', '10', '15'])
    tm.assert_series_equal(outp, exp)

    outp = Series(ujson.decode(ujson.encode(s), numpy=True)).sort_values()
    tm.assert_series_equal(outp, exp)

    # orient="split" preserves both the index and the name.
    dec = _clean_dict(ujson.decode(ujson.encode(s, orient="split")))
    outp = Series(**dec)
    tm.assert_series_equal(outp, s)

    dec = _clean_dict(ujson.decode(ujson.encode(s, orient="split"),
                                   numpy=True))
    outp = Series(**dec)
    # BUG FIX: this decoded Series was built but never checked before.
    tm.assert_series_equal(outp, s)

    exp_np = Series(np.array([10, 20, 30, 40, 50, 60]))
    exp_pd = Series([10, 20, 30, 40, 50, 60])

    # orient="records"/"values" drop the index entirely.
    outp = Series(ujson.decode(ujson.encode(s, orient="records"),
                               numpy=True))
    tm.assert_series_equal(outp, exp_np)

    outp = Series(ujson.decode(ujson.encode(s, orient="records")))
    tm.assert_series_equal(outp, exp_pd)

    outp = Series(ujson.decode(ujson.encode(s, orient="values"),
                               numpy=True))
    tm.assert_series_equal(outp, exp_np)

    outp = Series(ujson.decode(ujson.encode(s, orient="values")))
    tm.assert_series_equal(outp, exp_pd)

    # orient="index" keeps the index, but as strings.
    outp = Series(ujson.decode(ujson.encode(
        s, orient="index"))).sort_values()
    exp = Series([10, 20, 30, 40, 50, 60],
                 index=['6', '7', '8', '9', '10', '15'])
    tm.assert_series_equal(outp, exp)

    outp = Series(ujson.decode(ujson.encode(
        s, orient="index"), numpy=True)).sort_values()
    tm.assert_series_equal(outp, exp)
def test_SeriesNested(self):
    """Dicts containing Series encode each value per the given orient."""
    s = Series([10, 20, 30, 40, 50, 60], name="series",
               index=[6, 7, 8, 9, 10, 15]).sort_values()
    nested = {'s1': s, 's2': s.copy()}

    # Default orient.
    exp = {'s1': ujson.decode(ujson.encode(s)),
           's2': ujson.decode(ujson.encode(s))}
    assert ujson.decode(ujson.encode(nested)) == exp

    # Each explicit orient behaves the same way, element-wise.
    for orient in ("split", "records", "values", "index"):
        exp = {'s1': ujson.decode(ujson.encode(s, orient=orient)),
               's2': ujson.decode(ujson.encode(s, orient=orient))}
        assert ujson.decode(ujson.encode(nested, orient=orient)) == exp
def test_Index(self):
    """Round-trip an Index through every supported orient."""
    i = Index([23, 45, 18, 98, 43, 11], name="index")

    # Default (column indexed) round trip; the name must be re-supplied.
    outp = Index(ujson.decode(ujson.encode(i)), name='index')
    tm.assert_index_equal(i, outp)
    outp = Index(ujson.decode(ujson.encode(i), numpy=True), name='index')
    tm.assert_index_equal(i, outp)

    # orient="split" keeps the name inside the payload itself.
    dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split")))
    outp = Index(**dec)
    tm.assert_index_equal(i, outp)
    assert i.name == outp.name

    dec = _clean_dict(ujson.decode(ujson.encode(i, orient="split"),
                                   numpy=True))
    outp = Index(**dec)
    tm.assert_index_equal(i, outp)
    assert i.name == outp.name

    # The remaining orients drop the name, so it is restored manually.
    for orient in ("values", "records", "index"):
        for use_numpy in (False, True):
            if use_numpy:
                decoded = ujson.decode(ujson.encode(i, orient=orient),
                                       numpy=True)
            else:
                decoded = ujson.decode(ujson.encode(i, orient=orient))
            outp = Index(decoded, name='index')
            tm.assert_index_equal(i, outp)
def test_datetimeindex(self):
    """DatetimeIndex values survive an ns-resolution ujson round trip."""
    from pandas.core.indexes.datetimes import date_range

    rng = date_range('1/1/2000', periods=20)

    decoded = DatetimeIndex(np.array(ujson.decode(
        ujson.encode(rng, date_unit='ns'))))
    tm.assert_index_equal(rng, decoded)

    ts = Series(np.random.randn(len(rng)), index=rng)
    round_tripped = Series(ujson.decode(ujson.encode(ts, date_unit='ns')))
    # The decoded index holds epoch nanoseconds; rebuild the DatetimeIndex.
    round_tripped.index = DatetimeIndex(
        round_tripped.index.values.astype(np.int64))
    tm.assert_series_equal(ts, round_tripped)
def test_decodeArrayTrailingCommaFail(self):
    """A trailing comma inside an array is invalid JSON."""
    # Renamed from ``input`` to avoid shadowing the builtin.
    bad_json = "[31337,]"
    try:
        ujson.decode(bad_json)
    except ValueError:
        pass
    else:
        assert False, "expected ValueError"
def test_decodeArrayLeadingCommaFail(self):
    """A leading comma inside an array is invalid JSON."""
    # Renamed from ``input`` to avoid shadowing the builtin.
    bad_json = "[,31337]"
    try:
        ujson.decode(bad_json)
    except ValueError:
        pass
    else:
        assert False, "expected ValueError"
def test_decodeArrayOnlyCommaFail(self):
    """An array containing only a comma is invalid JSON."""
    # Renamed from ``input`` to avoid shadowing the builtin.
    bad_json = "[,]"
    try:
        ujson.decode(bad_json)
    except ValueError:
        pass
    else:
        assert False, "expected ValueError"
def test_decodeArrayUnmatchedBracketFail(self):
    """An unmatched closing bracket must be rejected."""
    # Renamed from ``input`` to avoid shadowing the builtin.
    bad_json = "[]]"
    try:
        ujson.decode(bad_json)
    except ValueError:
        pass
    else:
        assert False, "expected ValueError"
def test_decodeArrayEmpty(self):
    """An empty array decodes without error."""
    # Inlined the literal, which was previously bound to a local named
    # ``input`` that shadowed the builtin.
    ujson.decode("[]")
def test_decodeArrayOneItem(self):
    """A one-element array decodes without error."""
    # Inlined the literal, which was previously bound to a local named
    # ``input`` that shadowed the builtin.
    ujson.decode("[31337]")
def test_decodeBigValue(self):
    """2**63 - 1 (max signed 64-bit) must decode successfully."""
    # Inlined the literal, which was previously bound to a local named
    # ``input`` that shadowed the builtin.
    ujson.decode("9223372036854775807")
def test_decodeSmallValue(self):
    """-2**63 (min signed 64-bit) must decode successfully."""
    # Inlined the literal, which was previously bound to a local named
    # ``input`` that shadowed the builtin.
    ujson.decode("-9223372036854775808")
def test_decodeTooBigValue(self):
    """2**63 overflows a signed 64-bit integer, so decoding must fail."""
    # Renamed from ``input`` to avoid shadowing the builtin.
    bad_json = "9223372036854775808"
    try:
        ujson.decode(bad_json)
    except ValueError:
        pass
    else:
        assert False, "expected ValueError"
def test_decodeTooSmallValue(self):
    """A value below the signed 64-bit minimum must fail to decode."""
    # Renamed from ``input`` to avoid shadowing the builtin.  The literal
    # is kept exactly as-is (note the extra digit: -9022...).
    bad_json = "-90223372036854775809"
    try:
        ujson.decode(bad_json)
    except ValueError:
        pass
    else:
        assert False, "expected ValueError"
def test_decodeVeryTooBigValue(self):
    """Overflow check; NOTE(review): body duplicates test_decodeTooBigValue —
    presumably a much larger literal was intended.  Behavior kept as-is."""
    # Renamed from ``input`` to avoid shadowing the builtin.
    bad_json = "9223372036854775808"
    try:
        ujson.decode(bad_json)
    except ValueError:
        pass
    else:
        assert False, "expected ValueError"
def test_decodeVeryTooSmallValue(self):
    """Underflow check; NOTE(review): body duplicates test_decodeTooSmallValue —
    presumably a much smaller literal was intended.  Behavior kept as-is."""
    # Renamed from ``input`` to avoid shadowing the builtin.
    bad_json = "-90223372036854775809"
    try:
        ujson.decode(bad_json)
    except ValueError:
        pass
    else:
        assert False, "expected ValueError"
def test_decodeWithTrailingWhitespaces(self):
    """Trailing whitespace after the JSON value is allowed."""
    # Inlined the literal, which was previously bound to a local named
    # ``input`` that shadowed the builtin.
    ujson.decode("{}\n\t ")
def test_decodeWithTrailingNonWhitespaces(self):
    """Trailing garbage after the JSON value must be rejected."""
    # Renamed from ``input`` to avoid shadowing the builtin.
    bad_json = "{}\n\t a"
    try:
        ujson.decode(bad_json)
    except ValueError:
        pass
    else:
        assert False, "expected ValueError"
def test_decodeArrayWithBigInt(self):
    """An integer beyond 64-bit range inside an array must be rejected."""
    raised = False
    try:
        ujson.loads('[18446098363113800555]')
    except ValueError:
        raised = True
    assert raised, "expected ValueError"
def test_decodeArrayFaultyUnicode(self):
    """Must raise ValueError.  NOTE(review): despite the name, the fixture is
    the same big-int literal as test_decodeArrayWithBigInt — confirm whether a
    malformed-unicode payload was intended.  Behavior kept as-is."""
    raised = False
    try:
        ujson.loads('[18446098363113800555]')
    except ValueError:
        raised = True
    assert raised, "expected ValueError"
def test_decodeFloatingPointAdditionalTests(self):
    """Decoded floats must match their literals to high precision."""
    places = 15
    literals = (
        "-1.1234567893", "-1.234567893", "-1.34567893", "-1.4567893",
        "-1.567893", "-1.67893", "-1.7893", "-1.893", "-1.3",
        "1.1234567893", "1.234567893", "1.34567893", "1.4567893",
        "1.567893", "1.67893", "1.7893", "1.893", "1.3",
    )
    for text in literals:
        # float(text) is the exact value the literal denotes.
        tm.assert_almost_equal(float(text), ujson.loads(text),
                               check_less_precise=places)
def test_encodeBigSet(self):
    """Encoding a 100k-element set must not crash."""
    big = set(range(0, 100000))
    ujson.encode(big)
def test_encodeEmptySet(self):
    """An empty set encodes as an empty JSON array."""
    assert "[]" == ujson.encode(set())
def test_encodeSet(self):
    """Every decoded element must come from the original set (order is
    unspecified for sets, so only membership is checked)."""
    values = set([1, 2, 3, 4, 5, 6, 7, 8, 9])
    decoded = ujson.decode(ujson.encode(values))
    for item in decoded:
        assert item in values
def _clean_dict(d):
return {str(k): v for k, v in compat.iteritems(d)}
| louispotok/pandas | pandas/tests/io/json/test_ujson.py | Python | bsd-3-clause | 56,148 |
from fjord.base.tests import TestCase
from fjord.feedback.models import ResponseDocType
from fjord.feedback.tests import ResponseFactory
from fjord.search.index import chunked
from fjord.search.tests import ElasticTestCase
class ChunkedTests(TestCase):
    """Unit tests for the ``chunked`` iterator helper."""

    def test_chunked(self):
        cases = [
            # (items, chunk size, expected chunks)
            ([], 1, []),                                     # nothing in, nothing out
            ([1], 10, [(1,)]),                               # len(list) < n
            ([1, 2], 2, [(1, 2)]),                           # len(list) == n
            ([1, 2, 3, 4, 5], 2, [(1, 2), (3, 4), (5,)]),    # len(list) > n
        ]
        for items, size, expected in cases:
            assert list(chunked(items, size)) == expected
class TestLiveIndexing(ElasticTestCase):
    def test_live_indexing(self):
        """Creating and deleting a Response should change the ES doc count."""
        search = ResponseDocType.docs.search()
        count_pre = search.count()

        s = ResponseFactory(happy=True, description='Test live indexing.')
        # Force an Elasticsearch index refresh so the new doc is visible.
        self.refresh()
        assert count_pre + 1 == search.count()

        s.delete()
        self.refresh()
        # Deleting the model removes its document from the index again.
        assert count_pre == search.count()
| staranjeet/fjord | fjord/search/tests/test_index.py | Python | bsd-3-clause | 1,072 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
import os
import codecs
from setuptools import setup
def read(fname):
    """Return the UTF-8 decoded contents of *fname*.

    The path is resolved relative to this setup.py so the build works
    regardless of the current working directory.
    """
    file_path = os.path.join(os.path.dirname(__file__), fname)
    # Use a context manager so the file handle is closed promptly; the
    # original leaked the handle until garbage collection.
    with codecs.open(file_path, encoding='utf-8') as f:
        return f.read()
# Package metadata for the pytest plugin; the pytest11 entry point below is
# what makes pytest auto-load the plugin once it is installed.
setup(
    name='pytest-typehints',
    version='0.1.0',
    author='Edward Dunn Ekelund',
    author_email='edward.ekelund@gmail.com',
    maintainer='Edward Dunn Ekelund',
    maintainer_email='edward.ekelund@gmail.com',
    license='BSD-3',
    url='https://github.com/eddie-dunn/pytest-typehints',
    description='Pytest plugin that checks for type hinting',
    long_description=read('README.rst'),
    py_modules=['pytest_typehints'],
    install_requires=['pytest>=2.9.2'],
    classifiers=[
        'Development Status :: 4 - Beta',
        'Framework :: Pytest',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Testing',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy',
        'Operating System :: OS Independent',
        'License :: OSI Approved :: BSD License',
    ],
    entry_points={
        # Registers the module under pytest's plugin entry-point group.
        'pytest11': [
            'typehints = pytest_typehints',
        ],
    },
)
| eddie-dunn/pytest-typehints | setup.py | Python | bsd-3-clause | 1,385 |
import operator
from turbion.bits.antispam import Filter
# Collect the URL patterns exposed by every registered antispam filter.
# A plain loop replaces reduce(operator.add, ...): it avoids shadowing the
# ``filter`` builtin, drops the unused ``name`` binding, and skips the
# repeated intermediate list concatenations.
urlpatterns = []
for _name, spam_filter in Filter.manager.all():
    if hasattr(spam_filter, 'urlpatterns'):
        urlpatterns += spam_filter.urlpatterns
| strogo/turbion | turbion/bits/antispam/urls.py | Python | bsd-3-clause | 209 |
import collections
from django import forms
from django.forms.util import ErrorDict
from tower import ugettext as _, ugettext_lazy as _lazy
import amo
from amo import helpers
from applications.models import AppVersion
# (value, label) choices for the add-on search "sort by" menu.
sort_by = (
    ('', _lazy(u'Keyword Match')),
    ('updated', _lazy(u'Updated', 'advanced_search_form_updated')),
    ('newest', _lazy(u'Created', 'advanced_search_form_newest')),
    ('weeklydownloads', _lazy(u'Downloads')),
    ('users', _lazy(u'Users')),
    ('averagerating', _lazy(u'Rating', 'advanced_search_form_rating')),
)

# Sort options for collection search results.
collection_sort_by = (
    ('weekly', _lazy(u'Most popular this week')),
    ('monthly', _lazy(u'Most popular this month')),
    ('all', _lazy(u'Most popular all time')),
    ('rating', _lazy(u'Highest Rated')),
    ('newest', _lazy(u'Newest')),
)

# Allowed results-per-page values; the first entry is the default.
per_page = (20, 50, )


def tuplize(x):
    """Turn a dotted version float (e.g. 3.6) into a (major, minor) tuple."""
    # Was a lambda bound to a name; a def is the idiomatic form (PEP 8 E731).
    return divmod(int(x * 10), 10)


# These releases were so minor that we don't want to search for them.
skip_versions = collections.defaultdict(list)
skip_versions[amo.FIREFOX] = [tuplize(v) for v in amo.FIREFOX.exclude_versions]

# Lowest displayable version per application (defaults to 0.0).
min_version = collections.defaultdict(lambda: (0, 0))
min_version.update({
    amo.FIREFOX: tuplize(amo.FIREFOX.min_display_version),
    amo.THUNDERBIRD: tuplize(amo.THUNDERBIRD.min_display_version),
    amo.SEAMONKEY: tuplize(amo.SEAMONKEY.min_display_version),
    amo.SUNBIRD: tuplize(amo.SUNBIRD.min_display_version),
})
def get_app_versions(app):
    """Build (version, label) choices for *app*'s versions, newest first."""
    appversions = AppVersion.objects.filter(application=app.id)
    lowest, skipped = min_version[app], skip_versions[app]
    # De-duplicate to (major, minor1) pairs, then drop hidden releases.
    pairs = set((av.major, av.minor1) for av in appversions)
    strings = ['%s.%s' % pair for pair in sorted(pairs, reverse=True)
               if pair >= lowest and pair not in skipped]
    return [('any', _('Any'))] + zip(strings, strings)
# Fake categories to slip some add-on types into the search groups.
# Mimics the (id, name, weight, type) attributes of a real category object
# so both can be sorted and rendered with the same code.
_Cat = collections.namedtuple('Cat', 'id name weight type')
def get_search_groups(app):
    """Return (all category choices, top-level choices) for *app*'s search UI."""
    # Fake categories for the add-on types this app supports.
    entries = [_Cat(0, amo.ADDON_TYPES[t], 0, t)
               for t in (amo.ADDON_DICT, amo.ADDON_SEARCH, amo.ADDON_THEME)
               if t in app.types]
    entries.extend(helpers.sidebar(app)[0])
    entries.sort(key=lambda c: (c.weight, c.name))
    sub = [('%s,%s' % (c.type, c.id), c.name) for c in entries]

    top_level = [('all', _('all add-ons')),
                 ('collections', _('all collections')), ]
    if amo.ADDON_PERSONA in app.types:
        top_level += (('personas', _('all personas')),)

    # "all add-ons" first, then the categories, then the other global choices.
    return top_level[:1] + sub + top_level[1:], top_level
# (value, label) pairs for the top-level search type selector.
SEARCH_CHOICES = (
    ('all', _lazy('search for add-ons')),
    ('collections', _lazy('search for collections')),
    ('personas', _lazy('search for personas')),
    ('apps', _lazy('search for apps')))
class SimpleSearchForm(forms.Form):
    """Powers the search box on every page."""
    q = forms.CharField(required=False)
    cat = forms.CharField(required=False, widget=forms.HiddenInput)
    appver = forms.CharField(required=False, widget=forms.HiddenInput)
    platform = forms.CharField(required=False, widget=forms.HiddenInput)
    # Maps category value -> placeholder label, shared by all instances.
    choices = dict(SEARCH_CHOICES)

    def clean_cat(self):
        # Rebind self.data to a plain dict (a request QueryDict is
        # immutable) so the default category can be injected into it.
        self.data = dict(self.data.items())
        return self.data.setdefault('cat', 'all')

    def placeholder(self):
        """Placeholder text for the search box, based on the category."""
        val = self.clean_cat()
        return self.choices.get(val, self.choices['all'])
def SearchForm(request):
    """Build and bind an advanced-search form for *request*'s application.

    The form class is created per-request because several field choices
    (app versions, categories) depend on which application is active.
    """
    current_app = request.APP or amo.FIREFOX
    search_groups, top_level = get_search_groups(current_app)

    class _SearchForm(SimpleSearchForm):

        cat = forms.ChoiceField(choices=search_groups, required=False)

        # This gets replaced by a <select> with js.
        lver = forms.ChoiceField(
            label=_(u'{0} Version').format(unicode(current_app.pretty)),
            choices=get_app_versions(current_app), required=False)
        appver = forms.CharField(required=False)

        atype = forms.TypedChoiceField(label=_('Type'),
            choices=[(t, amo.ADDON_TYPE[t]) for t in amo.ADDON_SEARCH_TYPES],
            required=False, coerce=int, empty_value=amo.ADDON_ANY)

        pid = forms.TypedChoiceField(label=_('Platform'),
            choices=[(p[0], p[1].name) for p in amo.PLATFORMS.iteritems()
                     if p[1] != amo.PLATFORM_ANY], required=False,
            coerce=int, empty_value=amo.PLATFORM_ANY.id)
        platform = forms.ChoiceField(required=False,
            choices=[[p.shortname, p.id] for p in amo.PLATFORMS.values()])

        sort = forms.ChoiceField(label=_('Sort By'), choices=sort_by,
                                 required=False)

        pp = forms.TypedChoiceField(label=_('Per Page'),
            choices=zip(per_page, per_page), required=False, coerce=int,
            empty_value=per_page[0])

        advanced = forms.BooleanField(widget=forms.HiddenInput, required=False)
        tag = forms.CharField(widget=forms.HiddenInput, required=False)
        page = forms.IntegerField(widget=forms.HiddenInput, required=False)

        # Attach these to the form for usage in the template.
        top_level_cat = dict(top_level)

        def clean_platform(self):
            # Translate the platform shortname back into its numeric id.
            p = self.cleaned_data.get('platform')
            choices = dict(self.fields['platform'].choices)
            return choices.get(p)

        # TODO(jbalogh): when we start using this form for zamboni search, it
        # should check that the appid and lver match up using app_versions.
        def clean(self):
            d = self.cleaned_data
            raw = self.data

            # Set some defaults
            if not d.get('appid'):
                d['appid'] = request.APP.id

            # Since not all categories are listed in this form, we use the raw
            # data.
            if 'cat' in raw:
                if ',' in raw['cat']:
                    try:
                        d['atype'], d['cat'] = map(int, raw['cat'].split(','))
                    except ValueError:
                        d['cat'] = None
                elif raw['cat'] == 'all':
                    d['cat'] = None

            if 'page' not in d or not d['page'] or d['page'] < 1:
                d['page'] = 1
            return d

        def full_clean(self):
            """
            Cleans all of self.data and populates self._errors and
            self.cleaned_data.
            Does not remove cleaned_data if there are errors.
            """
            self._errors = ErrorDict()
            if not self.is_bound:  # Stop further processing.
                return
            self.cleaned_data = {}
            # If the form is permitted to be empty, and none of the form data
            # has changed from the initial data, short circuit any validation.
            if self.empty_permitted and not self.has_changed():
                return
            self._clean_fields()
            self._clean_form()

    d = request.GET.copy()
    return _SearchForm(d)
class SecondarySearchForm(forms.Form):
    """Search form used for collection/persona result pages."""
    q = forms.CharField(widget=forms.HiddenInput, required=False)
    cat = forms.CharField(widget=forms.HiddenInput)
    pp = forms.CharField(widget=forms.HiddenInput, required=False)
    sortby = forms.ChoiceField(label=_lazy(u'Sort By'),
                               choices=collection_sort_by,
                               initial='weekly', required=False)
    page = forms.IntegerField(widget=forms.HiddenInput, required=False)

    def clean_pp(self):
        """Coerce per-page to int, falling back to the default on bad input."""
        d = self.cleaned_data['pp']
        try:
            return int(d)
        # Only conversion failures fall back to the default; the original
        # bare ``except:`` also swallowed SystemExit/KeyboardInterrupt.
        except (TypeError, ValueError):
            return per_page[0]

    def clean(self):
        d = self.cleaned_data
        # Guarantee a per-page value even when the field was left empty.
        if not d.get('pp'):
            d['pp'] = per_page[0]
        return d

    def full_clean(self):
        """
        Cleans all of self.data and populates self._errors and
        self.cleaned_data.
        Does not remove cleaned_data if there are errors.
        """
        self._errors = ErrorDict()
        if not self.is_bound:  # Stop further processing.
            return
        self.cleaned_data = {}
        # If the form is permitted to be empty, and none of the form data
        # has changed from the initial data, short circuit any validation.
        if self.empty_permitted and not self.has_changed():
            return
        self._clean_fields()
        self._clean_form()
# Sort options for Elasticsearch-backed add-on search.
SORT_CHOICES = (
    (None, _lazy(u'Relevance')),
    ('users', _lazy(u'Most Users')),
    ('rating', _lazy(u'Top Rated')),
    ('created', _lazy(u'Newest')),
    # --
    ('name', _lazy(u'Name')),
    ('downloads', _lazy(u'Weekly Downloads')),
    ('updated', _lazy(u'Recently Updated')),
    ('hotness', _lazy(u'Up & Coming')),
)

# Reduced sort options used when searching webapps.
APP_SORT_CHOICES = (
    (None, _lazy(u'Relevance')),
    ('downloads', _lazy(u'Weekly Downloads')),
    ('rating', _lazy(u'Top Rated')),
    ('created', _lazy(u'Newest')),
    # --
    ('name', _lazy(u'Name')),
    ('hotness', _lazy(u'Up & Coming')),
)
class ESSearchForm(forms.Form):
    """Search form backed by Elasticsearch."""
    q = forms.CharField(required=False)
    tag = forms.CharField(required=False)
    platform = forms.ChoiceField(required=False,
        choices=[(p.shortname, p.id) for p in amo.PLATFORMS.values()])
    appver = forms.CharField(required=False)
    atype = forms.TypedChoiceField(required=False, coerce=int,
        choices=[(t, amo.ADDON_TYPE[t]) for t in amo.ADDON_SEARCH_TYPES])
    cat = forms.CharField(required=False)
    sort = forms.ChoiceField(required=False, choices=SORT_CHOICES)

    def __init__(self, *args, **kw):
        addon_type = kw.pop('type', None)
        super(ESSearchForm, self).__init__(*args, **kw)
        # Webapps expose a reduced set of sort options.
        if addon_type == amo.ADDON_WEBAPP:
            self.fields['sort'].choices = APP_SORT_CHOICES

    def clean_appver(self):
        """Normalize e.g. '3.6.12' to the 'major.0' form ('3.0')."""
        appver = self.cleaned_data.get('appver')
        if appver:
            major = appver.split('.')[0]
            if major.isdigit():
                appver = major + '.0'
        return appver

    def clean_sort(self):
        # Reject sort values that are not in the canonical choice list.
        sort = self.cleaned_data.get('sort')
        return sort if sort in dict(SORT_CHOICES) else None

    def clean_cat(self):
        """Parse 'atype,cat' pairs or plain integer category ids."""
        cat = self.cleaned_data.get('cat')
        if ',' in cat:
            try:
                self.cleaned_data['atype'], cat = map(int, cat.split(','))
            except ValueError:
                return None
            # BUG FIX: previously fell through and implicitly returned None
            # here, discarding the successfully parsed category id.
            return cat
        else:
            try:
                return int(cat)
            except ValueError:
                return None

    def full_clean(self):
        """
        Cleans self.data and populates self._errors and self.cleaned_data.
        Does not remove cleaned_data if there are errors.
        """
        self._errors = ErrorDict()
        if not self.is_bound:  # Stop further processing.
            return
        self.cleaned_data = {}
        # If the form is permitted to be empty, and none of the form data
        # has changed from the initial data, short circuit any validation.
        if self.empty_permitted and not self.has_changed():
            return
        self._clean_fields()
        self._clean_form()
| jbalogh/zamboni | apps/search/forms.py | Python | bsd-3-clause | 11,040 |
class Grating:
    """A class that describing gratings. Sigma should be in lines/mm and the
    units of the dimensions should be mm.
    """

    def __init__(self, name='', spacing=600, order=1, height=100, width=100,
                 thickness=100, blaze=0, type='transmission'):
        # Identification and diffraction parameters.
        self.name = name
        self.order = order
        self.blaze = blaze
        self.type = type
        # Physical dimensions of the grating, in mm.
        self.height = height
        self.width = width
        self.thickness = thickness
        # Groove period (mm): inverse of the line density in lines/mm.
        self.sigma = 1.0 / spacing
        # Sign convention used in the grating equation: transmission
        # gratings get -1, everything else +1.
        self.sign = -1 if self.type == 'transmission' else 1
| crawfordsm/pyspectrograph | PySpectrograph/Spectrograph/Grating.py | Python | bsd-3-clause | 714 |
#!/usr/bin/env python
import sys
import argparse
import os
import unittest2 as unittest
from ruamel import yaml
from smacha.util import Tester
import rospy
import rospkg
import rostest
# Template search paths (relative to this test file) and output locations.
ROS_TEMPLATES_DIR = '../src/smacha_ros/templates'
TEMPLATES_DIR = 'smacha_templates/smacha_test_examples'
# The following defaults may be overridden by the YAML config in __main__.
WRITE_OUTPUT_FILES = False
OUTPUT_PY_DIR = '/tmp/smacha/smacha_test_examples/smacha_generated_py'
OUTPUT_YML_DIR = '/tmp/smacha/smacha_test_examples/smacha_generated_scripts'
CONF_FILE = 'test_examples_config.yml'
DEBUG_LEVEL = 1
# Populated in __main__ from CONF_FILE before the tests run.
CONF_DICT = {}
class TestGenerate(Tester):
    """Tester class for general unit testing of various SMACHA tool
    functionalities.

    The tests run by this class are performed by generating code using SMACHA
    scripts and templates and comparing the generated output code to the
    expected code from hand-written code samples.

    This includes testing both SMACHA YAML scripts generated by, e.g. the
    :func:`smacha.parser.contain` and :func:`smacha.parser.extract` methods,
    and Python code generated by the :func:`smacha.generator.run` method.
    """

    def __init__(self, *args, **kwargs):
        # Set Tester member variables
        self.set_write_output_files(WRITE_OUTPUT_FILES)
        self.set_output_py_dir(OUTPUT_PY_DIR)
        self.set_output_yml_dir(OUTPUT_YML_DIR)
        self.set_debug_level(DEBUG_LEVEL)

        # Store the base path
        self._base_path = os.path.dirname(os.path.abspath(__file__))

        # Call the parent constructor
        super(TestGenerate, self).__init__(
            *args,
            script_dirs=[os.path.join(self._base_path, 'smacha_scripts/smacha_test_examples')],
            template_dirs=[
                os.path.join(self._base_path, ROS_TEMPLATES_DIR),
                os.path.join(self._base_path, TEMPLATES_DIR)
            ],
            **kwargs)

    def test_generate(self):
        """Test generating against baseline files"""
        # Each test case maps a SMACHA script to the hand-written Python
        # baseline its generated output (with UUIDs stripped) must match.
        for test_case in CONF_DICT['TEST_GENERATE']:
            with self.subTest(test_case=test_case):
                test_params = test_case.values()[0]
                script_file = test_params['script']
                baseline = test_params['baseline']
                with open(os.path.join(self._base_path, 'smacha_test_examples/{}'.format(baseline))) as original_file:
                    generated_code = self._strip_uuids(self._generate(os.path.join(self._base_path, 'smacha_scripts/smacha_test_examples/{}'.format(script_file))))
                    original_code = original_file.read()
                    self.assertTrue(self._compare(generated_code, original_code, file_a='generated', file_b='original'))
if __name__ == "__main__":
    # Read the configuration file before parsing arguments.
    try:
        base_path = os.path.dirname(os.path.abspath(__file__))
        conf_file_loc = os.path.join(base_path, CONF_FILE)
        # Context manager closes the handle (the original leaked it).
        with open(conf_file_loc) as f:
            CONF_DICT = yaml.load(f)
    except Exception as e:
        print('Failed to read the configuration file. See error:\n{}'.format(e))
        exit()

    # ``in`` replaces dict.has_key(), which was removed in Python 3.
    if 'WRITE_OUTPUT_FILES' in CONF_DICT:
        WRITE_OUTPUT_FILES = CONF_DICT['WRITE_OUTPUT_FILES']
    if 'OUTPUT_PY_DIR' in CONF_DICT:
        OUTPUT_PY_DIR = CONF_DICT['OUTPUT_PY_DIR']
    if 'OUTPUT_YML_DIR' in CONF_DICT:
        OUTPUT_YML_DIR = CONF_DICT['OUTPUT_YML_DIR']
    if 'DEBUG_LEVEL' in CONF_DICT:
        DEBUG_LEVEL = CONF_DICT['DEBUG_LEVEL']

    rospy.init_node('test_smacha_ros_generate', log_level=rospy.DEBUG)
    rostest.rosrun('smacha_ros', 'test_smacha_ros_generate', TestGenerate)
| ReconCell/smacha | smacha_ros/test/smacha_diff_test_examples.py | Python | bsd-3-clause | 3,604 |
from .datareduction import DataReduction
from .datareductionpipeline import DataReductionPipeLine, ProcessingError
| awacha/cct | cct/core2/instrument/components/datareduction/__init__.py | Python | bsd-3-clause | 115 |
# -*- coding: utf-8 -*-
import scrapy
from scraper.items import ResolutionItem
class ResolutionSpider(scrapy.Spider):
    """Crawls the PMO site for government resolution pages and scrapes them."""
    name = "resolutions"
    allowed_domains = ["www.pmo.gov.il"]
    start_urls = ["http://www.pmo.gov.il/Secretary/GovDecisions/Pages/default.aspx"]

    def should_retry(self, response):
        """Sometimes body uses anti-scraping tricks.

        e.g. body is:
        <html><body><script>document.cookie='yyyyyyy=ea850ff3yyyyyyy_ea850ff3; path=/';window.location.href=window.location.href;</script></body></html>

        Retrying usually yields a correct response.
        """
        # A genuine page never starts with this bare cookie-setting stub.
        if not response.body.startswith('<html><body><script>'):
            return False
        self.logger.debug('anti-scraping trick for url %s', response.url)
        new_request = response.request.copy()
        new_request.dont_filter = True  # don't de-duplicate the url for retrying
        return new_request

    def parse(self, response):
        """Parse pages containing links to government resolutions."""
        # check if response was bad
        new_request = self.should_retry(response)
        # retry if so
        if new_request:
            yield new_request
            return
        # parse specific resolutions found in current page
        for sel in response.xpath("//div[@id='GDSR']/div/a/@href"):
            yield scrapy.Request(sel.extract(), callback=self.parse_resolution)
        # parse next pages
        for sel in response.xpath("//a[@class='PMM-resultsPagingNumber']/@href"):
            url = response.urljoin(sel.extract())
            yield scrapy.Request(url)

    def parse_resolution(self, response):
        """Scrape relevant fields in specific resolution response."""
        # check if response was bad
        new_request = self.should_retry(response)
        # retry if so
        if new_request:
            yield new_request
            return
        try:
            # Each xpath yields a list of matched strings; the item pipeline
            # is responsible for any further normalization.
            yield ResolutionItem(
                url=response.url,
                date=response.xpath("/html/head/meta[@name='EventDate']/@content").extract(),
                resolution_number=response.xpath("//*[@id='aspnetForm']/@action").extract(),
                gov=response.xpath("/html/head/meta[@name='Subjects']/@content").extract(),
                title=response.xpath("//h1[@class='mainTitle']//text()").extract(),
                subject=response.xpath("//div[@id='ctl00_PlaceHolderMain_GovXParagraph1Panel']//text()[not(ancestor::h3)]").extract(),
                body=response.xpath("//*[@id='ctl00_PlaceHolderMain_GovXParagraph2Panel']//text()[not(ancestor::h3)]").extract(),
            )
        except AttributeError:
            # Layout changes can break the extraction; log and move on.
            self.logger.error('bad body in response for url %s and body %s',
                              response.url, response.body)
| dvirsky/govsearch | scraper/scraper/spiders/resolutions.py | Python | bsd-3-clause | 2,802 |
import re, urllib2
# Plugin metadata — presumably consumed by the bot's plugin loader; confirm
# against the loader's conventions.
arguments = ["self", "info", "args"]  # parameter names main() is called with
helpstring = "randfact"               # command name / help text
minlevel = 1                          # minimum user level allowed to invoke
def main(connection, info, args):
    """Returns a random fact"""
    # Close the HTTP response explicitly instead of leaking the socket.
    response = urllib2.urlopen("http://randomfunfacts.com/")
    try:
        source = response.read()
    finally:
        response.close()
    match = re.search(r"<strong><i>(.*)</i></strong>", source)
    # Guard against layout changes: previously a missing match crashed
    # with AttributeError on ``None.group``.
    fact = match.group(1) if match else "Could not retrieve a fact."
    connection.msg(info["channel"], "%s: %s" % (info["sender"], fact))
| sonicrules1234/sonicbot | oldplugins/randfact.py | Python | bsd-3-clause | 378 |
import decimal
import sys
import steel
from steel import chunks
# Enumerations from the PNG specification, as (value, label) pairs.
COMPRESSION_CHOICES = (
    (0, 'zlib/deflate'),
)

RENDERING_INTENT_CHOICES = (
    (0, 'Perceptual'),
    (1, 'Relative Colorimetric'),
    (2, 'Saturation'),
    (3, 'Absolute Colorimetric'),
)

PHYSICAL_UNIT_CHOICES = (
    (0, '<Unknown Unit>'),
    (1, 'Meters'),
)

FILTER_CHOICES = (
    (0, 'Adaptive Filtering'),
)

INTERLACE_CHOICES = (
    (0, '<No Interlacing>'),
    (1, 'Adam7'),
)
class Chunk(chunks.Chunk, encoding='ascii'):
    """
    A special chunk for PNG, which puts the size before the type
    and includes a CRC field for verifying data integrity.
    """
    size = steel.Integer(size=4)
    id = steel.String(size=4)
    payload = chunks.Payload(size=size)
    # The CRC covers everything from the id field through the payload.
    crc = steel.CRC32(first=id)

    @property
    def is_critical(self):
        # Critical chunks will always have an uppercase letter for the
        # first character in the type. Ancillary will always be lower.
        # NOTE(review): ``self.type`` — the field declared above is ``id``;
        # confirm the chunks.Chunk base class provides ``type``, otherwise
        # this raises AttributeError.
        return self.type[0].upper() == self.type[0]

    @property
    def is_public(self):
        # Public chunks will always have an uppercase letter for the
        # second character in the type. Private will always be lower.
        # NOTE(review): see the ``self.type`` note on is_critical.
        return self.type[1].upper() == self.type[1]
@Chunk('IHDR')
class Header(steel.Structure):
    # Image header: dimensions plus the pixel/stream format descriptors.
    width = steel.Integer(size=4)
    height = steel.Integer(size=4)
    bit_depth = steel.Integer(size=1, choices=(1, 2, 4, 8, 16))
    color_type = steel.Integer(size=1, choices=(0, 2, 3, 4, 6))
    compression_method = steel.Integer(size=1, choices=COMPRESSION_CHOICES)
    filter_method = steel.Integer(size=1, choices=FILTER_CHOICES)
    interlace_method = steel.Integer(size=1, choices=INTERLACE_CHOICES)
class HundredThousand(steel.Integer):
    """
    Value is usable as a Decimal in Python, but stored
    as an integer after multiplying the value by 100,000
    """
    def __init__(self):
        super(HundredThousand, self).__init__(size=4)

    def decode(self, value):
        value = super(HundredThousand, self).decode(value)
        # BUG FIX: '%05s' pads with spaces, producing e.g. '0.   42', which
        # Decimal() rejects with InvalidOperation.  '%05d' zero-pads so the
        # stored integer lands in the correct decimal places.
        return decimal.Decimal('0.%05d' % value)

    def encode(self, obj, value):
        # Inverse of decode: shift the fraction into an integer.
        return super(HundredThousand, self).encode(obj, int(value * 100000))
@Chunk('cHRM')
class Chromaticity(steel.Structure):
    # White point and primary chromaticities, each stored as value*100000.
    white_x = HundredThousand()
    white_y = HundredThousand()
    red_x = HundredThousand()
    red_y = HundredThousand()
    green_x = HundredThousand()
    green_y = HundredThousand()
    blue_x = HundredThousand()
    blue_y = HundredThousand()
@Chunk('gAMA')
class Gamma(steel.Structure):
    # Image gamma, stored as gamma*100000.
    value = HundredThousand()
@Chunk('iCCP')
class ICCProfile(steel.Structure):
    # Embedded ICC color profile: name, compression byte, compressed blob.
    name = steel.String(encoding='latin-1')
    compression = steel.Integer(size=1, choices=COMPRESSION_CHOICES)
    profile = steel.Bytes(size=steel.Remainder)  # TODO: decompress
@Chunk('sBIT')
class SignificantBits(steel.Structure):
    # Raw payload; its layout varies with the image's color type.
    data = steel.Bytes(size=steel.Remainder)

    # TODO: decode based on parent Header.color_type
@Chunk('sRGB')
class sRGB(steel.Structure):
    # Declares the image is in the sRGB space with the given intent.
    rendering_intent = steel.Integer(size=1, choices=RENDERING_INTENT_CHOICES)
class PaletteColor(steel.Structure):
    # One RGB palette entry (one byte per channel).
    red = steel.Integer(size=1)
    green = steel.Integer(size=1)
    blue = steel.Integer(size=1)
@Chunk('PLTE')
class Palette(steel.Structure):
    # The full color palette; iterable for convenience.
    colors = steel.List(steel.SubStructure(PaletteColor), size=steel.Remainder)

    def __iter__(self):
        return iter(self.colors)
@Chunk('bKGD')
class Background(steel.Structure):
    # Raw payload; its layout varies with the image's color type.
    data = steel.Bytes(size=steel.Remainder)

    # TODO: decode based on parent Header.color_type
@Chunk('hIST')
class Histogram(steel.Structure):
    # Approximate usage frequency of each palette entry (16-bit each).
    frequencies = steel.List(steel.Integer(size=2), size=steel.Remainder)
@Chunk('tRNS')
class Transparency(steel.Structure):
    # Raw payload; its layout varies with the image's color type.
    data = steel.Bytes(size=steel.Remainder)

    # TODO: decode based on parent Header.color_type
@Chunk('IDAT', multiple=True)
class Data(steel.Structure):
    # One slice of the (possibly split) compressed image data stream.
    data = steel.Bytes(size=steel.Remainder)
@Chunk('pHYs')
class PhysicalDimentions(steel.Structure):
    # Pixels-per-unit in each direction.  (Class name keeps its historical
    # spelling; renaming would break external references.)
    x = steel.Integer(size=4)
    y = steel.Integer(size=4)
    unit = steel.Integer(size=1, choices=PHYSICAL_UNIT_CHOICES)
class SuggestedPaletteEntry(steel.Structure):
    # One sPLT entry: RGBA sample plus its frequency, all 16-bit here.
    red = steel.Integer(size=2)
    green = steel.Integer(size=2)
    blue = steel.Integer(size=2)
    alpha = steel.Integer(size=2)
    frequency = steel.Integer(size=2)

    # TODO: figure out a good way to handle size based on sample_depth below
@Chunk('sPLT')
class SuggestedPalette(steel.Structure):
    # A named alternative palette suggestion, with per-entry frequencies.
    name = steel.String(encoding='latin-1')
    sample_depth = steel.Integer(size=1)
    colors = steel.List(steel.SubStructure(SuggestedPaletteEntry), size=steel.Remainder)
@Chunk('tIME')
class Timestamp(steel.Structure):
    # tIME chunk: last-modification time stored as separate integer fields.
    year = steel.Integer(size=2)
    month = steel.Integer(size=1)
    day = steel.Integer(size=1)
    hour = steel.Integer(size=1)
    minute = steel.Integer(size=1)
    second = steel.Integer(size=1)
    # TODO: convert this into a datetime object
@Chunk('tEXt', multiple=True)
class Text(steel.Structure, encoding='latin-1'):
    # tEXt chunk: uncompressed latin-1 keyword/value pair; may repeat.
    keyword = steel.String()
    content = steel.String(size=steel.Remainder)
@Chunk('zTXt', multiple=True)
class CompressedText(steel.Structure, encoding='latin-1'):
    # zTXt chunk: latin-1 keyword plus compressed text; may repeat.
    keyword = steel.String()
    compression = steel.Integer(size=1, choices=COMPRESSION_CHOICES)
    content = steel.Bytes(size=steel.Remainder) # TODO: decompress
@Chunk('iTXt', multiple=True)
class InternationalText(steel.Structure, encoding='utf8'):
    # iTXt chunk: UTF-8 text with language tag and translated keyword;
    # content may be compressed depending on is_compressed. May repeat.
    keyword = steel.String()
    is_compressed = steel.Integer(size=1)
    compression = steel.Integer(size=1, choices=COMPRESSION_CHOICES)
    language = steel.String()
    translated_keyword = steel.String()
    content = steel.Bytes(size=steel.Remainder) # TODO: decompress
@Chunk('IEND')
class End(steel.Structure):
    # IEND chunk: empty payload; used as the chunk-list terminator below.
    pass
class PNG(steel.Structure):
    # Top-level PNG file layout: the fixed 8-byte signature, the header
    # structure, then every remaining chunk parsed by type until IEND.
    signature = steel.FixedString(b'\x89PNG\x0d\x0a\x1a\x0a')
    header = steel.SubStructure(Header)
    chunks = chunks.ChunkList(Chunk, (Header, Chromaticity, Gamma, ICCProfile,
                                      SignificantBits, sRGB, Palette, Background,
                                      Histogram, Transparency, PhysicalDimentions,
                                      SuggestedPalette, Data, Timestamp, Text,
                                      CompressedText, InternationalText), terminator=End)

    @property
    def data_chunks(self):
        """Yield only the IDAT (image data) chunks, in file order."""
        for chunk in self.chunks:
            if isinstance(chunk, Data):
                yield chunk
if __name__ == '__main__':
    # Parse the PNG named on the command line and report its dimensions
    # and IDAT chunks. Use a context manager so the file handle is closed
    # deterministically (the original leaked the handle from open()).
    with open(sys.argv[1], 'rb') as image_file:
        png = PNG(image_file)
        print('%s x %s' % (png.header.width, png.header.height))
        print(list(png.data_chunks))
| gulopine/steel | examples/images/png.py | Python | bsd-3-clause | 6,898 |
from .group_analysis import create_fsl_flame_wf, \
get_operation
# Public API re-exported from .group_analysis. (Backslash continuations
# are redundant inside brackets and have been dropped.)
__all__ = ['create_fsl_flame_wf',
           'get_operation']
| FCP-INDI/C-PAC | CPAC/group_analysis/__init__.py | Python | bsd-3-clause | 158 |
import numpy as np
from sklearn import datasets, svm

# Load the iris dataset: 150 samples of 4 features, 3 classes.
iris = datasets.load_iris()
iris_X = iris.data
iris_y = iris.target

# Hold out 10 randomly chosen samples as test data. (Note: these are
# random samples, not the first 10 -- np.random.permutation shuffles the
# index order.)  Fixed typo: "indicies" -> "indices".
indices = np.random.permutation(len(iris_X))
iris_X_train = iris_X[indices[:-10]]
iris_y_train = iris_y[indices[:-10]]
iris_X_test = iris_X[indices[-10:]]
iris_y_test = iris_y[indices[-10:]]

# Fit a linear support-vector classifier on the training split and echo
# the fitted estimator.
svc = svm.SVC(kernel='linear')
print(svc.fit(iris_X_train, iris_y_train))
from __future__ import division, absolute_import, print_function
r''' Test the .npy file format.
Set up:
>>> import sys
>>> from io import BytesIO
>>> from numpy.lib import format
>>>
>>> scalars = [
... np.uint8,
... np.int8,
... np.uint16,
... np.int16,
... np.uint32,
... np.int32,
... np.uint64,
... np.int64,
... np.float32,
... np.float64,
... np.complex64,
... np.complex128,
... object,
... ]
>>>
>>> basic_arrays = []
>>>
>>> for scalar in scalars:
... for endian in '<>':
... dtype = np.dtype(scalar).newbyteorder(endian)
... basic = np.arange(15).astype(dtype)
... basic_arrays.extend([
... np.array([], dtype=dtype),
... np.array(10, dtype=dtype),
... basic,
... basic.reshape((3,5)),
... basic.reshape((3,5)).T,
... basic.reshape((3,5))[::-1,::2],
... ])
...
>>>
>>> Pdescr = [
... ('x', 'i4', (2,)),
... ('y', 'f8', (2, 2)),
... ('z', 'u1')]
>>>
>>>
>>> PbufferT = [
... ([3,2], [[6.,4.],[6.,4.]], 8),
... ([4,3], [[7.,5.],[7.,5.]], 9),
... ]
>>>
>>>
>>> Ndescr = [
... ('x', 'i4', (2,)),
... ('Info', [
... ('value', 'c16'),
... ('y2', 'f8'),
... ('Info2', [
... ('name', 'S2'),
... ('value', 'c16', (2,)),
... ('y3', 'f8', (2,)),
... ('z3', 'u4', (2,))]),
... ('name', 'S2'),
... ('z2', 'b1')]),
... ('color', 'S2'),
... ('info', [
... ('Name', 'U8'),
... ('Value', 'c16')]),
... ('y', 'f8', (2, 2)),
... ('z', 'u1')]
>>>
>>>
>>> NbufferT = [
... ([3,2], (6j, 6., ('nn', [6j,4j], [6.,4.], [1,2]), 'NN', True), 'cc', ('NN', 6j), [[6.,4.],[6.,4.]], 8),
... ([4,3], (7j, 7., ('oo', [7j,5j], [7.,5.], [2,1]), 'OO', False), 'dd', ('OO', 7j), [[7.,5.],[7.,5.]], 9),
... ]
>>>
>>>
>>> record_arrays = [
... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')),
... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')),
... np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')),
... np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')),
... ]
Test the magic string writing.
>>> format.magic(1, 0)
'\x93NUMPY\x01\x00'
>>> format.magic(0, 0)
'\x93NUMPY\x00\x00'
>>> format.magic(255, 255)
'\x93NUMPY\xff\xff'
>>> format.magic(2, 5)
'\x93NUMPY\x02\x05'
Test the magic string reading.
>>> format.read_magic(BytesIO(format.magic(1, 0)))
(1, 0)
>>> format.read_magic(BytesIO(format.magic(0, 0)))
(0, 0)
>>> format.read_magic(BytesIO(format.magic(255, 255)))
(255, 255)
>>> format.read_magic(BytesIO(format.magic(2, 5)))
(2, 5)
Test the header writing.
>>> for arr in basic_arrays + record_arrays:
... f = BytesIO()
... format.write_array_header_1_0(f, arr) # XXX: arr is not a dict, items gets called on it
    ... print(repr(f.getvalue()))
...
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '|u1', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '|u1', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '|i1', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '|i1', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<u2', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<u2', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>u2', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>u2', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<i2', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<i2', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>i2', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>i2', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<u4', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<u4', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>u4', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>u4', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<i4', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<i4', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>i4', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>i4', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<u8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<u8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>u8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>u8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<i8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<i8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>i8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>i8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<f4', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<f4', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>f4', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>f4', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<f8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<f8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>f8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>f8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<c8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<c8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>c8', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>c8', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '<c16', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '<c16', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': '>c16', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': '>c16', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (0,)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': ()} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (15,)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 5)} \n"
"F\x00{'descr': 'O', 'fortran_order': True, 'shape': (5, 3)} \n"
"F\x00{'descr': 'O', 'fortran_order': False, 'shape': (3, 3)} \n"
"v\x00{'descr': [('x', '<i4', (2,)), ('y', '<f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
"\x16\x02{'descr': [('x', '<i4', (2,)),\n ('Info',\n [('value', '<c16'),\n ('y2', '<f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '<c16', (2,)),\n ('y3', '<f8', (2,)),\n ('z3', '<u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '<U8'), ('Value', '<c16')]),\n ('y', '<f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
"v\x00{'descr': [('x', '>i4', (2,)), ('y', '>f8', (2, 2)), ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
"\x16\x02{'descr': [('x', '>i4', (2,)),\n ('Info',\n [('value', '>c16'),\n ('y2', '>f8'),\n ('Info2',\n [('name', '|S2'),\n ('value', '>c16', (2,)),\n ('y3', '>f8', (2,)),\n ('z3', '>u4', (2,))]),\n ('name', '|S2'),\n ('z2', '|b1')]),\n ('color', '|S2'),\n ('info', [('Name', '>U8'), ('Value', '>c16')]),\n ('y', '>f8', (2, 2)),\n ('z', '|u1')],\n 'fortran_order': False,\n 'shape': (2,)} \n"
'''
import sys
import os
import shutil
import tempfile
import warnings
from io import BytesIO
import numpy as np
from numpy.compat import asbytes, asbytes_nested, sixu
from numpy.testing import (
run_module_suite, assert_, assert_array_equal, assert_raises, raises,
dec
)
from numpy.lib import format
tempdir = None
# Module-level setup.
def setup_module():
    """Create the scratch directory shared by this module's tests."""
    global tempdir
    tempdir = tempfile.mkdtemp()
def teardown_module():
    """Remove the scratch directory, tolerating one that was never made."""
    global tempdir
    if tempdir is None or not os.path.isdir(tempdir):
        return
    shutil.rmtree(tempdir)
    tempdir = None
# Generate some basic arrays to test with.
# One group per scalar dtype and byte order; each group covers the array
# layouts the reader/writer must handle: empty, rank-0, 1-D, C-order 2-D,
# F-order 2-D, and a non-contiguous slice.
scalars = [
    np.uint8,
    np.int8,
    np.uint16,
    np.int16,
    np.uint32,
    np.int32,
    np.uint64,
    np.int64,
    np.float32,
    np.float64,
    np.complex64,
    np.complex128,
    object,
]
basic_arrays = []
for scalar in scalars:
    for endian in '<>':
        dtype = np.dtype(scalar).newbyteorder(endian)
        basic = np.arange(1500).astype(dtype)
        basic_arrays.extend([
            # Empty
            np.array([], dtype=dtype),
            # Rank-0
            np.array(10, dtype=dtype),
            # 1-D
            basic,
            # 2-D C-contiguous
            basic.reshape((30, 50)),
            # 2-D F-contiguous
            basic.reshape((30, 50)).T,
            # 2-D non-contiguous
            basic.reshape((30, 50))[::-1, ::2],
        ])
# More complicated record arrays.
# This is the structure of the table used for plain objects:
#
# +-+-+-+
# |x|y|z|
# +-+-+-+

# Structure of a plain array description:
Pdescr = [
    ('x', 'i4', (2,)),
    ('y', 'f8', (2, 2)),
    ('z', 'u1')]

# A plain list of tuples with values for testing:
PbufferT = [
    # x     y                  z
    ([3, 2], [[6., 4.], [6., 4.]], 8),
    ([4, 3], [[7., 5.], [7., 5.]], 9),
    ]

# This is the structure of the table used for nested objects (DON'T PANIC!):
#
# +-+---------------------------------+-----+----------+-+-+
# |x|Info                             |color|info      |y|z|
# | +-----+--+----------------+----+--+     +----+-----+ | |
# | |value|y2|Info2           |name|z2|     |Name|Value| | |
# | |     |  +----+-----+--+--+    |  |     |    |     | | |
# | |     |  |name|value|y3|z3|    |  |     |    |     | | |
# +-+-----+--+----+-----+--+--+----+--+-----+----+-----+-+-+
#

# The corresponding nested array description:
Ndescr = [
    ('x', 'i4', (2,)),
    ('Info', [
        ('value', 'c16'),
        ('y2', 'f8'),
        ('Info2', [
            ('name', 'S2'),
            ('value', 'c16', (2,)),
            ('y3', 'f8', (2,)),
            ('z3', 'u4', (2,))]),
        ('name', 'S2'),
        ('z2', 'b1')]),
    ('color', 'S2'),
    ('info', [
        ('Name', 'U8'),
        ('Value', 'c16')]),
    ('y', 'f8', (2, 2)),
    ('z', 'u1')]

NbufferT = [
    # x     Info                                                color info        y                  z
    #       value y2 Info2                            name z2         Name Value
    #                name   value    y3       z3
    ([3, 2], (6j, 6., ('nn', [6j, 4j], [6., 4.], [1, 2]), 'NN', True),
     'cc', ('NN', 6j), [[6., 4.], [6., 4.]], 8),
    ([4, 3], (7j, 7., ('oo', [7j, 5j], [7., 5.], [2, 1]), 'OO', False),
     'dd', ('OO', 7j), [[7., 5.], [7., 5.]], 9),
    ]

# Both byte orders of both the plain and the nested descriptions.
record_arrays = [
    np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('<')),
    np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('<')),
    np.array(PbufferT, dtype=np.dtype(Pdescr).newbyteorder('>')),
    np.array(NbufferT, dtype=np.dtype(Ndescr).newbyteorder('>')),
    ]
# BytesIO that reads a random number of bytes (1..size) per call, to
# exercise format.read_array's handling of short reads.
class BytesIOSRandomSize(BytesIO):
    def read(self, size=None):
        """Read at most ``size`` bytes, returning a random-length chunk.

        ``size=None`` (read everything) and non-positive sizes are passed
        straight through; the original implementation crashed on them
        because ``random.randint(1, size)`` rejects those values.
        """
        import random
        if size is None or size <= 0:
            return super(BytesIOSRandomSize, self).read(size)
        return super(BytesIOSRandomSize, self).read(random.randint(1, size))
def roundtrip(arr):
    """Write ``arr`` in .npy format to memory and read it back."""
    sink = BytesIO()
    format.write_array(sink, arr)
    return format.read_array(BytesIO(sink.getvalue()))
def roundtrip_randsize(arr):
    """Round-trip ``arr`` through a stream that yields short, random reads."""
    sink = BytesIO()
    format.write_array(sink, arr)
    return format.read_array(BytesIOSRandomSize(sink.getvalue()))
def roundtrip_truncated(arr):
    """Round-trip ``arr`` through a stream that is one byte too short."""
    sink = BytesIO()
    format.write_array(sink, arr)
    truncated = sink.getvalue()[0:-1]
    return format.read_array(BytesIO(truncated))
def assert_equal_(o1, o2):
    """Assert plain equality; usable as a nose ``yield`` callable."""
    assert_(o1 == o2)
def test_roundtrip():
    # Every basic and record array must survive an in-memory round-trip.
    for original in basic_arrays + record_arrays:
        restored = roundtrip(original)
        yield assert_array_equal, original, restored
def test_roundtrip_randsize():
    # Short random reads must not corrupt non-object arrays.
    for original in basic_arrays + record_arrays:
        if original.dtype == object:
            continue
        restored = roundtrip_randsize(original)
        yield assert_array_equal, original, restored
def test_roundtrip_truncated():
    # A stream missing its final byte must raise ValueError on read.
    for original in basic_arrays:
        if original.dtype == object:
            continue
        yield assert_raises, ValueError, roundtrip_truncated, original
def test_long_str():
    # Items larger than the internal buffer size must survive a
    # round-trip (regression test for gh-4027).
    item_width = format.BUFFER_SIZE + 1
    original = np.ones(1, dtype=np.dtype((str, item_width)))
    assert_array_equal(original, roundtrip(original))
@dec.slow
def test_memmap_roundtrip():
    """open_memmap must write files byte-identical to write_array.

    Fixme: test crashes nose on windows, so it is skipped there.
    """
    if sys.platform == 'win32' or sys.platform == 'cygwin':
        return
    for arr in basic_arrays + record_arrays:
        if arr.dtype.hasobject:
            # Skip these since they can't be mmap'ed.
            continue
        # Write it out normally and through mmap. Context managers
        # replace the original's raw open/close and try/finally, so no
        # handle can leak if an assertion fires mid-loop.
        nfn = os.path.join(tempdir, 'normal.npy')
        mfn = os.path.join(tempdir, 'memmap.npy')
        with open(nfn, 'wb') as fp:
            format.write_array(fp, arr)

        fortran_order = (
            arr.flags.f_contiguous and not arr.flags.c_contiguous)
        ma = format.open_memmap(mfn, mode='w+', dtype=arr.dtype,
                                shape=arr.shape, fortran_order=fortran_order)
        ma[...] = arr
        del ma  # flush and close the memmap before comparing on-disk bytes

        # Check that both of these files' contents are the same.
        with open(nfn, 'rb') as fp:
            normal_bytes = fp.read()
        with open(mfn, 'rb') as fp:
            memmap_bytes = fp.read()
        yield assert_equal_, normal_bytes, memmap_bytes

        # Check that reading the file using memmap works.
        ma = format.open_memmap(nfn, mode='r')
        del ma
def test_compressed_roundtrip():
    # An array saved with savez_compressed must load back unchanged.
    original = np.random.rand(200, 200)
    target = os.path.join(tempdir, 'compressed.npz')
    np.savez_compressed(target, arr=original)
    loaded = np.load(target)['arr']
    assert_array_equal(original, loaded)
def test_python2_python3_interoperability():
    # A .npy file produced by the "other" major Python version must load
    # correctly from the bundled data directory.
    fname = 'win64python2.npy' if sys.version_info[0] >= 3 else 'python3.npy'
    path = os.path.join(os.path.dirname(__file__), 'data', fname)
    loaded = np.load(path)
    assert_array_equal(loaded, np.ones(2))
def test_pickle_python2_python3():
    # Test that loading object arrays saved on Python 2 works both on
    # Python 2 and Python 3 and vice versa
    data_dir = os.path.join(os.path.dirname(__file__), 'data')
    if sys.version_info[0] >= 3:
        xrange = range
    else:
        import __builtin__
        xrange = __builtin__.xrange

    # The fixture array deliberately mixes None, a builtin type object, a
    # unicode string, and raw bytes that are not valid UTF-8.
    expected = np.array([None, xrange, sixu('\u512a\u826f'),
                         asbytes('\xe4\xb8\x8d\xe8\x89\xaf')],
                        dtype=object)

    for fname in ['py2-objarr.npy', 'py2-objarr.npz',
                  'py3-objarr.npy', 'py3-objarr.npz']:
        path = os.path.join(data_dir, fname)

        if (fname.endswith('.npz') and sys.version_info[0] == 2 and
                sys.version_info[1] < 7):
            # Reading object arrays directly from zipfile appears to fail
            # on Py2.6, see cfae0143b4
            continue

        for encoding in ['bytes', 'latin1']:
            if (sys.version_info[0] >= 3 and sys.version_info[1] < 4 and
                    encoding == 'bytes'):
                # The bytes encoding is available starting from Python 3.4
                continue

            data_f = np.load(path, encoding=encoding)
            if fname.endswith('.npz'):
                data = data_f['x']
                data_f.close()
            else:
                data = data_f

            if sys.version_info[0] >= 3:
                if encoding == 'latin1' and fname.startswith('py2'):
                    # latin1-decoding py2 bytes yields str, not bytes.
                    assert_(isinstance(data[3], str))
                    assert_array_equal(data[:-1], expected[:-1])
                    # mojibake occurs
                    assert_array_equal(data[-1].encode(encoding), expected[-1])
                else:
                    assert_(isinstance(data[3], bytes))
                    assert_array_equal(data, expected)
            else:
                assert_array_equal(data, expected)

        # On Py3, loading a py2 pickle without an explicit encoding must
        # fail with UnicodeError, and disabling fix_imports must fail on
        # the missing Py2 module names with ImportError.
        if sys.version_info[0] >= 3:
            if fname.startswith('py2'):
                if fname.endswith('.npz'):
                    data = np.load(path)
                    assert_raises(UnicodeError, data.__getitem__, 'x')
                    data.close()
                    data = np.load(path, fix_imports=False, encoding='latin1')
                    assert_raises(ImportError, data.__getitem__, 'x')
                    data.close()
                else:
                    assert_raises(UnicodeError, np.load, path)
                    assert_raises(ImportError, np.load, path,
                                  encoding='latin1', fix_imports=False)
def test_version_2_0():
f = BytesIO()
# requires more than 2 byte for header
dt = [(("%d" % i) * 100, float) for i in range(500)]
d = np.ones(1000, dtype=dt)
format.write_array(f, d, version=(2, 0))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', UserWarning)
format.write_array(f, d)
assert_(w[0].category is UserWarning)
f.seek(0)
n = format.read_array(f)
assert_array_equal(d, n)
# 1.0 requested but data cannot be saved this way
assert_raises(ValueError, format.write_array, f, d, (1, 0))
def test_version_2_0_memmap():
    # requires more than 2 byte for header
    dt = [(("%d" % i) * 100, float) for i in range(500)]
    d = np.ones(1000, dtype=dt)
    tf = tempfile.mktemp('', 'mmap', dir=tempdir)

    # 1.0 requested but data cannot be saved this way
    assert_raises(ValueError, format.open_memmap, tf, mode='w+', dtype=d.dtype,
                  shape=d.shape, version=(1, 0))

    ma = format.open_memmap(tf, mode='w+', dtype=d.dtype,
                            shape=d.shape, version=(2, 0))
    ma[...] = d
    del ma  # flush and close the memmap before reopening the file

    with warnings.catch_warnings(record=True) as w:
        warnings.filterwarnings('always', '', UserWarning)
        # Auto-selected version must fall back to 2.0 and warn about it.
        ma = format.open_memmap(tf, mode='w+', dtype=d.dtype,
                                shape=d.shape, version=None)
        assert_(w[0].category is UserWarning)
        ma[...] = d
        del ma

    # Reading the 2.0-format file back through memmap must round-trip.
    ma = format.open_memmap(tf, mode='r')
    assert_array_equal(ma, d)
def test_write_version():
    stream = BytesIO()
    arr = np.arange(1)
    # These should pass. (Omitting ``version`` is the same as passing
    # None, so the original's six calls collapse to this sequence.)
    for good in ((1, 0), None, None, None, (2, 0), None):
        format.write_array(stream, arr, version=good)
    # These should all fail.
    bad_versions = (
        (1, 1),
        (0, 0),
        (0, 1),
        (2, 2),
        (255, 255),
    )
    for version in bad_versions:
        try:
            format.write_array(stream, arr, version=version)
        except ValueError:
            pass
        else:
            raise AssertionError("we should have raised a ValueError for the bad version %r" % (version,))
# Well-formed magic prefixes carrying version numbers this reader does
# not support.
bad_version_magic = asbytes_nested([
    '\x93NUMPY\x01\x01',
    '\x93NUMPY\x00\x00',
    '\x93NUMPY\x00\x01',
    '\x93NUMPY\x02\x00',
    '\x93NUMPY\x02\x02',
    '\x93NUMPY\xff\xff',
])
# Structurally invalid magic strings: wrong leading byte, wrong case,
# wrong name, or truncated before the version bytes.
malformed_magic = asbytes_nested([
    '\x92NUMPY\x01\x00',
    '\x00NUMPY\x01\x00',
    '\x93numpy\x01\x00',
    '\x93MATLB\x01\x00',
    '\x93NUMPY\x01',
    '\x93NUMPY',
    '',
])
def test_read_magic_bad_magic():
    # Structurally invalid magic strings must be rejected by read_magic.
    for magic in malformed_magic:
        stream = BytesIO(magic)
        yield raises(ValueError)(format.read_magic), stream
def test_read_version_1_0_bad_magic():
    # Both unsupported versions and malformed magic must fail read_array.
    for magic in bad_version_magic + malformed_magic:
        stream = BytesIO(magic)
        yield raises(ValueError)(format.read_array), stream
def test_bad_magic_args():
    # Each version component must fit in an unsigned byte (0..255).
    for major, minor in ((-1, 1), (256, 1), (1, -1), (1, 256)):
        assert_raises(ValueError, format.magic, major, minor)
def test_large_header():
    # A small header dict serializes fine as version 1.0...
    stream = BytesIO()
    format.write_array_header_1_0(stream, {'a': 1, 'b': 2})
    # ...but one whose serialized form exceeds 2**16 bytes cannot.
    stream = BytesIO()
    oversized = {'a': 1, 'b': 2, 'c': 'x'*256*256}
    assert_raises(ValueError, format.write_array_header_1_0, stream, oversized)
def test_bad_header():
    """read_array_header_1_0 must reject short, truncated or wrong-key headers."""
    # header of length less than 2 should fail
    s = BytesIO()
    assert_raises(ValueError, format.read_array_header_1_0, s)
    s = BytesIO(b'1')
    assert_raises(ValueError, format.read_array_header_1_0, s)

    # header shorter than indicated size should fail
    s = BytesIO(b'\x01\x00')
    assert_raises(ValueError, format.read_array_header_1_0, s)

    # headers without the exact keys required should fail
    # (the stream is rewound after writing; the original read from EOF,
    # so the wrong-keys condition was never actually exercised)
    d = {"shape": (1, 2),
         "descr": "x"}
    s = BytesIO()
    format.write_array_header_1_0(s, d)
    s.seek(0)
    assert_raises(ValueError, format.read_array_header_1_0, s)

    d = {"shape": (1, 2),
         "fortran_order": False,
         "descr": "x",
         "extrakey": -1}
    s = BytesIO()
    format.write_array_header_1_0(s, d)
    s.seek(0)
    assert_raises(ValueError, format.read_array_header_1_0, s)
def test_large_file_support():
    # Saving/loading at an offset past 2**32 must work; a sparse file is
    # used so no real 5 GB ever hits the disk.
    from nose import SkipTest
    if (sys.platform == 'win32' or sys.platform == 'cygwin'):
        raise SkipTest("Unknown if Windows has sparse filesystems")
    # try creating a large sparse file
    tf_name = os.path.join(tempdir, 'sparse_file')
    try:
        # seek past end would work too, but linux truncate somewhat
        # increases the chances that we have a sparse filesystem and can
        # avoid actually writing 5GB
        import subprocess as sp
        sp.check_call(["truncate", "-s", "5368709120", tf_name])  # 5 * 1024**3 bytes
    except:
        # NOTE(review): bare except is deliberate here -- any failure
        # (missing truncate binary, no sparse support) just skips the test.
        raise SkipTest("Could not create 5GB large file")
    # write a small array to the end
    with open(tf_name, "wb") as f:
        f.seek(5368709120)
        d = np.arange(5)
        np.save(f, d)
    # read it back
    with open(tf_name, "rb") as f:
        f.seek(5368709120)
        r = np.load(f)
    assert_array_equal(r, d)
if __name__ == "__main__":
    # Delegate to numpy's nose-based runner when executed directly.
    run_module_suite()
| naritta/numpy | numpy/lib/tests/test_format.py | Python | bsd-3-clause | 32,619 |
# Candlestick chart of the first 50 rows of bokeh's MSFT OHLC sample
# data, rendered with the (legacy) bokeh plotting API and pushed to a
# cloud session.
from math import pi
import pandas as pd
from bokeh.sampledata.stocks import MSFT
from bokeh.plotting import *
df = pd.DataFrame(MSFT)[:50]
df['date'] = pd.to_datetime(df['date'])
mids = (df.open + df.close)/2  # vertical centers of the candle bodies
spans = abs(df.close-df.open)  # heights of the candle bodies
inc = df.close > df.open  # days that closed higher (light candles)
dec = df.open > df.close  # days that closed lower (red candles)
w = 12*60*60*1000 # half day in ms
output_cloud("candlestick")
figure(x_axis_type = "datetime", tools="pan,wheel_zoom,box_zoom,reset,previewsave",
       width=1000, name="candlestick")
hold()
# High-low wicks first, then the up-day and down-day bodies on top.
segment(df.date, df.high, df.date, df.low, color='black')
rect(df.date[inc], mids[inc], w, spans[inc], fill_color="#D5E1DD", line_color="black")
rect(df.date[dec], mids[dec], w, spans[dec], fill_color="#F2583E", line_color="black")
curplot().title = "MSFT Candlestick"
xaxis().major_label_orientation = pi/4
grid().grid_line_alpha=0.3
# open a browser
show()
| sahat/bokeh | examples/plotting/cloud/candlestick.py | Python | bsd-3-clause | 853 |
# Time-stamp: <2019-09-25 10:04:48 taoliu>
"""Description: Fine-tuning script to call broad peaks from a single
bedGraph track for scores.
This code is free software; you can redistribute it and/or modify it
under the terms of the BSD License (see the file LICENSE included with
the distribution).
"""
# ------------------------------------
# python modules
# ------------------------------------
import sys
import os
import logging
from MACS2.IO import BedGraphIO
# ------------------------------------
# constants
# ------------------------------------
# Configure root logging once at import time: level 20 == logging.INFO,
# emitting to stderr with a timestamped format.
logging.basicConfig(level=20,
                    format='%(levelname)-5s @ %(asctime)s: %(message)s ',
                    datefmt='%a, %d %b %Y %H:%M:%S',
                    stream=sys.stderr,
                    filemode="w"
                    )
# ------------------------------------
# Misc functions
# ------------------------------------
# Short aliases used throughout this module.
error = logging.critical # function alias
warn = logging.warning
debug = logging.debug
info = logging.info
# ------------------------------------
# Classes
# ------------------------------------
# ------------------------------------
# Main function
# ------------------------------------
def run( options ):
    """Call broad peaks from a single scores bedGraph and write a gappedPeak file.

    ``options`` must provide: ifile, cutoffpeak, cutofflink, minlen,
    lvl1maxgap, lvl2maxgap, ofile, oprefix, outdir, trackline.
    """
    info("Read and build bedGraph...")
    bio = BedGraphIO.bedGraphIO(options.ifile)
    btrack = bio.build_bdgtrack(baseline_value=0)

    info("Call peaks from bedGraph...")
    bpeaks = btrack.call_broadpeaks(lvl1_cutoff=options.cutoffpeak,
                                    lvl2_cutoff=options.cutofflink,
                                    min_length=options.minlen,
                                    lvl1_max_gap=options.lvl1maxgap,
                                    lvl2_max_gap=options.lvl2maxgap)

    info("Write peaks...")
    if options.ofile:
        outfile = os.path.join( options.outdir, options.ofile )
        # Keep the original side effect: the explicit output name also
        # becomes the peak-name prefix below.
        options.oprefix = options.ofile
    else:
        outfile = os.path.join( options.outdir, "%s_c%.1f_C%.2f_l%d_g%d_G%d_broad.bed12" % (options.oprefix,options.cutoffpeak,options.cutofflink,options.minlen,options.lvl1maxgap,options.lvl2maxgap))
    # Context manager guarantees the handle is closed (the original
    # leaked the file object returned by open()).
    with open(outfile, "w") as bf:
        bpeaks.write_to_gappedPeak(bf, name_prefix=(options.oprefix+"_broadRegion").encode(), score_column="score", trackline=options.trackline)
    info("Done")
| taoliu/MACS | MACS2/bdgbroadcall_cmd.py | Python | bsd-3-clause | 2,141 |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-02-13 22:21
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django 1.11: redefine SQLRegistrationRequest.request_ip
    # as a nullable CharField(max_length=31). See 0001_initial for the
    # previous definition of the field.

    dependencies = [
        ('registration', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='sqlregistrationrequest',
            name='request_ip',
            field=models.CharField(max_length=31, null=True),
        ),
    ]
| dimagi/commcare-hq | corehq/apps/registration/migrations/0002_alter_request_ip.py | Python | bsd-3-clause | 477 |
import operator
import numpy as np
import pytest
from pandas.core.dtypes.common import is_list_like
import pandas as pd
from pandas import (
Categorical,
Index,
Interval,
IntervalIndex,
Period,
Series,
Timedelta,
Timestamp,
date_range,
period_range,
timedelta_range,
)
import pandas._testing as tm
from pandas.core.arrays import IntervalArray
@pytest.fixture(
    params=[
        (Index([0, 2, 4, 4]), Index([1, 3, 5, 8])),
        (Index([0.0, 1.0, 2.0, np.nan]), Index([1.0, 2.0, 3.0, np.nan])),
        (
            timedelta_range("0 days", periods=3).insert(4, pd.NaT),
            timedelta_range("1 day", periods=3).insert(4, pd.NaT),
        ),
        (
            date_range("20170101", periods=3).insert(4, pd.NaT),
            date_range("20170102", periods=3).insert(4, pd.NaT),
        ),
        (
            date_range("20170101", periods=3, tz="US/Eastern").insert(4, pd.NaT),
            date_range("20170102", periods=3, tz="US/Eastern").insert(4, pd.NaT),
        ),
    ],
    ids=lambda x: str(x[0].dtype),
)
def left_right_dtypes(request):
    """
    Fixture for building an IntervalArray from various dtypes
    """
    # Each param is a (left, right) pair of endpoint Indexes; the trailing
    # NaN/NaT entries yield a missing interval in the resulting array.
    return request.param
@pytest.fixture
def array(left_right_dtypes):
    """
    Fixture to generate an IntervalArray of various dtypes containing NA if possible
    """
    left, right = left_right_dtypes
    # closed is left at its default ("right") for all dtypes.
    return IntervalArray.from_arrays(left, right)
def create_categorical_intervals(left, right, closed="right"):
    """Return a Categorical wrapping intervals built from endpoint arrays."""
    intervals = IntervalIndex.from_arrays(left, right, closed)
    return Categorical(intervals)
def create_series_intervals(left, right, closed="right"):
    """Return a Series backed by an IntervalArray built from endpoint arrays."""
    intervals = IntervalArray.from_arrays(left, right, closed)
    return Series(intervals)
def create_series_categorical_intervals(left, right, closed="right"):
    """Return a Series of Categorical intervals built from endpoint arrays."""
    index = IntervalIndex.from_arrays(left, right, closed)
    return Series(Categorical(index))
class TestComparison:
    """Equality/inequality (== / !=) behavior of IntervalArray compared
    against scalars, list-likes and pandas interval containers."""
    @pytest.fixture(params=[operator.eq, operator.ne])
    def op(self, request):
        # Only equality-style comparisons are exercised here.
        return request.param
    @pytest.fixture(
        params=[
            IntervalArray.from_arrays,
            IntervalIndex.from_arrays,
            create_categorical_intervals,
            create_series_intervals,
            create_series_categorical_intervals,
        ],
        ids=[
            "IntervalArray",
            "IntervalIndex",
            "Categorical[Interval]",
            "Series[Interval]",
            "Series[Categorical[Interval]]",
        ],
    )
    def interval_constructor(self, request):
        """
        Fixture for all pandas native interval constructors.
        To be used as the LHS of IntervalArray comparisons.
        """
        return request.param
    def elementwise_comparison(self, op, array, other):
        """
        Helper that performs elementwise comparisons between `array` and `other`
        """
        # Broadcast a scalar `other` to the array's length, then compare
        # pairwise in pure Python to build the expected result without
        # relying on pandas' vectorized comparison.
        other = other if is_list_like(other) else [other] * len(array)
        return np.array([op(x, y) for x, y in zip(array, other)])
    def test_compare_scalar_interval(self, op, array):
        """Comparison against an Interval scalar."""
        # matches first interval
        other = array[0]
        result = op(array, other)
        expected = self.elementwise_comparison(op, array, other)
        tm.assert_numpy_array_equal(result, expected)
        # matches on a single endpoint but not both
        other = Interval(array.left[0], array.right[1])
        result = op(array, other)
        expected = self.elementwise_comparison(op, array, other)
        tm.assert_numpy_array_equal(result, expected)
    def test_compare_scalar_interval_mixed_closed(self, op, closed, other_closed):
        """Scalar comparison where `closed` differs between the operands."""
        array = IntervalArray.from_arrays(range(2), range(1, 3), closed=closed)
        other = Interval(0, 1, closed=other_closed)
        result = op(array, other)
        expected = self.elementwise_comparison(op, array, other)
        tm.assert_numpy_array_equal(result, expected)
    def test_compare_scalar_na(self, op, array, nulls_fixture, request):
        """Comparison against NA scalars (None, np.nan, NaT, pd.NA)."""
        result = op(array, nulls_fixture)
        expected = self.elementwise_comparison(op, array, nulls_fixture)
        if nulls_fixture is pd.NA and array.dtype != pd.IntervalDtype("int64"):
            mark = pytest.mark.xfail(
                reason="broken for non-integer IntervalArray; see GH 31882"
            )
            request.node.add_marker(mark)
        tm.assert_numpy_array_equal(result, expected)
    @pytest.mark.parametrize(
        "other",
        [
            0,
            1.0,
            True,
            "foo",
            Timestamp("2017-01-01"),
            Timestamp("2017-01-01", tz="US/Eastern"),
            Timedelta("0 days"),
            Period("2017-01-01", "D"),
        ],
    )
    def test_compare_scalar_other(self, op, array, other):
        """Comparison against non-interval, non-NA scalars."""
        result = op(array, other)
        expected = self.elementwise_comparison(op, array, other)
        tm.assert_numpy_array_equal(result, expected)
    def test_compare_list_like_interval(
        self, op, array, interval_constructor,
    ):
        """Comparison against interval list-likes from every constructor."""
        # same endpoints
        other = interval_constructor(array.left, array.right)
        result = op(array, other)
        expected = self.elementwise_comparison(op, array, other)
        tm.assert_numpy_array_equal(result, expected)
        # different endpoints
        other = interval_constructor(array.left[::-1], array.right[::-1])
        result = op(array, other)
        expected = self.elementwise_comparison(op, array, other)
        tm.assert_numpy_array_equal(result, expected)
        # all nan endpoints
        other = interval_constructor([np.nan] * 4, [np.nan] * 4)
        result = op(array, other)
        expected = self.elementwise_comparison(op, array, other)
        tm.assert_numpy_array_equal(result, expected)
    def test_compare_list_like_interval_mixed_closed(
        self, op, interval_constructor, closed, other_closed
    ):
        """List-like comparison where `closed` differs between operands."""
        array = IntervalArray.from_arrays(range(2), range(1, 3), closed=closed)
        other = interval_constructor(range(2), range(1, 3), closed=other_closed)
        result = op(array, other)
        expected = self.elementwise_comparison(op, array, other)
        tm.assert_numpy_array_equal(result, expected)
    @pytest.mark.parametrize(
        "other",
        [
            (
                Interval(0, 1),
                Interval(Timedelta("1 day"), Timedelta("2 days")),
                Interval(4, 5, "both"),
                Interval(10, 20, "neither"),
            ),
            (0, 1.5, Timestamp("20170103"), np.nan),
            (
                Timestamp("20170102", tz="US/Eastern"),
                Timedelta("2 days"),
                "baz",
                pd.NaT,
            ),
        ],
    )
    def test_compare_list_like_object(self, op, array, other):
        """Comparison against heterogeneous object tuples."""
        result = op(array, other)
        expected = self.elementwise_comparison(op, array, other)
        tm.assert_numpy_array_equal(result, expected)
    def test_compare_list_like_nan(self, op, array, nulls_fixture, request):
        """Comparison against a list of NA scalars."""
        other = [nulls_fixture] * 4
        result = op(array, other)
        expected = self.elementwise_comparison(op, array, other)
        if nulls_fixture is pd.NA and array.dtype.subtype != "i8":
            reason = "broken for non-integer IntervalArray; see GH 31882"
            mark = pytest.mark.xfail(reason=reason)
            request.node.add_marker(mark)
        tm.assert_numpy_array_equal(result, expected)
    @pytest.mark.parametrize(
        "other",
        [
            np.arange(4, dtype="int64"),
            np.arange(4, dtype="float64"),
            date_range("2017-01-01", periods=4),
            date_range("2017-01-01", periods=4, tz="US/Eastern"),
            timedelta_range("0 days", periods=4),
            period_range("2017-01-01", periods=4, freq="D"),
            Categorical(list("abab")),
            Categorical(date_range("2017-01-01", periods=4)),
            pd.array(list("abcd")),
            pd.array(["foo", 3.14, None, object()]),
        ],
        ids=lambda x: str(x.dtype),
    )
    def test_compare_list_like_other(self, op, array, other):
        """Comparison against non-interval array-likes of matching length."""
        result = op(array, other)
        expected = self.elementwise_comparison(op, array, other)
        tm.assert_numpy_array_equal(result, expected)
    @pytest.mark.parametrize("length", [1, 3, 5])
    @pytest.mark.parametrize("other_constructor", [IntervalArray, list])
    def test_compare_length_mismatch_errors(self, op, other_constructor, length):
        """Length-mismatched comparisons must raise ValueError."""
        array = IntervalArray.from_arrays(range(4), range(1, 5))
        other = other_constructor([Interval(0, 1)] * length)
        with pytest.raises(ValueError, match="Lengths must match to compare"):
            op(array, other)
    @pytest.mark.parametrize(
        "constructor, expected_type, assert_func",
        [
            (IntervalIndex, np.array, tm.assert_numpy_array_equal),
            (Series, Series, tm.assert_series_equal),
        ],
    )
    def test_index_series_compat(self, op, constructor, expected_type, assert_func):
        # IntervalIndex/Series that rely on IntervalArray for comparisons
        breaks = range(4)
        index = constructor(IntervalIndex.from_breaks(breaks))
        # scalar comparisons
        other = index[0]
        result = op(index, other)
        expected = expected_type(self.elementwise_comparison(op, index, other))
        assert_func(result, expected)
        other = breaks[0]
        result = op(index, other)
        expected = expected_type(self.elementwise_comparison(op, index, other))
        assert_func(result, expected)
        # list-like comparisons
        other = IntervalArray.from_breaks(breaks)
        result = op(index, other)
        expected = expected_type(self.elementwise_comparison(op, index, other))
        assert_func(result, expected)
        other = [index[0], breaks[0], "foo"]
        result = op(index, other)
        expected = expected_type(self.elementwise_comparison(op, index, other))
        assert_func(result, expected)
    @pytest.mark.parametrize("scalars", ["a", False, 1, 1.0, None])
    def test_comparison_operations(self, scalars):
        # GH #28981: comparing an interval Series to a non-interval scalar
        # is always False (never raises).
        expected = Series([False, False])
        s = pd.Series([pd.Interval(0, 1), pd.Interval(1, 2)], dtype="interval")
        result = s == scalars
        tm.assert_series_equal(result, expected)
| TomAugspurger/pandas | pandas/tests/arithmetic/test_interval.py | Python | bsd-3-clause | 10,306 |
# -*- coding: utf-8 -*-
import time
import requests
from datetime import datetime
from logging import getLogger
from typing import Optional
from typing import Dict
from typing import Iterable
from funcy import compose
from funcy import partial
from pandas import DataFrame
from pandas import to_datetime
from pandas import Series
from pyloniex import PoloniexPublicAPI
from moneybot.clients import Postgres
from moneybot.clients import Poloniex
YEAR_IN_SECS = 60 * 60 * 24 * 365
logger = getLogger(__name__)
def format_time(ts: datetime) -> str:
    """Render *ts* as e.g. '2020-01-02 03:04:05'."""
    return f'{ts:%Y-%m-%d %H:%M:%S}'
def historical(ticker: str) -> Dict:
    """Fetch CoinMarketCap graph history for *ticker*, parsed from JSON."""
    response = requests.get(
        f'https://graphs.coinmarketcap.com/currencies/{ticker}')
    return response.json()
def market_cap(hist_ticker: Dict) -> DataFrame:
    """Shape a CoinMarketCap history dict into a time-indexed DataFrame.

    `hist_ticker` maps series names (e.g. 'price_usd', 'volume_usd') to
    lists of [timestamp_ms, value] pairs. The timestamps of the first
    series become the DatetimeIndex; all series are assumed to share the
    same timestamps (TODO confirm upstream guarantee).

    Fix: the return annotation previously said `Series`, but this function
    has always built and returned a `DataFrame`.
    """
    columns = {}
    index = None
    for name, points in hist_ticker.items():
        if index is None:
            # timestamps are in milliseconds; to_datetime expects
            # nanoseconds for integer input, hence * 1_000_000
            index = [to_datetime(point[0] * 1000000) for point in points]
        columns[name] = [point[1] for point in points]
    return DataFrame(columns, index=index)
coin_history = compose(market_cap, historical)
def marshall(hist_df):
    """Reshape a coin_history() DataFrame into `scraped_chart` row format.

    Adds volume (in BTC), weighted_average, time and currency_pair columns,
    fills the OHLC/quote_volume columns with None (not provided by CMC),
    and drops columns the DB schema doesn't have. NOTE(review): this also
    mutates the caller's DataFrame in place (the 'volume' column is added
    before the copy created by drop()) — confirm that's intended.
    """
    btc_to_usd = hist_df['price_usd'] / hist_df['price_btc']
    # volume in BTC
    # TODO is this correct? or is `'volume'` the quote volume?
    hist_df['volume'] = hist_df['volume_usd'] / btc_to_usd
    hist_df = hist_df.drop([
        'market_cap_by_available_supply',
        'volume_usd'
    ], axis=1)
    hist_df['weighted_average'] = hist_df['price_usd']
    hist_df['time'] = hist_df.index
    # This frame always describes the USD/BTC market.
    hist_df['currency_pair'] = hist_df.apply(lambda x: 'USD_BTC', axis=1)
    def nothing_burger():
        # Column of None values, one per row.
        return hist_df.apply(lambda x: None, axis=1)
    hist_df['open'] = nothing_burger()
    hist_df['high'] = nothing_burger()
    hist_df['low'] = nothing_burger()
    hist_df['close'] = nothing_burger()
    hist_df['quote_volume'] = nothing_burger()
    return hist_df
def historical_prices_of(
    polo: PoloniexPublicAPI,
    btc_price_history: Series,
    pair: str,
    period: int = 900,
    start: Optional[float] = None,
    end: Optional[float] = None,
) -> Iterable[Series]:
    '''
    Returns a series of time-indexed prices.
    `pair` is of the form e.g. 'BTC_ETH',
    `period` is an integer number of seconds,
    either 300, 900, 1800, 7200, 14400, or 86400.
    We do some data marshalling in this method as well,
    to turn API results into stuff amenable for our Postgres DB.

    Yields one pandas Series per non-empty candle; `start`/`end` are unix
    timestamps defaulting to the last year ending now.
    '''
    def contemporary_usd_price(row: Series) -> float:
        # Convert the candle's BTC-denominated weighted average into USD
        # using the USD/BTC price as of the row's timestamp (asof = the
        # most recent observation at or before that time).
        contemporary_btc_price = btc_price_history['price_usd'].asof(row.name)
        return row['weightedAverage'] * contemporary_btc_price
    # Scraping
    now = time.time()
    start = start or now - YEAR_IN_SECS
    end = end or now
    ex_trades = polo.return_chart_data(
        currency_pair=pair,
        period=period,
        start=start,
        end=end,
    )
    # Data marshalling
    ts_df = DataFrame(ex_trades, dtype=float)
    ts_df['time'] = [datetime.fromtimestamp(t) for t in ts_df['date']]
    ts_df.index = ts_df['time']
    ts_df['price_usd'] = ts_df.apply(contemporary_usd_price, axis=1)
    ts_df['currency_pair'] = ts_df.apply(lambda x: pair, axis=1)
    # Rename camelCase API columns to the snake_case DB column names.
    ts_df = ts_df.rename(index=str, columns={
        'quoteVolume': 'quote_volume',
        'weightedAverage': 'weighted_average',
    })
    for _, row in ts_df.iterrows():
        # chart = scraped_chart(pair, row)
        # for some reason, when there's no chart data to report,
        # the API will give us some reading with all 0s.
        if row['volume'] == 0 and row['weighted_average'] == 0:
            # we will just ignore these
            pass
        else:
            yield row
def insert(cursor, row):
    """Insert one chart row (a pandas Series) into `scraped_chart`.

    Parameterized with named placeholders, so `row` must contain every
    referenced key. Does not commit; the caller owns the transaction.
    """
    return cursor.execute("""
    INSERT INTO scraped_chart (time, currency_pair, high, low, price_usd, quote_volume, volume, weighted_average)
    VALUES (%(time)s, %(currency_pair)s, %(high)s, %(low)s, %(price_usd)s, %(quote_volume)s, %(volume)s, %(weighted_average)s);""",
    row.to_dict())
def scrape_since_last_reading():
    """Scrape USD_BTC history plus every Poloniex market's chart data
    since the newest row already in Postgres, inserting into
    `scraped_chart` and committing per market."""
    # postgres client
    client = Postgres.get_client()
    cursor = client.cursor()
    inserter = partial(insert, cursor)
    # get the last time we fetched some data,
    # looking at the most recent result in the db
    query = ' '.join([
        'select time from scraped_chart',
        'order by time desc',
        'limit 1',
    ])
    cursor.execute(query)
    # NOTE(review): raises TypeError if the table is empty (fetchone()
    # returns None) — confirm the table is seeded before first run.
    latest_fetch_time = cursor.fetchone()[0]
    latest_fetch_unix = time.mktime(latest_fetch_time.timetuple())
    # now get USD_BTC history
    btc_price_hist = coin_history('bitcoin')
    # and write that history to DB,
    btc_rows = marshall(btc_price_hist)
    # NOTE since latest fetch time?
    # recent_btc = btc_rows[btc_rows['time'] > latest_fetch_time]
    # [inserter(row) for _, row in recent_btc.iterrows()]
    [inserter(row) for _, row in btc_rows.iterrows()]
    client.commit()
    logger.debug('Scraped USD_BTC')
    # now, a poloniex client
    polo = Poloniex.get_public()
    # and a method for grabbing historical prices
    grab_historical_prices = partial(historical_prices_of, polo, btc_price_hist)
    # for each market,
    for market in polo.return_ticker():
        # fetch all the chart data since last fetch
        generator = grab_historical_prices(
            market,
            start=latest_fetch_unix,
            end=time.time(),
        )
        list(map(inserter, generator))
        client.commit()
        logger.debug(f'Scraped {market}')
    cursor.close()
| elsehow/moneybot | moneybot/market/scrape.py | Python | bsd-3-clause | 5,485 |
import re
from django.conf import settings
from rest_framework import exceptions, serializers
from olympia import amo
from olympia.accounts.serializers import BaseUserSerializer
from olympia.amo.templatetags.jinja_helpers import absolutify
from olympia.amo.urlresolvers import get_outgoing_url, reverse
from olympia.api.fields import ReverseChoiceField, TranslationSerializerField
from olympia.api.serializers import BaseESSerializer
from olympia.api.utils import is_gate_active
from olympia.applications.models import AppVersion
from olympia.bandwagon.models import Collection
from olympia.constants.applications import APPS_ALL
from olympia.constants.base import ADDON_TYPE_CHOICES_API
from olympia.constants.categories import CATEGORIES_BY_ID
from olympia.files.models import File
from olympia.search.filters import AddonAppVersionQueryParam
from olympia.users.models import UserProfile
from olympia.versions.models import (
ApplicationsVersions, License, Version, VersionPreview)
from .models import (
Addon, AddonFeatureCompatibility, CompatOverride, Persona, Preview,
ReplacementAddon, attach_tags)
class AddonFeatureCompatibilitySerializer(serializers.ModelSerializer):
    """Expose an add-on's e10s compatibility, translating the stored
    constant to/from its API string value."""
    e10s = ReverseChoiceField(
        choices=amo.E10S_COMPATIBILITY_CHOICES_API.items())
    class Meta:
        model = AddonFeatureCompatibility
        fields = ('e10s', )
class FileSerializer(serializers.ModelSerializer):
    """Serialize a version File, mapping platform/status constants to API
    strings and exposing the download URL and webextension permissions."""
    url = serializers.SerializerMethodField()
    platform = ReverseChoiceField(choices=amo.PLATFORM_CHOICES_API.items())
    status = ReverseChoiceField(choices=amo.STATUS_CHOICES_API.items())
    permissions = serializers.ListField(
        source='webext_permissions_list',
        child=serializers.CharField())
    is_restart_required = serializers.BooleanField()
    class Meta:
        model = File
        fields = ('id', 'created', 'hash', 'is_restart_required',
                  'is_webextension', 'is_mozilla_signed_extension',
                  'platform', 'size', 'status', 'url', 'permissions')
    def get_url(self, obj):
        """Absolute download URL for this file."""
        # File.get_url_path() is a little different, it's already absolute, but
        # needs a src parameter that is appended as a query string.
        return obj.get_url_path(src='')
class PreviewSerializer(serializers.ModelSerializer):
    """Serialize a Preview (or VersionPreview) with absolute image URLs."""
    caption = TranslationSerializerField()
    image_url = serializers.SerializerMethodField()
    thumbnail_url = serializers.SerializerMethodField()
    class Meta:
        # Note: this serializer can also be used for VersionPreview.
        model = Preview
        fields = ('id', 'caption', 'image_size', 'image_url', 'thumbnail_size',
                  'thumbnail_url')
    def get_image_url(self, obj):
        """Absolute URL of the full-size preview image."""
        return absolutify(obj.image_url)
    def get_thumbnail_url(self, obj):
        """Absolute URL of the preview thumbnail."""
        return absolutify(obj.thumbnail_url)
class ESPreviewSerializer(BaseESSerializer, PreviewSerializer):
    # Because we have translated fields and dates coming from ES, we can't use
    # a regular PreviewSerializer to handle previews for ESAddonSerializer.
    # Unfortunately we also need to get the class right (it can be either
    # Preview or VersionPreview) so fake_object() implementation in this class
    # does nothing, the instance has already been created by a parent
    # serializer.
    datetime_fields = ('modified',)
    translated_fields = ('caption',)
    def fake_object(self, data):
        """No-op: the fake instance is built by the parent serializer
        (see ESAddonSerializer.fake_preview_object)."""
        return data
class LicenseSerializer(serializers.ModelSerializer):
    """Serialize a version License, with special handling for built-in
    licenses whose name comes from a constant rather than the database."""
    name = serializers.SerializerMethodField()
    text = TranslationSerializerField()
    url = serializers.SerializerMethodField()
    class Meta:
        model = License
        fields = ('id', 'name', 'text', 'url')
    def __init__(self, *args, **kwargs):
        super(LicenseSerializer, self).__init__(*args, **kwargs)
        # Hidden translation field bound to `name`, used as the fallback in
        # get_name() when the license has no constant.
        self.db_name = TranslationSerializerField()
        self.db_name.bind('name', self)
    def get_url(self, obj):
        """Prefer the license's own URL, else the version's license page."""
        return obj.url or self.get_version_license_url(obj)
    def get_version_license_url(self, obj):
        # We need the version associated with the license, because that's where
        # the license_url() method lives. The problem is, normally we would not
        # be able to do that, because there can be multiple versions for a
        # given License. However, since we're serializing through a nested
        # serializer, we cheat and use `instance.version_instance` which is
        # set by SimpleVersionSerializer.to_representation() while serializing.
        # Only get the version license url for non-builtin licenses.
        if not obj.builtin and hasattr(obj, 'version_instance'):
            return absolutify(obj.version_instance.license_url())
        return None
    def get_name(self, obj):
        """Return the license name: flat string or {lang: name} dict."""
        # See if there is a license constant
        license_constant = obj._constant
        if not license_constant:
            # If not fall back on the name in the database.
            return self.db_name.get_attribute(obj)
        else:
            request = self.context.get('request', None)
            if request and request.method == 'GET' and 'lang' in request.GET:
                # A single lang requested so return a flat string
                return unicode(license_constant.name)
            else:
                # Otherwise mock the dict with the default lang.
                lang = getattr(request, 'LANG', None) or settings.LANGUAGE_CODE
                return {lang: unicode(license_constant.name)}
class CompactLicenseSerializer(LicenseSerializer):
    """LicenseSerializer without the (potentially large) license text."""
    class Meta:
        model = License
        fields = ('id', 'name', 'url')
class MinimalVersionSerializer(serializers.ModelSerializer):
    """Smallest useful Version representation: id, files, review date,
    version string."""
    files = FileSerializer(source='all_files', many=True)
    class Meta:
        model = Version
        fields = ('id', 'files', 'reviewed', 'version')
class SimpleVersionSerializer(MinimalVersionSerializer):
    """Version representation with compatibility, license (compact),
    release notes and URLs."""
    compatibility = serializers.SerializerMethodField()
    edit_url = serializers.SerializerMethodField()
    is_strict_compatibility_enabled = serializers.SerializerMethodField()
    license = CompactLicenseSerializer()
    release_notes = TranslationSerializerField(source='releasenotes')
    url = serializers.SerializerMethodField()
    class Meta:
        model = Version
        fields = ('id', 'compatibility', 'edit_url', 'files',
                  'is_strict_compatibility_enabled', 'license',
                  'release_notes', 'reviewed', 'url', 'version')
    def to_representation(self, instance):
        # Help the LicenseSerializer find the version we're currently
        # serializing.
        if 'license' in self.fields and instance.license:
            instance.license.version_instance = instance
        return super(SimpleVersionSerializer, self).to_representation(instance)
    def get_compatibility(self, obj):
        """Map app short name -> {'min', 'max'} version strings, using
        defaults when no explicit compatibility info exists."""
        return {
            app.short: {
                'min': compat.min.version if compat else (
                    amo.D2C_MIN_VERSIONS.get(app.id, '1.0')),
                'max': compat.max.version if compat else amo.FAKE_MAX_VERSION
            } for app, compat in obj.compatible_apps.items()
        }
    def get_edit_url(self, obj):
        """Absolute devhub edit URL for this version."""
        return absolutify(obj.addon.get_dev_url(
            'versions.edit', args=[obj.pk], prefix_only=True))
    def get_is_strict_compatibility_enabled(self, obj):
        """True if any file in the version enforces strict compatibility."""
        return any(file_.strict_compatibility for file_ in obj.all_files)
    def get_url(self, obj):
        """Absolute frontend URL for this version."""
        return absolutify(obj.get_url_path())
class SimpleESVersionSerializer(SimpleVersionSerializer):
    """SimpleVersionSerializer variant for ES-backed data."""
    class Meta:
        model = Version
        # In ES, we don't have license and release notes info, so instead of
        # returning null, which is not necessarily true, we omit those fields
        # entirely.
        fields = ('id', 'compatibility', 'edit_url', 'files',
                  'is_strict_compatibility_enabled', 'reviewed', 'url',
                  'version')
class VersionSerializer(SimpleVersionSerializer):
    """Full Version representation: adds the channel and the complete
    license (including text)."""
    channel = ReverseChoiceField(choices=amo.CHANNEL_CHOICES_API.items())
    license = LicenseSerializer()
    class Meta:
        model = Version
        fields = ('id', 'channel', 'compatibility', 'edit_url', 'files',
                  'is_strict_compatibility_enabled', 'license',
                  'release_notes', 'reviewed', 'url', 'version')
class CurrentVersionSerializer(SimpleVersionSerializer):
    """Serialize an add-on's current version, optionally swapping in the
    latest compatible version for langpacks when `appversion` is given."""
    def to_representation(self, obj):
        # If the add-on is a langpack, and `appversion` is passed, try to
        # determine the latest public compatible version and replace the obj
        # with the result. Because of the perf impact, only done for langpacks
        # in the detail API.
        request = self.context.get('request')
        view = self.context.get('view')
        addon = obj.addon
        if (request and request.GET.get('appversion') and
                getattr(view, 'action', None) == 'retrieve' and
                addon.type == amo.ADDON_LPAPP):
            obj = self.get_current_compatible_version(addon)
        return super(CurrentVersionSerializer, self).to_representation(obj)
    def get_current_compatible_version(self, addon):
        """
        Return latest public version compatible with the app & appversion
        passed through the request, or fall back to addon.current_version if
        none is found.
        Only use on langpacks if the appversion parameter is present.
        """
        request = self.context.get('request')
        try:
            # AddonAppVersionQueryParam.get_values() returns (app_id, min, max)
            # but we want {'min': min, 'max': max}.
            value = AddonAppVersionQueryParam(request).get_values()
            application = value[0]
            appversions = dict(zip(('min', 'max'), value[1:]))
        except ValueError as exc:
            # Invalid appversion parameter -> API-level 400.
            raise exceptions.ParseError(exc.message)
        version_qs = Version.objects.latest_public_compatible_with(
            application, appversions).filter(addon=addon)
        return version_qs.first() or addon.current_version
class AddonEulaPolicySerializer(serializers.ModelSerializer):
    """Expose only an add-on's EULA and privacy policy texts."""
    eula = TranslationSerializerField()
    privacy_policy = TranslationSerializerField()
    class Meta:
        model = Addon
        fields = (
            'eula',
            'privacy_policy',
        )
class AddonDeveloperSerializer(BaseUserSerializer):
    """BaseUserSerializer plus the developer's picture URL (read-only)."""
    picture_url = serializers.SerializerMethodField()
    class Meta(BaseUserSerializer.Meta):
        fields = BaseUserSerializer.Meta.fields + (
            'picture_url',)
        read_only_fields = fields
class AddonSerializer(serializers.ModelSerializer):
    """Full public API representation of an Addon, including its current
    version, previews, authors, URLs and rating summary."""
    authors = AddonDeveloperSerializer(many=True, source='listed_authors')
    categories = serializers.SerializerMethodField()
    contributions_url = serializers.URLField(source='contributions')
    current_version = CurrentVersionSerializer()
    description = TranslationSerializerField()
    developer_comments = TranslationSerializerField()
    edit_url = serializers.SerializerMethodField()
    has_eula = serializers.SerializerMethodField()
    has_privacy_policy = serializers.SerializerMethodField()
    homepage = TranslationSerializerField()
    icon_url = serializers.SerializerMethodField()
    icons = serializers.SerializerMethodField()
    is_source_public = serializers.BooleanField(source='view_source')
    is_featured = serializers.SerializerMethodField()
    name = TranslationSerializerField()
    previews = PreviewSerializer(many=True, source='current_previews')
    ratings = serializers.SerializerMethodField()
    ratings_url = serializers.SerializerMethodField()
    review_url = serializers.SerializerMethodField()
    status = ReverseChoiceField(choices=amo.STATUS_CHOICES_API.items())
    summary = TranslationSerializerField()
    support_email = TranslationSerializerField()
    support_url = TranslationSerializerField()
    tags = serializers.SerializerMethodField()
    theme_data = serializers.SerializerMethodField()
    type = ReverseChoiceField(choices=amo.ADDON_TYPE_CHOICES_API.items())
    url = serializers.SerializerMethodField()
    class Meta:
        model = Addon
        fields = (
            'id',
            'authors',
            'average_daily_users',
            'categories',
            'contributions_url',
            'created',
            'current_version',
            'default_locale',
            'description',
            'developer_comments',
            'edit_url',
            'guid',
            'has_eula',
            'has_privacy_policy',
            'homepage',
            'icon_url',
            'icons',
            'is_disabled',
            'is_experimental',
            'is_featured',
            'is_source_public',
            'last_updated',
            'name',
            'previews',
            'public_stats',
            'ratings',
            'ratings_url',
            'requires_payment',
            'review_url',
            'slug',
            'status',
            'summary',
            'support_email',
            'support_url',
            'tags',
            'theme_data',
            'type',
            'url',
            'weekly_downloads'
        )
    def to_representation(self, obj):
        """Post-process the serialized dict: drop empty theme_data, wrap
        outgoing links on request, special-case personas, honor API gates."""
        data = super(AddonSerializer, self).to_representation(obj)
        request = self.context.get('request', None)
        if 'theme_data' in data and data['theme_data'] is None:
            data.pop('theme_data')
        if ('request' in self.context and
                'wrap_outgoing_links' in self.context['request'].GET):
            for key in ('homepage', 'support_url', 'contributions_url'):
                if key in data:
                    data[key] = self.outgoingify(data[key])
        if obj.type == amo.ADDON_PERSONA:
            if 'weekly_downloads' in data:
                # weekly_downloads don't make sense for lightweight themes.
                data.pop('weekly_downloads')
            if ('average_daily_users' in data and
                    not self.is_broken_persona(obj)):
                # In addition, their average_daily_users number must come from
                # the popularity field of the attached Persona.
                data['average_daily_users'] = obj.persona.popularity
        if request and is_gate_active(request, 'del-addons-created-field'):
            data.pop('created', None)
        return data
    def outgoingify(self, data):
        """Wrap URL value(s) - plain string or {lang: url} dict - with the
        outgoing-link redirector."""
        if data:
            if isinstance(data, basestring):
                return get_outgoing_url(data)
            elif isinstance(data, dict):
                return {key: get_outgoing_url(value) if value else None
                        for key, value in data.items()}
        # None or empty string... don't bother.
        return data
    def get_categories(self, obj):
        # Return a dict of lists like obj.app_categories does, but exposing
        # slugs for keys and values instead of objects.
        return {
            app.short: [cat.slug for cat in obj.app_categories[app]]
            for app in obj.app_categories.keys()
        }
    def get_has_eula(self, obj):
        """True if the add-on has a EULA (ES flag or DB field)."""
        return bool(getattr(obj, 'has_eula', obj.eula))
    def get_is_featured(self, obj):
        # obj._is_featured is set from ES, so will only be present for list
        # requests.
        if not hasattr(obj, '_is_featured'):
            # Any featuring will do.
            obj._is_featured = obj.is_featured(app=None, lang=None)
        return obj._is_featured
    def get_has_privacy_policy(self, obj):
        """True if the add-on has a privacy policy (ES flag or DB field)."""
        return bool(getattr(obj, 'has_privacy_policy', obj.privacy_policy))
    def get_tags(self, obj):
        """List of tag names; lazily attaches them if not preloaded."""
        if not hasattr(obj, 'tag_list'):
            attach_tags([obj])
        # attach_tags() might not have attached anything to the addon, if it
        # had no tags.
        return getattr(obj, 'tag_list', [])
    def get_url(self, obj):
        # Use get_detail_url(), get_url_path() does an extra check on
        # current_version that is annoying in subclasses which don't want to
        # load that version.
        return absolutify(obj.get_detail_url())
    def get_edit_url(self, obj):
        """Absolute devhub edit URL."""
        return absolutify(obj.get_dev_url())
    def get_ratings_url(self, obj):
        """Absolute URL of the add-on's ratings page."""
        return absolutify(obj.ratings_url)
    def get_review_url(self, obj):
        """Absolute URL of the reviewer tools page for this add-on."""
        return absolutify(reverse('reviewers.review', args=[obj.pk]))
    def get_icon_url(self, obj):
        """64px icon URL; default icon for broken personas."""
        if self.is_broken_persona(obj):
            return absolutify(obj.get_default_icon_url(64))
        return absolutify(obj.get_icon_url(64))
    def get_icons(self, obj):
        # We're using only 32 and 64 for compatibility reasons with the
        # old search API. https://github.com/mozilla/addons-server/issues/7514
        if self.is_broken_persona(obj):
            get_icon = obj.get_default_icon_url
        else:
            get_icon = obj.get_icon_url
        return {str(size): absolutify(get_icon(size)) for size in (32, 64)}
    def get_ratings(self, obj):
        """Summary of the add-on's rating counts and averages."""
        return {
            'average': obj.average_rating,
            'bayesian_average': obj.bayesian_rating,
            'count': obj.total_ratings,
            'text_count': obj.text_ratings_count,
        }
    def get_theme_data(self, obj):
        """Persona theme data, or None for non-personas/broken personas."""
        theme_data = None
        if obj.type == amo.ADDON_PERSONA and not self.is_broken_persona(obj):
            theme_data = obj.persona.theme_data
        return theme_data
    def is_broken_persona(self, obj):
        """Find out if the object is a Persona and either is missing its
        Persona instance or has a broken one.
        Call this everytime something in the serializer is suceptible to call
        something on the Persona instance, explicitly or not, to avoid 500
        errors and/or SQL queries in ESAddonSerializer."""
        try:
            # Setting obj.persona = None in ESAddonSerializer.fake_object()
            # below sadly isn't enough, so we work around it in that method by
            # creating a Persona instance with a custom '_broken'
            # attribute indicating that it should not be used.
            if obj.type == amo.ADDON_PERSONA and (
                    obj.persona is None or hasattr(obj.persona, '_broken')):
                raise Persona.DoesNotExist
        except Persona.DoesNotExist:
            # We got a DoesNotExist exception, therefore the Persona does not
            # exist or is broken.
            return True
        # Everything is fine, move on.
        return False
class AddonSerializerWithUnlistedData(AddonSerializer):
    """AddonSerializer plus the latest unlisted version (for privileged
    API consumers)."""
    latest_unlisted_version = SimpleVersionSerializer()
    class Meta:
        model = Addon
        fields = AddonSerializer.Meta.fields + ('latest_unlisted_version',)
class SimpleAddonSerializer(AddonSerializer):
    """Tiny Addon representation: id, slug, name and icon only."""
    class Meta:
        model = Addon
        fields = ('id', 'slug', 'name', 'icon_url')
class ESAddonSerializer(BaseESSerializer, AddonSerializer):
    """AddonSerializer fed from ElasticSearch documents: related objects
    are reconstructed as unsaved model instances from indexed data."""
    # Override various fields for related objects which we don't want to expose
    # data the same way than the regular serializer does (usually because we
    # some of the data is not indexed in ES).
    authors = BaseUserSerializer(many=True, source='listed_authors')
    current_version = SimpleESVersionSerializer()
    previews = ESPreviewSerializer(many=True, source='current_previews')
    _score = serializers.SerializerMethodField()
    datetime_fields = ('created', 'last_updated', 'modified')
    translated_fields = ('name', 'description', 'developer_comments',
                         'homepage', 'summary', 'support_email', 'support_url')
    class Meta:
        model = Addon
        fields = AddonSerializer.Meta.fields + ('_score', )
    def fake_preview_object(self, obj, data, model_class=Preview):
        """Build an unsaved Preview/VersionPreview from ES `data`.

        This is what ESPreviewSerializer.fake_object() would do, but we do
        it here and make that fake_object() method a no-op in order to have
        access to the right model_class to use - VersionPreview for static
        themes, Preview for the rest.
        """
        preview = model_class(id=data['id'], sizes=data.get('sizes', {}))
        preview.addon = obj
        preview.version = obj.current_version
        preview_serializer = self.fields['previews'].child
        # Attach base attributes that have the same name/format in ES and in
        # the model.
        preview_serializer._attach_fields(preview, data, ('modified',))
        # Attach translations.
        preview_serializer._attach_translations(
            preview, data, preview_serializer.translated_fields)
        return preview
    def fake_file_object(self, obj, data):
        """Build an unsaved File for version `obj` from an ES file dict."""
        file_ = File(
            id=data['id'], created=self.handle_date(data['created']),
            hash=data['hash'], filename=data['filename'],
            is_webextension=data.get('is_webextension'),
            is_mozilla_signed_extension=data.get(
                'is_mozilla_signed_extension'),
            is_restart_required=data.get('is_restart_required', False),
            platform=data['platform'], size=data['size'],
            status=data['status'],
            strict_compatibility=data.get('strict_compatibility', False),
            version=obj)
        # Permissions are stored denormalized on the ES document.
        file_.webext_permissions_list = data.get('webext_permissions_list', [])
        return file_
    def fake_version_object(self, obj, data, channel):
        """Build an unsaved Version (with files and compatibility info) for
        addon `obj` from an ES version dict; return None if `data` is falsy."""
        if data:
            version = Version(
                addon=obj, id=data['id'],
                reviewed=self.handle_date(data['reviewed']),
                version=data['version'], channel=channel)
            version.all_files = [
                self.fake_file_object(version, file_data)
                for file_data in data.get('files', [])
            ]
            # In ES we store integers for the appversion info, we need to
            # convert it back to strings.
            compatible_apps = {}
            for app_id, compat_dict in data.get('compatible_apps', {}).items():
                app_name = APPS_ALL[int(app_id)]
                compatible_apps[app_name] = ApplicationsVersions(
                    min=AppVersion(version=compat_dict.get('min_human', '')),
                    max=AppVersion(version=compat_dict.get('max_human', '')))
            version._compatible_apps = compatible_apps
        else:
            version = None
        return version
def fake_object(self, data):
    """Create a fake instance of Addon and related models from ES data.

    Everything is built purely from the ES document: nothing touches the
    database, which is the point of serializing straight from the index.
    """
    obj = Addon(id=data['id'], slug=data['slug'])
    # Attach base attributes that have the same name/format in ES and in
    # the model.
    self._attach_fields(
        obj, data, (
            'average_daily_users',
            'bayesian_rating',
            'contributions',
            'created',
            'default_locale',
            'guid',
            'has_eula',
            'has_privacy_policy',
            'hotness',
            'icon_hash',
            'icon_type',
            'is_experimental',
            'last_updated',
            'modified',
            'public_stats',
            'requires_payment',
            'slug',
            'status',
            'type',
            'view_source',
            'weekly_downloads'
        )
    )
    # Attach attributes that do not have the same name/format in ES.
    obj.tag_list = data.get('tags', [])
    obj.all_categories = [
        CATEGORIES_BY_ID[cat_id] for cat_id in data.get('category', [])]
    # Not entirely accurate, but enough in the context of the search API.
    obj.disabled_by_user = data.get('is_disabled', False)
    # Attach translations (they require special treatment).
    self._attach_translations(obj, data, self.translated_fields)
    # Attach related models (also faking them). `current_version` is a
    # property we can't write to, so we use the underlying field which
    # begins with an underscore. `latest_unlisted_version` is writeable
    # cached_property so we can directly write to them.
    obj._current_version = self.fake_version_object(
        obj, data.get('current_version'), amo.RELEASE_CHANNEL_LISTED)
    obj.latest_unlisted_version = self.fake_version_object(
        obj, data.get('latest_unlisted_version'),
        amo.RELEASE_CHANNEL_UNLISTED)
    data_authors = data.get('listed_authors', [])
    obj.listed_authors = [
        UserProfile(
            id=data_author['id'], display_name=data_author['name'],
            username=data_author['username'],
            is_public=data_author.get('is_public', False))
        for data_author in data_authors
    ]
    # Static themes store previews on the version, other types on the
    # add-on itself, hence the different model class below.
    is_static_theme = data.get('type') == amo.ADDON_STATICTHEME
    preview_model_class = VersionPreview if is_static_theme else Preview
    obj.current_previews = [
        self.fake_preview_object(
            obj, preview_data, model_class=preview_model_class)
        for preview_data in data.get('previews', [])
    ]
    ratings = data.get('ratings', {})
    obj.average_rating = ratings.get('average')
    obj.total_ratings = ratings.get('count')
    obj.text_ratings_count = ratings.get('text_count')
    obj._is_featured = data.get('is_featured', False)
    # Elasticsearch score for this document. Useful for debugging relevancy
    # issues.
    obj._score = data.get('_score', None)
    if data['type'] == amo.ADDON_PERSONA:
        persona_data = data.get('persona')
        if persona_data:
            obj.persona = Persona(
                addon=obj,
                accentcolor=persona_data['accentcolor'],
                display_username=persona_data['author'],
                header=persona_data['header'],
                footer=persona_data['footer'],
                # "New" Persona do not have a persona_id, it's a relic from
                # old ones.
                persona_id=0 if persona_data['is_new'] else 42,
                textcolor=persona_data['textcolor'],
                popularity=data.get('average_daily_users'),
            )
        else:
            # Sadly, although we can set obj.persona = None, this does not
            # seem to prevent the query later on. So instead, work around
            # it by creating a Persona instance with a custom attribute
            # indicating that it should not be used.
            obj.persona = Persona()
            obj.persona._broken = True
    return obj
def get__score(self, obj):
    # Raw Elasticsearch relevancy score attached to the hit metadata.
    es_meta = obj._es_meta
    return es_meta['score']
def to_representation(self, obj):
    """Serialize ``obj``, hiding `_score` unless the API gate allows it."""
    data = super(ESAddonSerializer, self).to_representation(obj)
    request = self.context.get('request')
    if request and '_score' in data:
        if not is_gate_active(request, 'addons-search-_score-field'):
            data.pop('_score')
    return data
class ESAddonAutoCompleteSerializer(ESAddonSerializer):
    """Trimmed-down ES serializer used by the autocomplete endpoint."""

    class Meta(ESAddonSerializer.Meta):
        fields = ('id', 'icon_url', 'name', 'type', 'url')
        model = Addon

    def get_url(self, obj):
        # Addon.get_url_path() only needs *some* current_version as a
        # safeguard; avoid fetching the real one for perf and fake it.
        obj._current_version = Version()
        url_path = obj.get_url_path()
        return absolutify(url_path)
class StaticCategorySerializer(serializers.Serializer):
    """Serializes a `StaticCategory` as found in constants.categories"""
    id = serializers.IntegerField()
    name = serializers.CharField()
    slug = serializers.CharField()
    application = serializers.SerializerMethodField()
    misc = serializers.BooleanField()
    type = serializers.SerializerMethodField()
    weight = serializers.IntegerField()
    description = serializers.CharField()

    def get_application(self, obj):
        # Expose the application's short name instead of its numeric id.
        return APPS_ALL[obj.application].short

    def get_type(self, obj):
        # Expose the API string for the numeric add-on type.
        return ADDON_TYPE_CHOICES_API[obj.type]
class LanguageToolsSerializer(AddonSerializer):
    """Serializer for the language-tools API (language packs etc.)."""
    target_locale = serializers.CharField()
    current_compatible_version = serializers.SerializerMethodField()

    class Meta:
        model = Addon
        fields = ('id', 'current_compatible_version', 'default_locale', 'guid',
                  'name', 'slug', 'target_locale', 'type', 'url', )

    def get_current_compatible_version(self, obj):
        versions = getattr(obj, 'compatible_versions', None)
        if versions is None:
            return None
        serialized = MinimalVersionSerializer(versions, many=True).data
        # 99% of the cases there will only be one result, since most
        # language packs are automatically uploaded for a given app
        # version. If there are more, pick the most recent one. An empty
        # list should not happen (the view is supposed to filter out
        # add-ons without a compatible version), but don't fail loudly if
        # the unthinkable happens.
        return serialized[0] if serialized else None

    def to_representation(self, obj):
        data = super(LanguageToolsSerializer, self).to_representation(obj)
        request = self.context['request']
        no_appversion_param = (
            AddonAppVersionQueryParam.query_param not in request.GET)
        if no_appversion_param and 'current_compatible_version' in data:
            del data['current_compatible_version']
        if request and is_gate_active(
                request, 'addons-locale_disambiguation-shim'):
            data['locale_disambiguation'] = None
        return data
class ReplacementAddonSerializer(serializers.ModelSerializer):
    """Expose a ReplacementAddon as its guid plus a replacement guid list."""
    replacement = serializers.SerializerMethodField()
    ADDON_PATH_REGEX = r"""/addon/(?P<addon_id>[^/<>"']+)/$"""
    COLLECTION_PATH_REGEX = (
        r"""/collections/(?P<user_id>[^/<>"']+)/(?P<coll_slug>[^/]+)/$""")

    class Meta:
        model = ReplacementAddon
        fields = ('guid', 'replacement')

    def _get_addon_guid(self, addon_id):
        # Resolve an id-or-slug to a public add-on; unknown ids yield [].
        try:
            addon = Addon.objects.public().id_or_slug(addon_id).get()
        except Addon.DoesNotExist:
            return []
        return [addon.guid]

    def _get_collection_guids(self, user_id, collection_slug):
        lookup = {'slug': collection_slug, 'listed': True}
        # A non-numeric id is treated as a username.
        if isinstance(user_id, basestring) and not user_id.isdigit():
            lookup['author__username'] = user_id
        else:
            lookup['author'] = user_id
        try:
            collection = Collection.objects.get(**lookup)
        except Collection.DoesNotExist:
            return []
        valid_q = Addon.objects.get_queryset().valid_q([amo.STATUS_PUBLIC])
        guids = collection.addons.filter(valid_q).values_list(
            'guid', flat=True)
        return list(guids)

    def get_replacement(self, obj):
        if obj.has_external_url():
            # It's an external url so no guids.
            return []
        addon_match = re.search(self.ADDON_PATH_REGEX, obj.path)
        if addon_match:
            return self._get_addon_guid(addon_match.group('addon_id'))
        coll_match = re.search(self.COLLECTION_PATH_REGEX, obj.path)
        if coll_match:
            return self._get_collection_guids(
                coll_match.group('user_id'), coll_match.group('coll_slug'))
        return []
class CompatOverrideSerializer(serializers.ModelSerializer):
    """Serializes a CompatOverride with its collapsed version ranges."""

    class VersionRangeSerializer(serializers.Serializer):
        class ApplicationSerializer(serializers.Serializer):
            # The application (e.g. Firefox) the range applies to.
            name = serializers.CharField(source='app.pretty')
            id = serializers.IntegerField(source='app.id')
            min_version = serializers.CharField(source='min')
            max_version = serializers.CharField(source='max')
            guid = serializers.CharField(source='app.guid')

        # Add-on version bounds this override covers.
        addon_min_version = serializers.CharField(source='min')
        addon_max_version = serializers.CharField(source='max')
        applications = ApplicationSerializer(source='apps', many=True)

    addon_id = serializers.IntegerField()
    addon_guid = serializers.CharField(source='guid')
    version_ranges = VersionRangeSerializer(
        source='collapsed_ranges', many=True)

    class Meta:
        model = CompatOverride
        fields = ('addon_id', 'addon_guid', 'name', 'version_ranges')

    def get_addon_id(self, obj):
        # NOTE(review): addon_id is declared as a plain IntegerField above,
        # so DRF does not appear to call this getter — confirm before
        # removing it.
        return obj.addon_id
| atiqueahmedziad/addons-server | src/olympia/addons/serializers.py | Python | bsd-3-clause | 32,770 |
from setuptools import setup, find_packages
from setuptools.command.test import test
class TestCommand(test):
    """`setup.py test` entry point delegating to the project test runner."""

    def run(self):
        # Import lazily so setup.py still loads without the test suite.
        from tests.runtests import runtests
        runtests()
# Package metadata for aino-utkik. `python setup.py test` is wired to the
# TestCommand defined above via cmdclass.
setup(
    name='aino-utkik',
    version='0.9.1',
    description='Small, clean code with a lazy view dispatcher and class based views for Django.',
    long_description=open('README.rst').read(),
    author='Mikko Hellsing',
    author_email='mikko@aino.se',
    license='BSD',
    url='https://github.com/aino/aino-utkik',
    # Ship every package except the test suite.
    packages=find_packages(exclude=['tests', 'tests.*']),
    zip_safe=False,
    cmdclass={"test": TestCommand},
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Environment :: Web Environment',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python :: 3 :: Only',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Framework :: Django',
    ],
)
| aino/aino-utkik | setup.py | Python | bsd-3-clause | 1,034 |
import datetime
import logging
try:
import threading
except ImportError:
threading = None
from django.template.loader import render_to_string
from django.utils.translation import ugettext_lazy as _
from debug_toolbar.panels import DebugPanel
class ThreadTrackingHandler(logging.Handler):
    """Logging handler that buckets emitted records per thread.

    Each thread gets its own list of records, so the toolbar can show only
    the log output produced while serving the current request.
    """

    def __init__(self):
        if threading is None:
            raise NotImplementedError("threading module is not available, \
the logging panel cannot be used without it")
        logging.Handler.__init__(self)
        # Maps a thread object to the list of records it emitted.
        self.records = {}

    def emit(self, record):
        """Append ``record`` to the current thread's bucket."""
        self.get_records().append(record)

    def get_records(self, thread=None):
        """
        Returns a list of records for the provided thread, or, if none is
        provided, returns a list for the current thread.
        """
        if thread is None:
            thread = threading.currentThread()
        # setdefault performs the lookup-or-insert in one call, avoiding
        # the check-then-insert race of the previous implementation.
        return self.records.setdefault(thread, [])

    def clear_records(self, thread=None):
        """Drop stored records for ``thread`` (default: current thread)."""
        if thread is None:
            thread = threading.currentThread()
        # pop() with a default tolerates a missing key, so concurrent
        # clears cannot raise KeyError.
        self.records.pop(thread, None)
# Single shared handler instance, installed on the root logger at NOTSET
# level so every log record in the process is captured for the panel.
handler = ThreadTrackingHandler()
logging.root.setLevel(logging.NOTSET)
logging.root.addHandler(handler)
class LoggingPanel(DebugPanel):
    """Debug-toolbar panel listing log records captured for this request."""
    name = 'Logging'
    has_content = True

    def process_request(self, request):
        # Start each request with a clean slate for the current thread.
        handler.clear_records()

    def get_and_delete(self):
        """Return the current thread's records and reset the store."""
        records = handler.get_records()
        handler.clear_records()
        return records

    def nav_title(self):
        return _("Logging")

    def nav_subtitle(self):
        # Fetch the record list once instead of calling into the handler
        # twice, and pluralize with a plain conditional instead of the
        # fragile `and/or` trick.
        count = len(handler.get_records())
        return "%s message%s" % (count, '' if count == 1 else 's')

    def title(self):
        return 'Log Messages'

    def url(self):
        return ''

    def content(self):
        """Render the captured records, consuming them in the process."""
        records = [{
            'message': record.getMessage(),
            'time': datetime.datetime.fromtimestamp(record.created),
            'level': record.levelname,
            'file': record.pathname,
            'line': record.lineno,
        } for record in self.get_and_delete()]
        return render_to_string('debug_toolbar/panels/logger.html',
                                {'records': records})
| none-da/zeshare | debug_toolbar/panels/logger.py | Python | bsd-3-clause | 2,377 |
""" Django support. """
from __future__ import absolute_import
import datetime
from os import path
from types import GeneratorType
import decimal
from django import VERSION
if VERSION < (1, 8):
from django.contrib.contenttypes.generic import (
GenericForeignKey, GenericRelation)
else:
from django.contrib.contenttypes.fields import (
GenericForeignKey, GenericRelation)
from django.contrib.contenttypes.models import ContentType
from django.core.files.base import ContentFile
from django.core.validators import (
validate_ipv4_address, validate_ipv6_address)
from django.db import models
from django.conf import settings
from .. import mix_types as t, _compat as _
from ..main import (
SKIP_VALUE, TypeMixerMeta as BaseTypeMixerMeta, TypeMixer as BaseTypeMixer,
GenFactory as BaseFactory, Mixer as BaseMixer, _Deffered, partial, faker)
get_contentfile = ContentFile
MOCK_FILE = path.abspath(path.join(
path.dirname(path.dirname(__file__)), 'resources', 'file.txt'
))
MOCK_IMAGE = path.abspath(path.join(
path.dirname(path.dirname(__file__)), 'resources', 'image.jpg'
))
def get_file(filepath=MOCK_FILE, **kwargs):
    """ Generate a content file from the file at ``filepath``.

    Extra keyword arguments are accepted (and ignored) so the function can
    be plugged in directly as a mixer generator.

    :return ContentFile:
    """
    name = path.basename(filepath)
    with open(filepath, 'rb') as source:
        payload = source.read()
    return get_contentfile(payload, name)
def get_image(filepath=MOCK_IMAGE):
    """ Generate a content image.

    :return ContentFile:
    """
    # Images are just files as far as content generation is concerned.
    return get_file(filepath=filepath)
def get_relation(_scheme=None, _typemixer=None, **params):
    """ Generate a value for a related field.

    Builds (or, for ContentType, picks) an instance of the model the
    relation targets, reusing the parent mixer's configuration.
    """
    # related_model moved in Django 1.8; support both spellings.
    if VERSION < (1, 8):
        scheme = _scheme.related.parent_model
    else:
        scheme = _scheme.related_model
    if scheme is ContentType:
        # Pick a random existing content type instead of fabricating one.
        choices = [m for m in models.get_models() if m is not ContentType]
        return ContentType.objects.get_for_model(faker.random_element(choices))
    # Reach into the parent TypeMixer's name-mangled privates so the
    # related instance is blended with the same mixer/factory/fake setup.
    return TypeMixer(scheme, mixer=_typemixer._TypeMixer__mixer,
                     factory=_typemixer._TypeMixer__factory,
                     fake=_typemixer._TypeMixer__fake,).blend(**params)
def get_datetime(**params):
    """ Support Django TZ support.

    NOTE(review): passes the boolean ``settings.USE_TZ`` as ``tzinfo``;
    presumably the faker treats any truthy value as "make the datetime
    timezone-aware" — confirm against the faker implementation.
    """
    return faker.datetime(tzinfo=settings.USE_TZ)
class GenFactory(BaseFactory):

    """ Map Django model field classes to simple mixer types. """

    # Field class (or tuple of classes) -> simple type used for generation.
    types = {
        (models.AutoField, models.PositiveIntegerField): t.PositiveInteger,
        models.BigIntegerField: t.BigInteger,
        models.BooleanField: bool,
        (models.CharField, models.SlugField): str,
        models.DateField: datetime.date,
        models.DecimalField: decimal.Decimal,
        models.EmailField: t.EmailString,
        models.FloatField: float,
        models.GenericIPAddressField: t.IPString,
        models.IPAddressField: t.IP4String,
        models.IntegerField: int,
        models.PositiveSmallIntegerField: t.PositiveSmallInteger,
        models.SmallIntegerField: t.SmallInteger,
        models.TextField: t.Text,
        models.TimeField: datetime.time,
        models.URLField: t.URL,
    }

    # Field classes whose values come from a dedicated generator function
    # (defined above in this module) instead of a simple type.
    generators = {
        models.BinaryField: faker.pybytes,
        models.DateTimeField: get_datetime,
        models.FileField: get_file,
        models.FilePathField: lambda: MOCK_FILE,
        models.ForeignKey: get_relation,
        models.ImageField: get_image,
        models.ManyToManyField: get_relation,
        models.OneToOneField: get_relation,
    }
class TypeMixerMeta(BaseTypeMixerMeta):

    """ Load django models from strings. """

    def __new__(mcs, name, bases, params):
        """ Associate Scheme with Django models.

        Cache Django models.

        :return mixer.backend.django.TypeMixer: A generated class.
        """
        # Per-class cache of model-name -> model-class lookups.
        params['models_cache'] = dict()
        cls = super(TypeMixerMeta, mcs).__new__(mcs, name, bases, params)
        return cls

    def __load_cls(cls, cls_type):
        # Accept either a model class or a string: "app_label.Model" is
        # resolved through Django directly, a bare "Model" name through
        # the local models_cache.
        if isinstance(cls_type, _.string_types):
            if '.' in cls_type:
                app_label, model_name = cls_type.split(".")
                return models.get_model(app_label, model_name)
            else:
                try:
                    if cls_type not in cls.models_cache:
                        cls.__update_cache()
                    return cls.models_cache[cls_type]
                except KeyError:
                    raise ValueError('Model "%s" not found.' % cls_type)
        return cls_type

    def __update_cache(cls):
        """ Update apps cache for Django < 1.7. """
        # The app registry API changed in Django 1.7; support both.
        if VERSION < (1, 7):
            for app_models in models.loading.cache.app_models.values():
                for name, model in app_models.items():
                    cls.models_cache[name] = model
        else:
            from django.apps import apps
            for app in apps.all_models:
                for name, model in apps.all_models[app].items():
                    cls.models_cache[name] = model
class TypeMixer(_.with_metaclass(TypeMixerMeta, BaseTypeMixer)):

    """ TypeMixer for Django.

    Generates fake instances of a Django model, handling relations,
    many-to-many fields and generic foreign keys. Note: several private
    attributes (``self.__fields``, ``self.__mixer``, ``self.__scheme``,
    ``self.__factory``) are name-mangled and inherited from BaseTypeMixer.
    """

    __metaclass__ = TypeMixerMeta

    factory = GenFactory

    def postprocess(self, target, postprocess_values):
        """ Fill postprocess_values.

        Deferred values are applied in two passes: generic FKs must be set
        before the target is saved, M2M values only after it has a pk.
        """
        for name, deffered in postprocess_values:
            if not type(deffered.scheme) is GenericForeignKey:
                continue
            name, value = self._get_value(name, deffered.value)
            setattr(target, name, value)
        if self.__mixer:
            # Typically saves the target (see Mixer.postprocess below).
            target = self.__mixer.postprocess(target)
        for name, deffered in postprocess_values:
            if type(deffered.scheme) is GenericForeignKey or not target.pk:
                continue
            name, value = self._get_value(name, deffered.value)
            # # If the ManyToMany relation has an intermediary model,
            # # the add and remove methods do not exist.
            if not deffered.scheme.rel.through._meta.auto_created and self.__mixer: # noqa
                self.__mixer.blend(
                    deffered.scheme.rel.through, **{
                        deffered.scheme.m2m_field_name(): target,
                        deffered.scheme.m2m_reverse_field_name(): value})
                continue
            if not isinstance(value, (list, tuple)):
                value = [value]
            setattr(target, name, value)
        return target

    def get_value(self, name, value):
        """ Set value to generated instance.

        :return : None or (name, value) for later use
        """
        field = self.__fields.get(name)
        if field:
            # M2M and generic FK values can only be applied once the
            # target exists, so defer them to postprocess().
            if (field.scheme in self.__scheme._meta.local_many_to_many or
                    type(field.scheme) is GenericForeignKey):
                return name, _Deffered(value, field.scheme)
            return self._get_value(name, value, field)
        return super(TypeMixer, self).get_value(name, value)

    def _get_value(self, name, value, field=None):
        # Unwrap generators and callables until a concrete value remains.
        if isinstance(value, GeneratorType):
            return self._get_value(name, next(value), field)
        if not isinstance(value, t.Mix) and value is not SKIP_VALUE:
            if callable(value):
                return self._get_value(name, value(), field)
            if field:
                # Let the model field coerce the value (e.g. str -> int).
                value = field.scheme.to_python(value)
        return name, value

    def gen_select(self, field_name, select):
        """ Select exists value from database.

        :param field_name: Name of field for generation.

        :return : None or (name, value) for later use
        """
        if field_name not in self.__fields:
            return field_name, None
        try:
            field = self.__fields[field_name]
            # Pick a random existing related object matching the params.
            return field.name, field.scheme.rel.to.objects.filter(**select.params).order_by('?')[0]
        except Exception:
            raise Exception("Cannot find a value for the field: '{0}'".format(field_name))

    def gen_field(self, field):
        """ Generate value by field.

        :param relation: Instance of :class:`Field`

        :return : None or (name, value) for later use
        """
        # Generic FKs cannot be generated standalone; skip them here.
        if isinstance(field.scheme, GenericForeignKey):
            return field.name, SKIP_VALUE
        if field.params and not field.scheme:
            raise ValueError('Invalid relation %s' % field.name)
        return super(TypeMixer, self).gen_field(field)

    def make_fabric(self, field, fname=None, fake=False, kwargs=None): # noqa
        """ Make a fabric for field.

        :param field: A mixer field
        :param fname: Field name
        :param fake: Force fake data

        :return function:
        """
        kwargs = {} if kwargs is None else kwargs
        fcls = type(field)
        stype = self.__factory.cls_to_simple(fcls)
        if fcls is models.CommaSeparatedIntegerField:
            return partial(faker.choices, range(0, 10), length=field.max_length)
        if field and field.choices:
            # Fields with choices pick a random declared choice value.
            # (NOTE: the `_` here locally shadows the `_compat` alias.)
            try:
                choices, _ = list(zip(*field.choices))
                return partial(faker.random_element, choices)
            except ValueError:
                pass
        if stype in (str, t.Text):
            # Respect max_length by truncating whatever the fabric yields.
            fab = super(TypeMixer, self).make_fabric(
                fcls, field_name=fname, fake=fake, kwargs=kwargs)
            return lambda: fab()[:field.max_length]
        if stype is decimal.Decimal:
            kwargs['left_digits'] = field.max_digits - field.decimal_places
            kwargs['right_digits'] = field.decimal_places
        elif stype is t.IPString:
            # Hack for support Django 1.4/1.5
            protocol = getattr(field, 'protocol', None)
            if not protocol:
                validator = field.default_validators[0]
                protocol = 'both'
                if validator is validate_ipv4_address:
                    protocol = 'ipv4'
                elif validator is validate_ipv6_address:
                    protocol = 'ipv6'
            # protocol matching is case insensitive
            # default address is either IPv4 or IPv6
            kwargs['protocol'] = protocol.lower()
        elif isinstance(field, models.fields.related.RelatedField):
            # Relation generators (get_relation) need the mixer context.
            kwargs.update({'_typemixer': self, '_scheme': field})
        return super(TypeMixer, self).make_fabric(
            fcls, field_name=fname, fake=fake, kwargs=kwargs)

    @staticmethod
    def is_unique(field):
        """ Return True if field's value should be unique.

        :return bool:
        """
        # Django < 1.7 did not report OneToOneField as unique itself.
        if VERSION < (1, 7) and isinstance(field.scheme, models.OneToOneField):
            return True
        return field.scheme.unique

    @staticmethod
    def is_required(field):
        """ Return True if field's value should be defined.

        :return bool:
        """
        if field.params:
            return True
        if field.scheme.has_default() or field.scheme.null and field.scheme.blank:
            return False
        if field.scheme.auto_created:
            return False
        # M2M and generic relations are filled in postprocess, if at all.
        if isinstance(field.scheme, models.ManyToManyField):
            return False
        if isinstance(field.scheme, GenericRelation):
            return False
        return True

    def guard(self, *args, **kwargs):
        """ Look up objects in the database.

        :returns: A found object (or list of objects) or False
        """
        qs = self.__scheme.objects.filter(*args, **kwargs)
        count = qs.count()
        if count == 1:
            return qs.get()
        if count:
            return list(qs)
        return False

    def reload(self, obj):
        """ Reload object from database. """
        if not obj.pk:
            raise ValueError("Cannot load the object: %s" % obj)
        return self.__scheme._default_manager.get(pk=obj.pk)

    def __load_fields(self):
        # Yield virtual fields (e.g. GenericForeignKey) first, then
        # concrete fields, then local many-to-many fields.
        for field in self.__scheme._meta.virtual_fields:
            yield field.name, t.Field(field, field.name)
        for field in self.__scheme._meta.fields:
            if isinstance(field, models.AutoField)\
                    and self.__mixer and self.__mixer.params.get('commit'):
                # The DB assigns the pk on save; don't generate one.
                continue
            yield field.name, t.Field(field, field.name)
        for field in self.__scheme._meta.local_many_to_many:
            yield field.name, t.Field(field, field.name)
class Mixer(BaseMixer):

    """ Django integration for the mixer. """

    type_mixer_cls = TypeMixer

    def __init__(self, commit=True, **params):
        """Initialize Mixer instance.

        :param commit: (True) Save generated objects to the database.
        """
        super(Mixer, self).__init__(**params)
        self.params['commit'] = commit

    def postprocess(self, target):
        """ Persist the generated object when commit mode is enabled.

        :return value: A generated value
        """
        should_commit = self.params.get('commit')
        if should_commit:
            target.save()
        return target
# Default, ready-to-use mixer instance (commit=True).
mixer = Mixer()
# pylama:ignore=E1120
| mechaxl/mixer | mixer/backend/django.py | Python | bsd-3-clause | 12,824 |
import json
import os
import re
from django import http
from django.conf import settings
from django.db.transaction import non_atomic_requests
from django.http import HttpResponse, HttpResponseBadRequest
from django.shortcuts import render
from django.utils.encoding import iri_to_uri
from django.views.decorators.cache import never_cache
from django.views.decorators.csrf import csrf_exempt
from django.views.decorators.http import require_POST
import commonware.log
import waffle
from django_statsd.clients import statsd
from olympia import amo, api
from olympia.amo.utils import log_cef
from . import monitors
log = commonware.log.getLogger('z.amo')
monitor_log = commonware.log.getLogger('z.monitor')
jp_log = commonware.log.getLogger('z.jp.repack')
# NOTE(review): these patterns look like plugin-finder matching rules —
# confirm usage against the callers before changing them.
# User-agent platforms that support the Flash plugin.
flash_re = re.compile(r'^(Win|(PPC|Intel) Mac OS X|Linux.+i\d86)|SunOs',
                      re.IGNORECASE)
# Mimetypes handled by the QuickTime plugin.
quicktime_re = re.compile(
    r'^(application/(sdp|x-(mpeg|rtsp|sdp))|audio/(3gpp(2)?|AMR|aiff|basic|'
    r'mid(i)?|mp4|mpeg|vnd\.qcelp|wav|x-(aiff|m4(a|b|p)|midi|mpeg|wav))|'
    r'image/(pict|png|tiff|x-(macpaint|pict|png|quicktime|sgi|targa|tiff))|'
    r'video/(3gpp(2)?|flc|mp4|mpeg|quicktime|sd-video|x-mpeg))$')
# Mimetypes for the Java applet plugin (various JPI versions).
java_re = re.compile(
    r'^application/x-java-((applet|bean)(;jpi-version=1\.5|;'
    r'version=(1\.(1(\.[1-3])?|(2|4)(\.[1-2])?|3(\.1)?|5)))?|vm)$')
# Mimetypes for the Windows Media Player plugin.
wmp_re = re.compile(
    r'^(application/(asx|x-(mplayer2|ms-wmp))|video/x-ms-(asf(-plugin)?|'
    r'wm(p|v|x)?|wvx)|audio/x-ms-w(ax|ma))$')
@never_cache
@non_atomic_requests
def monitor(request, format=None):
    """Run every service health check and report the combined status.

    Responds with JSON when ``format`` is '.json', HTML otherwise, and
    uses HTTP 500 whenever any individual check fails.
    """
    checks = ['memcache', 'libraries', 'elastic', 'path',
              'redis']
    # For each check, a boolean pass/fail status to show in the template.
    status_summary = {}
    results = {}
    for check in checks:
        with statsd.timer('monitor.%s' % check) as timer:
            status, result = getattr(monitors, check)()
        # state is a string. If it is empty, that means everything is fine.
        status_summary[check] = {'state': not status,
                                 'status': status}
        results['%s_results' % check] = result
        results['%s_timer' % check] = timer.ms
    # If anything broke, send HTTP 500.
    everything_fine = all(
        summary['state'] for summary in status_summary.values())
    status_code = 200 if everything_fine else 500
    if format == '.json':
        return http.HttpResponse(json.dumps(status_summary),
                                 status=status_code)
    ctx = dict(results)
    ctx['status_summary'] = status_summary
    return render(request, 'services/monitor.html', ctx, status=status_code)
@non_atomic_requests
def robots(request):
    """Generate a robots.txt"""
    on_services_domain = (
        request.META['SERVER_NAME'] == settings.SERVICES_DOMAIN)
    if on_services_domain or not settings.ENGAGE_ROBOTS:
        # Disallow crawling entirely on the services domain or when robots
        # engagement is turned off.
        template = "User-agent: *\nDisallow: /"
    else:
        template = render(request, 'amo/robots.html', {'apps': amo.APP_USAGE})
    return HttpResponse(template, content_type="text/plain")
@non_atomic_requests
def contribute(request):
    """Serve the contribute.json file from the project root."""
    path = os.path.join(settings.ROOT, 'contribute.json')
    # Read inside a context manager so the file handle is closed
    # deterministically instead of being left to the response machinery.
    with open(path, 'rb') as fobj:
        return HttpResponse(fobj.read(), content_type='application/json')
@non_atomic_requests
def handler403(request):
    """403 handler; API requests get the API's own 403 response."""
    if request.path_info.startswith('/api/'):
        # Pass over to handler403 view in api if api was targeted.
        return api.views.handler403(request)
    return render(request, 'amo/403.html', status=403)
@non_atomic_requests
def handler404(request):
    """404 handler; API requests get the API's own 404 response."""
    if request.path_info.startswith('/api/'):
        # Pass over to handler404 view in api if api was targeted.
        return api.views.handler404(request)
    return render(request, 'amo/404.html', status=404)
@non_atomic_requests
def handler500(request):
    """500 handler; API requests get the API's own 500 response."""
    if request.path_info.startswith('/api/'):
        # Pass over to handler500 view in api if api was targeted.
        return api.views.handler500(request)
    return render(request, 'amo/500.html', status=500)
@non_atomic_requests
def csrf_failure(request, reason=''):
    """Render the 403 page, flagging whether CSRF was the cause."""
    context = {'because_csrf': 'CSRF' in reason}
    return render(request, 'amo/403.html', context, status=403)
@non_atomic_requests
def loaded(request):
    """Expose the wsgi.loaded flag as plain text (deploy sanity check)."""
    body = '%s' % request.META['wsgi.loaded']
    return http.HttpResponse(body, content_type='text/plain')
@csrf_exempt
@require_POST
@non_atomic_requests
def cspreport(request):
    """Accept CSP reports and log them."""
    report = ('blocked-uri', 'violated-directive', 'original-policy')
    if not waffle.sample_is_active('csp-store-reports'):
        return HttpResponse()
    try:
        v = json.loads(request.body)['csp-report']
        # If possible, alter the PATH_INFO to contain the request of the page
        # the error occurred on, spec: http://mzl.la/P82R5y
        meta = request.META.copy()
        meta['PATH_INFO'] = v.get('document-uri', meta['PATH_INFO'])
        v = [(k, v[k]) for k in report if k in v]
        log_cef('CSPViolation', 5, meta, username=request.user,
                signature='CSPREPORT',
                msg='A client reported a CSP violation',
                cs6=v, cs6Label='ContentPolicy')
    # `except X as e` is valid on Python 2.6+ and required on Python 3;
    # this replaces the Python-2-only `except X, e` form.
    except (KeyError, ValueError) as e:
        log.debug('Exception in CSP report: %s' % e, exc_info=True)
        return HttpResponseBadRequest()
    return HttpResponse()
@non_atomic_requests
def version(request):
    """Serve version.json describing the deployed revision."""
    path = os.path.join(settings.ROOT, 'version.json')
    # Read inside a context manager so the file handle is closed
    # deterministically (same pattern as contribute()).
    with open(path, 'rb') as fobj:
        return HttpResponse(fobj.read(), content_type='application/json')
@non_atomic_requests
def plugin_check_redirect(request):
    """Redirect plugin checks to the PFS service, preserving the query."""
    query = iri_to_uri(request.META.get('QUERY_STRING', ''))
    return http.HttpResponseRedirect('%s?%s' % (settings.PFS_URL, query))
| jpetto/olympia | src/olympia/amo/views.py | Python | bsd-3-clause | 5,726 |
from django.conf.urls.defaults import *
# Uncomment the next two lines to enable the admin:
# from django.contrib import admin
# admin.autodiscover()
# Old-style Django URLconf (pre-1.8 `patterns()` syntax).
urlpatterns = patterns('',
    # Example:
    # (r'^pyconpads/', include('pyconpads.foo.urls')),

    # Uncomment the admin/doc line below and add 'django.contrib.admindocs'
    # to INSTALLED_APPS to enable admin documentation:
    # (r'^admin/doc/', include('django.contrib.admindocs.urls')),

    # All pad views live under /list/.
    (r'^list/', include('pyconpads.pads.urls')),

    # Uncomment the next line to enable the admin:
    # (r'^admin/', include(admin.site.urls)),
)
| SeanOC/pyconpads | pyconpads/urls.py | Python | bsd-3-clause | 597 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import re
from cpt.packager import ConanMultiPackager
from cpt.ci_manager import CIManager
from cpt.printer import Printer
class BuilderSettings(object):
    """Conan packaging configuration read from the environment and from
    the CMake project file."""

    @property
    def username(self):
        """ Set catchorg as package's owner
        """
        return os.getenv("CONAN_USERNAME", "catchorg")

    @property
    def login_username(self):
        """ Set Bintray login username
        """
        return os.getenv("CONAN_LOGIN_USERNAME", "horenmar")

    @property
    def upload(self):
        """ Set Catch2 repository to be used on upload.

        The upload server address could be customized by env var
        CONAN_UPLOAD. If not defined, the method will check the branch name.

        Only master or CONAN_STABLE_BRANCH_PATTERN will be accepted.
        The master branch will be pushed to testing channel, because it does
        not match the stable pattern. Otherwise it will upload to stable
        channel.
        """
        return os.getenv("CONAN_UPLOAD", "https://api.bintray.com/conan/catchorg/catch2")

    @property
    def upload_only_when_stable(self):
        """ Force uploading only when running over a tag branch
        """
        return os.getenv("CONAN_UPLOAD_ONLY_WHEN_STABLE", "True").lower() in ["true", "1", "yes"]

    @property
    def stable_branch_pattern(self):
        """ Only upload the package when the branch name looks like a
        version tag (e.g. v1.2.3)
        """
        return os.getenv("CONAN_STABLE_BRANCH_PATTERN", r"v\d+\.\d+\.\d+")

    @property
    def reference(self):
        """ Read project version from the CMake file to create the Conan
        reference
        """
        return os.getenv("CONAN_REFERENCE", "Catch2/{}".format(self._version))

    @property
    def channel(self):
        """ Default Conan package channel when not stable
        """
        return os.getenv("CONAN_CHANNEL", "testing")

    @property
    def _version(self):
        """ Get the version name from the CMake project() line """
        pattern = re.compile(r"project\(Catch2 LANGUAGES CXX VERSION (\d+\.\d+\.\d+)\)")
        version = "latest"
        # `fh` rather than `file`, which shadowed the builtin.
        with open("CMakeLists.txt") as fh:
            for line in fh:
                result = pattern.search(line)
                if result:
                    version = result.group(1)
        return version

    @property
    def _branch(self):
        """ Get branch name from CI manager
        """
        printer = Printer(None)
        ci_manager = CIManager(printer)
        return ci_manager.get_branch()
if __name__ == "__main__":
    # Assemble the multi-package builder from the environment-driven
    # settings above and run the CI build/upload for each configuration.
    settings = BuilderSettings()
    builder = ConanMultiPackager(
        reference=settings.reference,
        channel=settings.channel,
        upload=settings.upload,
        upload_only_when_stable=settings.upload_only_when_stable,
        stable_branch_pattern=settings.stable_branch_pattern,
        login_username=settings.login_username,
        username=settings.username,
        test_folder=os.path.join(".conan", "test_package"))
    builder.add()
    builder.run()
| ibc/MediaSoup | worker/deps/catch/.conan/build.py | Python | isc | 3,044 |
#!/usr/bin/env python
"""
"""
from __future__ import unicode_literals
from django.conf.urls import patterns, url
from views import *
# Old-style Django URLconf (pre-1.8 `patterns()` syntax).
urlpatterns = patterns('',
    url(r'^index/$', FabricIndex.as_view(), name='fabric_index'),

    # Users: CRUD views over fabric access credentials.
    url(r'^access/$', FabricAccessList.as_view(), name='fabric_access'),
    url(r'^access/create/$', FabricAccessCreate.as_view(), name='fabric_access_create'),
    url(r'^access/(?P<pk>\d+)/$', FabricAccessDetail.as_view(), name='fabric_access_details'),
    url(r'^access/delete/(?P<pk>\d+)/$', FabricAccessDelete.as_view(), name='fabric_access_delete'),
    url(r'^access/update/(?P<pk>\d+)/$', FabricAccessUpdate.as_view(), name='fabric_access_update'),

    # Read-only views over past command executions.
    url(r'^history/$', ExecutionHistoryList.as_view(), name='fabric_exe_history'),
    url(r'^history/(?P<pk>\d+)/$', ExecutionHistoryDetail.as_view(), name='fabric_exe_history_details'),

    #url(r'^cmd/$', ExecutionHistoryList.as_view(), name='command'),
    #url(r'^cmd/(?P<pk>\d+)/$', ExecutionHistoryDetail.as_view(), name='command_details'),
) | rangertaha/salt-manager | salt-manager/webapp/apps/fabric/fabhistory/urls.py | Python | mit | 1,051 |
# Copyright (c) 1998-2002 John Aycock
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
# CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
# SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
# Version banner for this trimmed SPARK distribution.
__version__ = 'SPARK-0.7 (pre-alpha-7) uncompyle trim'
def _namelist(instance):
namelist, namedict, classlist = [], {}, [instance.__class__]
for c in classlist:
for b in c.__bases__:
classlist.append(b)
for name in c.__dict__.keys():
if not namedict.has_key(name):
namelist.append(name)
namedict[name] = 1
return namelist
#
# Extracted from GenericParser and made global so that [un]picking works.
#
class _State:
def __init__(self, stateno, items):
self.T, self.complete, self.items = [], [], items
self.stateno = stateno
class GenericParser:
    #
    # An Earley parser, as per J. Earley, "An Efficient Context-Free
    # Parsing Algorithm", CACM 13(2), pp. 94-102.  Also J. C. Earley,
    # "An Efficient Context-Free Parsing Algorithm", Ph.D. thesis,
    # Carnegie-Mellon University, August 1968.  New formulation of
    # the parser according to J. Aycock, "Practical Earley Parsing
    # and the SPARK Toolkit", Ph.D. thesis, University of Victoria,
    # 2001, and J. Aycock and R. N. Horspool, "Practical Earley
    # Parsing", unpublished paper, 2001.
    #
    # NOTE(review): this module is Python 2 code (has_key, xrange,
    # print statements, tuple parameters); it will not run unmodified
    # on Python 3.
    #
    def __init__(self, start):
        """Collect the p_* rule methods of this instance and augment the
        grammar with a START rule deriving *start*."""
        self.rules = {}      # lhs -> list of (lhs, rhs-tuple) rules
        self.rule2func = {}  # rule -> semantic action
        self.rule2name = {}  # rule -> defining method name minus 'p_'
        self.collectRules()
        self.augment(start)
        self.ruleschanged = 1  # lazily rebuild tables on next parse()
    # prefix marking nullable-symbol variants in the rewritten grammar G_e
    _NULLABLE = '\e_'
    _START = 'START'
    _BOF = '|-'
    #
    # When pickling, take the time to generate the full state machine;
    # some information is then extraneous, too.  Unfortunately we
    # can't save the rule2func map.
    #
    def __getstate__(self):
        """Build the complete state machine, then return a dict suitable
        for pickling (drops unpicklable/reconstructible members)."""
        if self.ruleschanged:
            #
            # XXX - duplicated from parse()
            #
            self.computeNull()
            self.newrules = {}
            self.new2old = {}
            self.makeNewRules()
            self.ruleschanged = 0
            self.edges, self.cores = {}, {}
            self.states = { 0: self.makeState0() }
            self.makeState(0, self._BOF)
        #
        # XXX - should find a better way to do this..
        #
        # Repeatedly force generation of every lazily-deferred state
        # (edges whose value is still None) until a fixed point.
        changes = 1
        while changes:
            changes = 0
            for k, v in self.edges.items():
                if v is None:
                    state, sym = k
                    if self.states.has_key(state):
                        self.goto(state, sym)
                        changes = 1
        rv = self.__dict__.copy()
        for s in self.states.values():
            del s.items
        del rv['rule2func']
        del rv['nullable']
        del rv['cores']
        return rv
    def __setstate__(self, D):
        """Restore from a pickle: re-collect rules (restoring rule2func,
        which could not be pickled) and switch to the fast set builder."""
        self.rules = {}
        self.rule2func = {}
        self.rule2name = {}
        self.collectRules()
        start = D['rules'][self._START][0][1][1] # Blech.
        self.augment(start)
        D['rule2func'] = self.rule2func
        # the pickled machine is complete, so the inlined fast path is safe
        D['makeSet'] = self.makeSet_fast
        self.__dict__ = D
    #
    # A hook for GenericASTBuilder and GenericASTMatcher.  Mess
    # thee not with this; nor shall thee toucheth the _preprocess
    # argument to addRule.
    #
    def preprocess(self, rule, func): return rule, func
    def addRule(self, doc, func, _preprocess=1):
        """Parse a docstring of the form ``lhs ::= sym sym ...`` (possibly
        several productions) and register each rule with action *func*."""
        fn = func
        rules = doc.split()
        # index[] holds the token position of each lhs; the final entry
        # is a sentinel so the slice below captures the last rhs.
        index = []
        for i in xrange(len(rules)):
            if rules[i] == '::=':
                index.append(i-1)
        index.append(len(rules))
        for i in xrange(len(index)-1):
            lhs = rules[index[i]]
            rhs = rules[index[i]+2:index[i+1]]
            rule = (lhs, tuple(rhs))
            if _preprocess:
                rule, fn = self.preprocess(rule, func)
            if self.rules.has_key(lhs):
                self.rules[lhs].append(rule)
            else:
                self.rules[lhs] = [ rule ]
            self.rule2func[rule] = fn
            self.rule2name[rule] = func.__name__[2:]
        self.ruleschanged = 1
    def collectRules(self):
        """Register every method named ``p_*`` as a grammar rule, taking
        the production from its docstring."""
        for name in _namelist(self):
            if name[:2] == 'p_':
                func = getattr(self, name)
                doc = func.__doc__
                self.addRule(doc, func)
    def augment(self, start):
        """Add ``START ::= |- <start>``; the action returns the tree
        built for <start>."""
        rule = '%s ::= %s %s' % (self._START, self._BOF, start)
        self.addRule(rule, lambda args: args[1], 0)
    def computeNull(self):
        """Compute self.nullable: lhs -> 1 if lhs can derive epsilon."""
        self.nullable = {}
        tbd = []
        for rulelist in self.rules.values():
            lhs = rulelist[0][0]
            self.nullable[lhs] = 0
            for rule in rulelist:
                rhs = rule[1]
                if len(rhs) == 0:
                    self.nullable[lhs] = 1
                    continue
                #
                # We only need to consider rules which
                # consist entirely of nonterminal symbols.
                # This should be a savings on typical
                # grammars.
                #
                for sym in rhs:
                    if not self.rules.has_key(sym):
                        break
                else:
                    tbd.append(rule)
        # iterate to a fixed point over the candidate rules
        changes = 1
        while changes:
            changes = 0
            for lhs, rhs in tbd:
                if self.nullable[lhs]:
                    continue
                for sym in rhs:
                    if not self.nullable[sym]:
                        break
                else:
                    self.nullable[lhs] = 1
                    changes = 1
    def makeState0(self):
        """Initial state: every START rule at position 0."""
        s0 = _State(0, [])
        for rule in self.newrules[self._START]:
            s0.items.append((rule, 0))
        return s0
    def finalState(self, tokens):
        #
        # Yuck.
        #
        # Empty-input special case: two START variants means the start
        # symbol is nullable.
        if len(self.newrules[self._START]) == 2 and len(tokens) == 0:
            return 1
        start = self.rules[self._START][0][1][1]
        return self.goto(1, start)
    def makeNewRules(self):
        """Rewrite the grammar into G_e: for each rule, add variants in
        which leading nullable nonterminals are replaced by their
        _NULLABLE-prefixed versions (Aycock/Horspool construction)."""
        worklist = []
        for rulelist in self.rules.values():
            for rule in rulelist:
                worklist.append((rule, 0, 1, rule))
        for rule, i, candidate, oldrule in worklist:
            lhs, rhs = rule
            n = len(rhs)
            while i < n:
                sym = rhs[i]
                if not self.rules.has_key(sym) or \
                   not self.nullable[sym]:
                    candidate = 0
                    i = i + 1
                    continue
                newrhs = list(rhs)
                newrhs[i] = self._NULLABLE+sym
                newrule = (lhs, tuple(newrhs))
                worklist.append((newrule, i+1,
                                 candidate, oldrule))
                candidate = 0
                i = i + 1
            else:
                # candidate still set => every rhs symbol was nullable,
                # so this rule belongs to the nullable variant of lhs.
                if candidate:
                    lhs = self._NULLABLE+lhs
                    rule = (lhs, rhs)
                if self.newrules.has_key(lhs):
                    self.newrules[lhs].append(rule)
                else:
                    self.newrules[lhs] = [ rule ]
                self.new2old[rule] = oldrule
    def typestring(self, token):
        """Hook: return a string type for *token* to enable the faster
        gotoT() path, or None to fall back to token comparison."""
        return None
    def error(self, token):
        """Report a syntax error and abort.  Subclasses may override."""
        print "Syntax error at or near `%s' token" % token
        raise SystemExit
    def parse(self, tokens):
        """Run the Earley recognizer over *tokens* and build a parse tree
        by invoking the rule actions bottom-up."""
        sets = [ [(1,0), (2,0)] ]
        self.links = {}
        if self.ruleschanged:
            # (re)build the G_e grammar and the initial states
            self.computeNull()
            self.newrules = {}
            self.new2old = {}
            self.makeNewRules()
            self.ruleschanged = 0
            self.edges, self.cores = {}, {}
            self.states = { 0: self.makeState0() }
            self.makeState(0, self._BOF)
        for i in xrange(len(tokens)):
            sets.append([])
            if sets[i] == []:
                break
            self.makeSet(tokens[i], sets, i)
        else:
            sets.append([])
            self.makeSet(None, sets, len(tokens))
        finalitem = (self.finalState(tokens), 0)
        if finalitem not in sets[-2]:
            if len(tokens) > 0:
                self.error(tokens[i-1])
            else:
                self.error(None)
        return self.buildTree(self._START, finalitem,
                              tokens, len(sets)-2)
    def isnullable(self, sym):
        #
        # For symbols in G_e only.  If we weren't supporting 1.5,
        # could just use sym.startswith().
        #
        return self._NULLABLE == sym[0:len(self._NULLABLE)]
    def skip(self, (lhs, rhs), pos=0):
        """Advance *pos* past any nullable symbols in *rhs*."""
        n = len(rhs)
        while pos < n:
            if not self.isnullable(rhs[pos]):
                break
            pos = pos + 1
        return pos
    def makeState(self, state, sym):
        """Create (or find, via the core cache) the state reached from
        *state* on *sym*, together with its epsilon-nonkernel state."""
        assert sym is not None
        #
        # Compute \epsilon-kernel state's core and see if
        # it exists already.
        #
        kitems = []
        for rule, pos in self.states[state].items:
            lhs, rhs = rule
            if rhs[pos:pos+1] == (sym,):
                kitems.append((rule, self.skip(rule, pos+1)))
        tcore = tuple(sorted(kitems))
        if self.cores.has_key(tcore):
            return self.cores[tcore]
        #
        # Nope, doesn't exist.  Compute it and the associated
        # \epsilon-nonkernel state together; we'll need it right away.
        #
        k = self.cores[tcore] = len(self.states)
        K, NK = _State(k, kitems), _State(k+1, [])
        self.states[k] = K
        predicted = {}
        edges = self.edges
        rules = self.newrules
        for X in K, NK:
            worklist = X.items
            for item in worklist:
                rule, pos = item
                lhs, rhs = rule
                if pos == len(rhs):
                    X.complete.append(rule)
                    continue
                nextSym = rhs[pos]
                key = (X.stateno, nextSym)
                if not rules.has_key(nextSym):
                    # terminal: record a pending (None) edge once
                    if not edges.has_key(key):
                        edges[key] = None
                        X.T.append(nextSym)
                else:
                    # nonterminal: predict its rules into NK (once)
                    edges[key] = None
                    if not predicted.has_key(nextSym):
                        predicted[nextSym] = 1
                        for prule in rules[nextSym]:
                            ppos = self.skip(prule)
                            new = (prule, ppos)
                            NK.items.append(new)
            #
            # Problem: we know K needs generating, but we
            # don't yet know about NK.  Can't commit anything
            # regarding NK to self.edges until we're sure.  Should
            # we delay committing on both K and NK to avoid this
            # hacky code?  This creates other problems..
            #
            if X is K:
                edges = {}
        if NK.items == []:
            return k
        #
        # Check for \epsilon-nonkernel's core.  Unfortunately we
        # need to know the entire set of predicted nonterminals
        # to do this without accidentally duplicating states.
        #
        tcore = tuple(sorted(predicted.keys()))
        if self.cores.has_key(tcore):
            self.edges[(k, None)] = self.cores[tcore]
            return k
        nk = self.cores[tcore] = self.edges[(k, None)] = NK.stateno
        self.edges.update(edges)
        self.states[nk] = NK
        return k
    def goto(self, state, sym):
        """Transition function; generates the target state on demand."""
        key = (state, sym)
        if not self.edges.has_key(key):
            #
            # No transitions from state on sym.
            #
            return None
        rv = self.edges[key]
        if rv is None:
            #
            # Target state isn't generated yet.  Remedy this.
            #
            rv = self.makeState(state, sym)
            self.edges[key] = rv
        return rv
    def gotoT(self, state, t):
        """goto on a token *type* (typestring path)."""
        return [self.goto(state, t)]
    def gotoST(self, state, st):
        """goto on a token value: compare against each terminal in T."""
        rv = []
        for t in self.states[state].T:
            if st == t:
                rv.append(self.goto(state, t))
        return rv
    def add(self, set, item, i=None, predecessor=None, causal=None):
        """Add *item* to an Earley set, recording the (predecessor,
        causal) link used later for tree reconstruction."""
        if predecessor is None:
            if item not in set:
                set.append(item)
        else:
            key = (item, i)
            if item not in set:
                self.links[key] = []
                set.append(item)
            self.links[key].append((predecessor, causal))
    def makeSet(self, token, sets, i):
        """Build Earley set i+1 from set i: scan *token*, then run the
        completer for finished rules."""
        cur, next = sets[i], sets[i+1]
        # NOTE(review): and/or idiom — a falsy typestring would also
        # yield None here.
        ttype = token is not None and self.typestring(token) or None
        if ttype is not None:
            fn, arg = self.gotoT, ttype
        else:
            fn, arg = self.gotoST, token
        for item in cur:
            ptr = (item, i)
            state, parent = item
            add = fn(state, arg)
            for k in add:
                if k is not None:
                    self.add(next, (k, parent), i+1, ptr)
                    # follow the epsilon (None) edge, if any
                    nk = self.goto(k, None)
                    if nk is not None:
                        self.add(next, (nk, i+1))
            if parent == i:
                continue
            # completer: finished rules trigger advances in the parent set
            for rule in self.states[state].complete:
                lhs, rhs = rule
                for pitem in sets[parent]:
                    pstate, pparent = pitem
                    k = self.goto(pstate, lhs)
                    if k is not None:
                        why = (item, i, rule)
                        pptr = (pitem, parent)
                        self.add(cur, (k, pparent),
                                 i, pptr, why)
                        nk = self.goto(k, None)
                        if nk is not None:
                            self.add(cur, (nk, i))
    def makeSet_fast(self, token, sets, i):
        #
        # Call *only* when the entire state machine has been built!
        # It relies on self.edges being filled in completely, and
        # then duplicates and inlines code to boost speed at the
        # cost of extreme ugliness.
        #
        # Behaviorally identical to makeSet(); installed by __setstate__.
        #
        cur, next = sets[i], sets[i+1]
        ttype = token is not None and self.typestring(token) or None
        for item in cur:
            ptr = (item, i)
            state, parent = item
            if ttype is not None:
                k = self.edges.get((state, ttype), None)
                if k is not None:
                    #self.add(next, (k, parent), i+1, ptr)
                    #INLINED --v
                    new = (k, parent)
                    key = (new, i+1)
                    if new not in next:
                        self.links[key] = []
                        next.append(new)
                    self.links[key].append((ptr, None))
                    #INLINED --^
                    #nk = self.goto(k, None)
                    nk = self.edges.get((k, None), None)
                    if nk is not None:
                        #self.add(next, (nk, i+1))
                        #INLINED --v
                        new = (nk, i+1)
                        if new not in next:
                            next.append(new)
                        #INLINED --^
            else:
                add = self.gotoST(state, token)
                for k in add:
                    if k is not None:
                        self.add(next, (k, parent), i+1, ptr)
                        #nk = self.goto(k, None)
                        nk = self.edges.get((k, None), None)
                        if nk is not None:
                            self.add(next, (nk, i+1))
            if parent == i:
                continue
            for rule in self.states[state].complete:
                lhs, rhs = rule
                for pitem in sets[parent]:
                    pstate, pparent = pitem
                    #k = self.goto(pstate, lhs)
                    k = self.edges.get((pstate, lhs), None)
                    if k is not None:
                        why = (item, i, rule)
                        pptr = (pitem, parent)
                        #self.add(cur, (k, pparent),
                        #         i, pptr, why)
                        #INLINED --v
                        new = (k, pparent)
                        key = (new, i)
                        if new not in cur:
                            self.links[key] = []
                            cur.append(new)
                        self.links[key].append((pptr, why))
                        #INLINED --^
                        #nk = self.goto(k, None)
                        nk = self.edges.get((k, None), None)
                        if nk is not None:
                            #self.add(cur, (nk, i))
                            #INLINED --v
                            new = (nk, i)
                            if new not in cur:
                                cur.append(new)
                            #INLINED --^
    def predecessor(self, key, causal):
        """Return the predecessor item recorded for *key* with *causal*."""
        for p, c in self.links[key]:
            if c == causal:
                return p
        assert 0
    def causal(self, key):
        """Return the causal link for *key*, resolving ambiguity via
        ambiguity() when more than one rule could have caused it."""
        links = self.links[key]
        if len(links) == 1:
            return links[0][1]
        choices = []
        rule2cause = {}
        for p, c in links:
            rule = c[2]
            choices.append(rule)
            rule2cause[rule] = c
        return rule2cause[self.ambiguity(choices)]
    def deriveEpsilon(self, nt):
        """Run the semantic actions for a nullable nonterminal's empty
        derivation, recursively."""
        if len(self.newrules[nt]) > 1:
            rule = self.ambiguity(self.newrules[nt])
        else:
            rule = self.newrules[nt][0]
        #print rule
        rhs = rule[1]
        attr = [None] * len(rhs)
        for i in xrange(len(rhs)-1, -1, -1):
            attr[i] = self.deriveEpsilon(rhs[i])
        return self.rule2func[self.new2old[rule]](attr)
    def buildTree(self, nt, item, tokens, k):
        """Reconstruct the derivation of *nt* ending at Earley item
        (*item*, set *k*) and apply the rule actions right-to-left."""
        state, parent = item
        choices = []
        for rule in self.states[state].complete:
            if rule[0] == nt:
                choices.append(rule)
        rule = choices[0]
        if len(choices) > 1:
            rule = self.ambiguity(choices)
        #print rule
        rhs = rule[1]
        attr = [None] * len(rhs)
        for i in xrange(len(rhs)-1, -1, -1):
            sym = rhs[i]
            if not self.newrules.has_key(sym):
                # terminal: consume a token and step to the predecessor
                if sym != self._BOF:
                    attr[i] = tokens[k-1]
                    key = (item, k)
                    item, k = self.predecessor(key, None)
            #elif self.isnullable(sym):
            elif self._NULLABLE == sym[0:len(self._NULLABLE)]:
                attr[i] = self.deriveEpsilon(sym)
            else:
                key = (item, k)
                why = self.causal(key)
                attr[i] = self.buildTree(sym, why[0],
                                         tokens, why[1])
                item, k = self.predecessor(key, why)
        return self.rule2func[self.new2old[rule]](attr)
    def ambiguity(self, rules):
        #
        # XXX - problem here and in collectRules() if the same rule
        # appears in >1 method.  Also undefined results if rules
        # causing the ambiguity appear in the same method.
        #
        # Sort candidate rules by RHS length and delegate the final
        # choice (by rule-method name) to resolve().
        sortlist = []
        name2index = {}
        for i in xrange(len(rules)):
            lhs, rhs = rule = rules[i]
            name = self.rule2name[self.new2old[rule]]
            sortlist.append((len(rhs), name))
            name2index[name] = i
        sortlist.sort()
        list = map(lambda (a,b): b, sortlist)
        return rules[name2index[self.resolve(list)]]
    def resolve(self, list):
        #
        # Resolve ambiguity in favor of the shortest RHS.
        # Since we walk the tree from the top down, this
        # should effectively resolve in favor of a "shift".
        #
        return list[0]
#
# GenericASTBuilder automagically constructs a concrete/abstract syntax tree
# for a given input. The extra argument is a class (not an instance!)
# which supports the "__setslice__" and "__len__" methods.
#
# XXX - silently overrides any user code in methods.
#
class GenericASTBuilder(GenericParser):
    """Parser that builds a concrete/abstract syntax tree while parsing.

    *AST* is a node class (not an instance!) supporting slice assignment
    and ``len()``; every reduction produces one node via ``nonterminal()``.
    As the XXX note above warns, user rule-method bodies are silently
    overridden by the tree-building action.
    """
    def __init__(self, AST, start):
        GenericParser.__init__(self, start)
        self.AST = AST

    def preprocess(self, rule, func):
        """Rebind every rule's action to one that builds an AST node for
        the rule's LHS (the user-supplied *func* body is ignored)."""
        rebind = lambda lhs, self=self: \
                lambda args, lhs=lhs, self=self: \
                    self.buildASTNode(args, lhs)
        lhs, rhs = rule
        return rule, rebind(lhs)

    def buildASTNode(self, args, lhs):
        """Attach *args* (terminals and subtrees alike) as the children
        of a new *lhs* node.

        The original SPARK routed non-AST args through a ``terminal()``
        hook; this trimmed version had an if/else whose branches were
        identical, so the loop is collapsed to a plain copy — behavior
        is unchanged.
        """
        children = list(args)
        return self.nonterminal(lhs, children)

    def nonterminal(self, type, args):
        """Create an AST node of *type* holding *args* as children."""
        rv = self.AST(type)
        rv[:len(args)] = args
        return rv
#
# GenericASTTraversal is a Visitor pattern according to Design Patterns. For
# each node it attempts to invoke the method n_<node type>, falling
# back onto the default() method if the n_* can't be found. The preorder
# traversal also looks for an exit hook named n_<node type>_exit (no default
# routine is called if it's not found). To prematurely halt traversal
# of a subtree, call the prune() method -- this only makes sense for a
# preorder traversal. Node type is determined via the typestring() method.
#
class GenericASTTraversalPruningException:
    """Raised by GenericASTTraversal.prune() to abort traversal of the
    current subtree.

    NOTE(review): a plain (old-style) class; under Python 3 it would
    need to inherit from Exception to be raisable.
    """
    pass
class GenericASTTraversal:
    """Preorder visitor over an AST (Visitor pattern, per Design Patterns).

    For each node the method ``n_<type>`` is invoked if present, otherwise
    ``default()``; after the children are visited an optional
    ``n_<type>_exit`` hook runs.  A hook may call ``prune()`` to skip the
    current node's subtree.  Node types come from ``typestring()``.
    """
    def __init__(self, ast):
        self.ast = ast

    def typestring(self, node):
        """Return the dispatch key for *node*."""
        return node.type

    def prune(self):
        """Abort traversal of the current subtree (preorder only)."""
        raise GenericASTTraversalPruningException

    def preorder(self, node=None):
        """Visit *node* (default: the root AST) and its children in
        preorder, honoring the n_* hooks and prune()."""
        if node is None:
            node = self.ast
        name = 'n_' + self.typestring(node)
        try:
            visit = getattr(self, name, None)
            if visit is not None:
                visit(node)
            else:
                self.default(node)
        except GenericASTTraversalPruningException:
            return
        for child in node:
            self.preorder(child)
        exit_hook = getattr(self, name + '_exit', None)
        if exit_hook is not None:
            exit_hook(node)

    def default(self, node):
        """Fallback visit hook; does nothing."""
        pass
| Katharsis/unfrozen_binary | uncompyle2/spark.py | Python | mit | 23,261 |
import warnings
from xigt.consts import (
ID,
TYPE,
ALIGNMENT,
CONTENT,
SEGMENTATION
)
from xigt.errors import (
XigtError,
XigtStructureError
)
from xigt.ref import id_re
# list.clear() doesn't exist in Python2, but del list[:] has other problems
# Pick an implementation once at import time: Python 3 lists have
# .clear(); on Python 2 we fall back to slice deletion.
if hasattr(list, 'clear'):
    def listclear(x): list.clear(x)
else:
    def listclear(x): del x[:]
def _has_parent(obj):
return hasattr(obj, '_parent') and obj._parent is not None
class XigtContainerMixin(list):
    """
    Common methods for accessing subelements in XigtCorpus, Igt, and
    Tier objects.

    Extends ``list`` with an id-based index (``_dict``) so contained
    objects can be retrieved either by position or by their ``id``.
    """
    def __init__(self, container=None, contained_type=None):
        # id -> contained object, kept in sync by the mutators below
        self._dict = {}
        # optional isinstance() restriction enforced by _assert_type()
        self._contained_type = contained_type
        # the object children record as their _parent; defaults to self
        self._container = container if container is not None else self
    def __eq__(self, other):
        """Equal iff same contained type, same length, equal elements."""
        try:
            return (
                # quick check for comparing, e.g., XigtCorpus and Igt
                self._contained_type == other._contained_type
                and len(self) == len(other)
                and all(a == b for a, b in zip(self, other))
            )
        except AttributeError:
            return False
    def __getitem__(self, obj_id):
        """Look up by list index/slice, by id, or by stringified index."""
        if isinstance(obj_id, (int, slice)):
            return list.__getitem__(self, obj_id)
        elif obj_id in self._dict:
            return self._dict[obj_id]
        else:
            # last resort: a numeric string like '0' is treated as an index
            try:
                return list.__getitem__(self, int(obj_id))
            except ValueError:
                pass
            raise KeyError(obj_id)
    def __setitem__(self, idx, obj):
        # only allow list indices, not dict keys (IDs)
        # NOTE: this method is destructive. check for broken refs here?
        self._assert_type(obj)
        try:
            cur_obj = list.__getitem__(self, idx)
        except TypeError:
            # a numeric string index; retry as int
            idx = int(idx)
            cur_obj = list.__getitem__(self, idx)
        if cur_obj.id is not None:
            del self._dict[cur_obj.id]
        self._create_id_mapping(obj)
        list.__setitem__(self, idx, obj)
    def __delitem__(self, obj_id):
        # NOTE: this method is destructive. check for broken refs here?
        obj = self[obj_id]
        self.remove(obj)
    def get(self, obj_id, default=None):
        """Like __getitem__ but returns *default* instead of raising."""
        try:
            return self[obj_id]
        except (KeyError, IndexError):
            pass
        return default
    def select(self, **kwargs):
        """Iterate over contained objects whose attributes match *kwargs*."""
        # handle namespace separately so we can lookup the nsmap
        if 'namespace' in kwargs and kwargs['namespace'] in self.nsmap:
            kwargs['namespace'] = self.nsmap[kwargs['namespace']]
        def match(x):
            return all(getattr(x, k, None) == v for k, v in kwargs.items())
        return filter(match, self)
    def _assert_type(self, obj):
        # reject objects of the wrong type up front
        if self._contained_type and not isinstance(obj, self._contained_type):
            raise XigtStructureError(
                'Only {} objects are allowed in this container.'
                .format(self._contained_type.__name__)
            )
    def append(self, obj):
        """Append *obj*, setting its parent and indexing its id."""
        self._assert_type(obj)
        obj._parent = self._container
        self._create_id_mapping(obj)
        list.append(self, obj)
    def insert(self, i, obj):
        """Insert *obj* at position *i*, setting parent and id index."""
        self._assert_type(obj)
        obj._parent = self._container
        self._create_id_mapping(obj)
        list.insert(self, i, obj)
    def extend(self, objs):
        """Append each object in *objs* (with full bookkeeping)."""
        for obj in objs:
            self.append(obj)
    def remove(self, obj):
        # NOTE: this method is destructive. check for broken refs here?
        if obj.id is not None:
            del self._dict[obj.id]
        list.remove(self, obj)
    def clear(self):
        """Remove all contained objects and drop the id index."""
        self._dict.clear()
        # list.clear doesn't exist in Python2
        # list.clear(self)
        listclear(self)
    def _create_id_mapping(self, obj):
        # ids must be unique within a container
        if obj.id is not None:
            if obj.id in self._dict:
                raise XigtError(
                    'Id "{}" already exists in collection.'.format(obj.id),
                )
            self._dict[obj.id] = obj
    def refresh_index(self):
        """Rebuild the id -> object index from the list contents."""
        self._dict = {}
        for obj in self:
            self._create_id_mapping(obj)
    # deprecated methods
    def add(self, obj):
        """Deprecated alias for append()."""
        warnings.warn(
            'add(x) is deprecated; use append(x) instead.',
            DeprecationWarning
        )
        return self.append(obj)
    def add_list(self, objs):
        """Deprecated alias for extend()."""
        warnings.warn(
            'add_list(xs) is deprecated; use extend(xs) instead.',
            DeprecationWarning
        )
        return self.extend(objs)
class XigtAttributeMixin(object):
    """Mixin providing id, type, namespace, nsmap, and a generic
    attribute dictionary with namespace-aware, optionally inherited
    lookup via get_attribute()."""
    def __init__(self, id=None, type=None, attributes=None,
                 namespace=None, nsmap=None):
        self.id = id
        self.type = type
        self.attributes = dict(attributes or [])
        self.namespace = namespace
        self.nsmap = nsmap
        # if id is not None or ID not in self.attributes:
        #     self.attributes[ID] = id
        # if type is not None or TYPE not in self.attributes:
        #     self.attributes[TYPE] = type
    def __eq__(self, other):
        """Equal iff id, type, attributes, and namespace all match
        (nsmap is deliberately excluded)."""
        try:
            return (
                self.id == other.id
                and self.type == other.type
                and self.attributes == other.attributes
                and self.namespace == other.namespace
                # and self.nsmap == other.nsmap
            )
        except AttributeError:
            return False
    def get_attribute(self, key, default=None, inherit=False, namespace=None):
        """Return attribute *key*, expanding ``prefix:name`` or a given
        *namespace* to Clark notation ``{uri}name`` via nsmap; when
        *inherit* is true, fall back to the parent's attributes."""
        if key is None:
            raise ValueError(
                'Attribute key must be of type str, not '
                + key.__class__.__name__
            )
        if not key.startswith('{') and ':' in key:
            # 'prefix:name' -> '{uri}name' using the prefix's mapping
            prefix, suffix = key.split(':', 1)
            key = '{%s}%s' % (self.nsmap[prefix], suffix)
        elif namespace in self.nsmap:
            key = '{%s}%s' % (self.nsmap[namespace], key)
        elif namespace:
            # namespace given as a literal URI
            key = '{%s}%s' % (namespace, key)
        try:
            return self.attributes[key]
        except KeyError:
            if inherit and _has_parent(self):
                return self._parent.get_attribute(
                    key, default, inherit, namespace=namespace
                )
            else:
                return default
    @property
    def id(self):
        return self._id
    @id.setter
    def id(self, value):
        # ids must match the Xigt id pattern (validated by id_re)
        if value is not None and not id_re.match(value):
            raise ValueError('Invalid ID: {}'.format(value))
        self._id = value
    @property
    def nsmap(self):
        # an unset nsmap delegates to the parent's (or an empty mapping)
        if self._nsmap is None:
            if _has_parent(self):
                return self._parent.nsmap
            else:
                return {}
        else:
            return self._nsmap
    @nsmap.setter
    def nsmap(self, value):
        if value is not None:
            value = dict(value or [])
        self._nsmap = value
    # no validation for type yet, so the property isn't necessary
    # @property
    # def type(self):
    #     return self._type
    # @type.setter
    # def type(self, value):
    #     self._type = value
class XigtReferenceAttributeMixin(object):
    """Mixin for elements that may carry the reference attributes
    *alignment*, *content*, and *segmentation*.

    The values are stored in ``self.attributes``, which the inheriting
    class (via XigtAttributeMixin) must provide before this __init__
    runs.
    """
    def __init__(self, alignment=None, content=None, segmentation=None):
        # The Xigt model forbids segmentation alongside the other two
        # reference attributes.
        if segmentation and (content or alignment):
            raise XigtError(
                'The "segmentation" reference attribute cannot co-occur with '
                'the "content" or "alignment" reference attributes.'
            )
        if alignment is not None:
            self.attributes[ALIGNMENT] = alignment
        if content is not None:
            self.attributes[CONTENT] = content
        if segmentation is not None:
            self.attributes[SEGMENTATION] = segmentation

    def referents(self, refattrs=None):
        """Return the ids this element refers to (delegates to the Igt)."""
        # getattr() needs a default here: without one a missing attribute
        # raised AttributeError and the intended XigtError below was
        # unreachable for that case.
        if not getattr(self, 'igt', None):
            raise XigtError('Cannot retrieve referents; unspecified IGT.')
        if not getattr(self, 'id', None):
            raise XigtError('Cannot retrieve referents; unspecified id.')
        return self.igt.referents(self.id, refattrs=refattrs)

    def referrers(self, refattrs=None):
        """Return the ids of elements that refer to this one."""
        if not getattr(self, 'igt', None):
            raise XigtError('Cannot retrieve referrers; unspecified IGT.')
        if not getattr(self, 'id', None):
            raise XigtError('Cannot retrieve referrers; unspecified id.')
        return self.igt.referrers(self.id, refattrs=refattrs)

    @property
    def alignment(self):
        return self.attributes.get(ALIGNMENT)

    @alignment.setter
    def alignment(self, value):
        self.attributes[ALIGNMENT] = value

    @property
    def content(self):
        return self.attributes.get(CONTENT)

    @content.setter
    def content(self, value):
        self.attributes[CONTENT] = value

    @property
    def segmentation(self):
        return self.attributes.get(SEGMENTATION)

    @segmentation.setter
    def segmentation(self, value):
        self.attributes[SEGMENTATION] = value
| goodmami/xigt | xigt/mixins.py | Python | mit | 9,014 |
import os.path
from django.core.urlresolvers import reverse
from django.template import Context, Template
from django.template.defaultfilters import slugify
from django.test import TestCase
from django.test.client import Client
from radpress.compat import get_user_model
User = get_user_model()
from radpress.models import Article, Page, Tag
from radpress.readers import get_reader
from radpress.settings import CONTEXT_DATA, MORE_TAG
from radpress.templatetags.radpress_tags import radpress_get_url
class RadpressTestCase(TestCase):
    """Base test case for radpress: loads the fixture data and prepares
    a test client plus two users with known passwords."""
    fixtures = [os.path.join(os.path.dirname(__file__), 'data.json')]
    def setUp(self):
        self.client = Client()
        # define article
        self.article1 = Article.objects.get(pk=1)
        # define user (fixture users have unusable passwords, so set one)
        self.user1 = User.objects.get(username='gokmen')
        self.user1.set_password('secret')
        self.user1.save()
        # define second user password
        self.user2 = User.objects.get(username='defne')
        self.user2.set_password('secret')
        self.user2.save()
    def render_template(self, template, context):
        """Render a template string with the given context dict."""
        context = Context(context)
        return Template(template).render(context)
class RadpressReaderTestCase(RadpressTestCase):
    """Abstract reader test; subclasses set ``markup`` and ``file_path``.

    The metadata/content tests below read ``self.metadata`` and
    ``self.content_body``, which only exist when a subclass supplies
    ``file_path`` — run these via a concrete subclass.
    """
    markup = None
    file_path = None
    def setUp(self):
        # default markup name is reStructuredText
        self.reader = get_reader(markup=self.markup)
        if self.file_path is not None:
            # parse the sample document shipped next to this test module
            # NOTE(review): the file handle is never closed
            file_path = os.path.join(os.path.dirname(__file__), self.file_path)
            content = open(file_path).read()
            self.content_body, self.metadata = self.reader(content).read()
    def test_check_metadata(self):
        """The sample document's metadata fields parse as expected."""
        self.assertEqual(self.metadata['image'], '1')
        self.assertTrue(self.metadata['published'])
        self.assertEqual(self.metadata['slug'], 'samuel-l-ipsum')
        self.assertEqual(self.metadata['title'], 'Samuel L. Ipsum')
        for tag in ['ipsum', 'samuel', 'lorem']:
            self.assertIn(tag, self.metadata['tags'])
    def test_contents(self):
        """Re-reading each stored article reproduces its content_body."""
        for article in Article.objects.filter(markup=self.markup):
            content_body, metadata = self.reader(article.content).read()
            self.assertEqual(article.content_body, content_body)
    def test_more_tag(self):
        """The sample document contains the configured MORE_TAG marker."""
        self.assertIn(MORE_TAG, self.content_body)
class BaseTest(RadpressTestCase):
    """View, model, and template-tag behavior tests against the fixture."""
    def test_all_published_articles(self):
        """all_published() counts match the fixture contents."""
        # check published article count
        self.assertEqual(Article.objects.all_published().count(), 1)
        # check published page count
        self.assertEqual(Page.objects.all_published().count(), 2)
    def test_open_private_and_public_article_details(self):
        """Published articles return 200; unpublished ones 404."""
        for article in Article.objects.all():
            status_code = 200 if article.is_published else 404
            response = self.client.get(article.get_absolute_url())
            self.assertEqual(response.status_code, status_code)
    def test_preview_page(self):
        """Preview requires login (302) and rejects GET (405)."""
        # try to get response with GET method
        response = self.client.get(reverse('radpress-preview'))
        expected_status_code = 302  # because, login required
        self.assertEqual(response.status_code, expected_status_code)
        self.client.login(username='gokmen', password='secret')
        response = self.client.get(reverse('radpress-preview'))
        expected_status_code = 405  # because, view only allows `post` method
        self.assertEqual(response.status_code, expected_status_code)
    def test_slugs(self):
        """Stored article slugs are already in slugified form."""
        for article in Article.objects.all():
            slug = slugify(article.slug)
            self.assertEqual(article.slug, slug)
    def test_tags(self):
        """Tag creation, slugging, article tagging, and tag filtering."""
        # checks tag count from fixture
        self.assertEqual(Tag.objects.count(), 2)
        # create new tag and check slug
        tag_name = 'how I met your mother'
        tag = Tag.objects.create(name=tag_name)
        self.assertEqual(tag.slug, slugify(tag_name))
        # add tag to a published article and check count of tags
        self.article1.articletag_set.create(tag=tag)
        self.assertEqual(self.article1.tags.count(), 1)
        # try to filter articles for tags
        articles = Article.objects.filter(tags__name=tag_name)
        self.assertEqual(articles.count(), 1)
    def test_access_not_published_article(self):
        """
        Unpublished articles/pages are hidden from anonymous users and
        non-owners; superusers see both, and an author sees only their
        own article.
        """
        article = Article.objects.get(slug='i-have-a-dream')
        page = Page.objects.get(slug='page-3-not-published')
        def get_responses():
            # fetch both detail pages for the current session
            response_article = self.client.get(
                reverse('radpress-article-detail', args=[article.slug]))
            response_page = self.client.get(
                reverse('radpress-page-detail', args=[page.slug]))
            return response_article, response_page
        # if user is not authenticated to site:
        response_article, response_page = get_responses()
        self.assertEqual(response_article.status_code, 404)
        self.assertEqual(response_page.status_code, 404)
        # if user is not superuser and not author of the entries:
        self.client.login(username=self.user2.username, password='secret')
        self.assertFalse(self.user2.is_superuser)
        response_article, response_page = get_responses()
        self.assertEqual(response_article.status_code, 404)
        self.assertEqual(response_page.status_code, 404)
        # if user is superuser but not the author of entries:
        self.user2.is_superuser = True
        self.user2.save()
        self.assertTrue(self.user2.is_superuser)
        response_article, response_page = get_responses()
        self.assertEqual(response_article.status_code, 200)
        self.assertEqual(response_page.status_code, 200)
        # if user is not superuser but the author of entries:
        article.author = self.user2
        article.save()
        self.user2.is_superuser = False
        self.user2.save()
        self.assertFalse(self.user2.is_superuser)
        response_article, response_page = get_responses()
        self.assertEqual(response_article.status_code, 200)
        self.assertEqual(response_page.status_code, 404)
    def test_context_data(self):
        """
        Important! All context data keys should start with the
        `RADPRESS_` prefix and be uppercase.
        """
        for context in CONTEXT_DATA.keys():
            self.assertTrue(context.startswith('RADPRESS_'))
            self.assertEqual(context, context.upper())
    def test_radpress_get_url_tag(self):
        """The radpress_get_url tag yields DOMAIN + absolute URL."""
        response = self.client.get(reverse('radpress-article-list'))
        self.assertIn('DOMAIN', response.context_data)
        context = response.context_data
        for article in Article.objects.all():
            article_url = context['DOMAIN'] + article.get_absolute_url()
            expected_url = radpress_get_url(context, article)
            self.assertEqual(article_url, expected_url)
class RestructuredtextTest(RadpressReaderTestCase):
    """Concrete reader tests for reStructuredText markup, driven by the
    sample document test_content.rst."""
    markup = 'restructuredtext'
    file_path = 'test_content.rst'
    def test_pygmentize(self):
        """Code blocks in the sample are rendered via Pygments tables."""
        self.assertIn('<table class="highlighttable">', self.content_body)
        self.assertIn('<td class="linenos">', self.content_body)
| AakashRaina/radpress | radpress/tests/base.py | Python | mit | 7,339 |
import os
import peru.runtime as runtime
import shared
class RuntimeTest(shared.PeruTest):
    def test_find_peru_file(self):
        """find_project_file walks upward from a nested dir until it
        finds the named marker file."""
        # Marker file sits two levels above the directory we start from.
        scratch = shared.create_dir({
            'a/find_me': 'junk',
            'a/b/c/junk': 'junk',
        })
        start_dir = os.path.join(scratch, 'a', 'b', 'c')
        found = runtime.find_project_file(start_dir, 'find_me')
        self.assertEqual(os.path.join(scratch, 'a', 'find_me'), found)
| oconnor663/peru | tests/test_runtime.py | Python | mit | 466 |
import six
from unittest import TestCase
from dark.reads import Read
from dark.local_align import LocalAlignment
class TestLocalAlign(TestCase):
"""
Test the LocalAlignment class.
With match +1, mismatch -1, gap open -1, gap extend -1 and
gap extend decay 0.0.
"""
def testPositiveMismatch(self):
"""
If the mismatch value passed is positive, an exception
must be raised.
"""
seq1 = Read('seq1', 'a')
seq2 = Read('seq2', 'a')
six.assertRaisesRegex(self, ValueError, 'Mismatch must be negative',
LocalAlignment, seq1, seq2, mismatch=3)
def testZeroMismatch(self):
"""
If the mismatch value passed is zero, an exception
must be raised.
"""
seq1 = Read('seq1', 'a')
seq2 = Read('seq2', 'a')
six.assertRaisesRegex(self, ValueError, 'Mismatch must be negative',
LocalAlignment, seq1, seq2, mismatch=0)
def testPositiveGap(self):
"""
If the gap value passed is positive, an exception
must be raised.
"""
seq1 = Read('seq1', 'a')
seq2 = Read('seq2', 'a')
six.assertRaisesRegex(self, ValueError, 'Gap must be negative',
LocalAlignment, seq1, seq2, gap=3)
def testZeroGap(self):
"""
If the gap value passed is zero, an exception
must be raised.
"""
seq1 = Read('seq1', 'a')
seq2 = Read('seq2', 'a')
six.assertRaisesRegex(self, ValueError, 'Gap must be negative',
LocalAlignment, seq1, seq2, gap=0)
def testPositiveGapExtend(self):
"""
If the gap extend value passed is positive, an exception
must be raised.
"""
seq1 = Read('seq1', 'a')
seq2 = Read('seq2', 'a')
six.assertRaisesRegex(self, ValueError,
'Gap extension penalty cannot be positive',
LocalAlignment, seq1, seq2, gapExtend=3)
def testFirstSequenceEmpty(self):
"""
If the first sequence passed is empty, an exception must be raised.
"""
seq1 = Read('seq1', '')
seq2 = Read('seq2', 'agtcagtcagtc')
six.assertRaisesRegex(self, ValueError, 'Empty sequence: seq1',
LocalAlignment, seq1, seq2)
def testSecondSequenceEmpty(self):
"""
If the second sequence passed is empty, an exception must be raised.
"""
seq1 = Read('seq1', 'agtcagtcagtc')
seq2 = Read('seq2', '')
six.assertRaisesRegex(self, ValueError, 'Empty sequence: seq2',
LocalAlignment, seq1, seq2)
def testBothSequencesEmpty(self):
"""
If two empty sequences are passed, an exception must be raised.
"""
seq1 = Read('seq1', '')
seq2 = Read('seq2', '')
six.assertRaisesRegex(self, ValueError, 'Empty sequence: seq1',
LocalAlignment, seq1, seq2)
def testGapAtStartOfSeq1(self):
seq1 = Read('seq1', 'gaatcg')
seq2 = Read('seq2', 'cgaatcg')
align = LocalAlignment(seq1, seq2)
result = align.createAlignment(resultFormat=str)
alignment = ('\nCigar string of aligned region: 6=\n'
'seq1 Match start: 1 Match end: 6\n'
'seq2 Match start: 2 Match end: 7\n'
'seq1 1 GAATCG 6\n'
' ||||||\n'
'seq2 2 GAATCG 7')
self.assertEqual(result, alignment)
def testGapAtStartOfSeq2(self):
seq1 = Read('seq1', 'cgaatcg')
seq2 = Read('seq2', 'gaatcg')
align = LocalAlignment(seq1, seq2)
result = align.createAlignment(resultFormat=str)
alignment = ('\nCigar string of aligned region: 6=\n'
'seq1 Match start: 2 Match end: 7\n'
'seq2 Match start: 1 Match end: 6\n'
'seq1 2 GAATCG 7\n'
' ||||||\n'
'seq2 1 GAATCG 6')
self.assertEqual(result, alignment)
def testGapAtEndOfSeq1(self):
seq1 = Read('seq1', 'cgaatc')
seq2 = Read('seq2', 'cgaatcg')
align = LocalAlignment(seq1, seq2)
result = align.createAlignment(resultFormat=str)
alignment = ('\nCigar string of aligned region: 6=\n'
'seq1 Match start: 1 Match end: 6\n'
'seq2 Match start: 1 Match end: 6\n'
'seq1 1 CGAATC 6\n'
' ||||||\n'
'seq2 1 CGAATC 6')
self.assertEqual(result, alignment)
def testGapAtEndOfSeq2(self):
    """
    When seq2 lacks the trailing base of seq1, the alignment must cover
    the six-base common prefix 'cgaatc' of both sequences.
    """
    seq1 = Read('seq1', 'cgaatcg')
    seq2 = Read('seq2', 'cgaatc')
    align = LocalAlignment(seq1, seq2)
    result = align.createAlignment(resultFormat=str)
    alignment = ('\nCigar string of aligned region: 6=\n'
                 'seq1 Match start: 1 Match end: 6\n'
                 'seq2 Match start: 1 Match end: 6\n'
                 'seq1 1 CGAATC 6\n'
                 ' ||||||\n'
                 'seq2 1 CGAATC 6')
    self.assertEqual(result, alignment)
def testGapAtBothEndsOfSeq1(self):
    """
    When seq1 is seq2 with one base trimmed from each end, the aligned
    region must be the five-base core 'gaatc' common to both.
    """
    seq1 = Read('seq1', 'gaatc')
    seq2 = Read('seq2', 'cgaatcg')
    align = LocalAlignment(seq1, seq2)
    result = align.createAlignment(resultFormat=str)
    alignment = ('\nCigar string of aligned region: 5=\n'
                 'seq1 Match start: 1 Match end: 5\n'
                 'seq2 Match start: 2 Match end: 6\n'
                 'seq1 1 GAATC 5\n'
                 ' |||||\n'
                 'seq2 2 GAATC 6')
    self.assertEqual(result, alignment)
def testGapAtBothEndsOfSeq2(self):
    """
    When seq2 is seq1 with one base trimmed from each end, the aligned
    region must be the five-base core 'gaatc' common to both.
    """
    seq1 = Read('seq1', 'cgaatcg')
    seq2 = Read('seq2', 'gaatc')
    # Build and run the alignment once (a duplicated copy of these two
    # statements was removed; the alignment was being computed twice).
    align = LocalAlignment(seq1, seq2)
    result = align.createAlignment(resultFormat=str)
    alignment = ('\nCigar string of aligned region: 5=\n'
                 'seq1 Match start: 2 Match end: 6\n'
                 'seq2 Match start: 1 Match end: 5\n'
                 'seq1 2 GAATC 6\n'
                 ' |||||\n'
                 'seq2 1 GAATC 5')
    self.assertEqual(result, alignment)
def testAlignmentWithGapInMiddle(self):
    """
    A deletion inside the aligned region must appear as a '-' in the
    seq2 row and a 'D' in the CIGAR string (here 2=1D1=).
    """
    seq1 = Read('seq1', 'agtcagtcagtc')
    seq2 = Read('seq2', 'cgaatcg')
    align = LocalAlignment(seq1, seq2)
    result = align.createAlignment(resultFormat=str)
    alignment = ('\nCigar string of aligned region: 2=1D1=\n'
                 'seq1 Match start: 7 Match end: 10\n'
                 'seq2 Match start: 5 Match end: 7\n'
                 'seq1 7 TCAG 10\n'
                 ' || |\n'
                 'seq2 5 TC-G 7')
    self.assertEqual(result, alignment)
def testTwoEqualSequences(self):
    """
    When two identical sequences are given, the result should
    show that the sequences completely match.
    """
    firstRead = Read('seq1', 'cgaatcg')
    secondRead = Read('seq2', 'cgaatcg')
    aligner = LocalAlignment(firstRead, secondRead)
    observed = aligner.createAlignment(resultFormat=str)
    expected = ('\nCigar string of aligned region: 7=\n'
                'seq1 Match start: 1 Match end: 7\n'
                'seq2 Match start: 1 Match end: 7\n'
                'seq1 1 CGAATCG 7\n'
                ' |||||||\n'
                'seq2 1 CGAATCG 7')
    self.assertEqual(observed, expected)
def testTwoCompletelyDifferentSequences(self):
    """
    When two completely different sequences are given, the result
    should be the two sequences with an empty alignment.
    """
    allAs = Read('seq1', 'aaaaaa')
    allGs = Read('seq2', 'gggggg')
    aligner = LocalAlignment(allAs, allGs)
    observed = aligner.createAlignment(resultFormat=str)
    expected = '\nNo alignment between seq1 and seq2\n'
    self.assertEqual(observed, expected)
def testWikiAnswer(self):
    """
    Test the example given in Wikipedia:
    http://en.wikipedia.org/wiki/Smith%E2%80%93Waterman_algorithm
    """
    seq1 = Read('seq1', 'ACACACTA')
    seq2 = Read('seq2', 'AGCACACA')
    # match=2 reproduces the scoring used in the Wikipedia worked example.
    align = LocalAlignment(seq1, seq2, match=2)
    result = align.createAlignment(resultFormat=str)
    alignment = ('\nCigar string of aligned region: 1=1I5=1D1=\n'
                 'seq1 Match start: 1 Match end: 8\n'
                 'seq2 Match start: 1 Match end: 8\n'
                 'seq1 1 A-CACACTA 8\n'
                 ' | ||||| |\n'
                 'seq2 1 AGCACAC-A 8')
    self.assertEqual(result, alignment)
def testWikiAnswerWithMatchOne(self):
    """
    Test the example given in Wikipedia
    http://en.wikipedia.org/wiki/Smith%E2%80%93Waterman_algorithm
    Wikipedia uses a match score of two, here we use a score of one.
    """
    seq1 = Read('seq1', 'ACACACTA')
    seq2 = Read('seq2', 'AGCACACA')
    align = LocalAlignment(seq1, seq2, match=1)
    result = align.createAlignment(resultFormat=str)
    # With the lower match reward the optimal local alignment is shorter
    # than in the match=2 case above (no leading insertion).
    alignment = ('\nCigar string of aligned region: 5=1D1=\n'
                 'seq1 Match start: 2 Match end: 8\n'
                 'seq2 Match start: 3 Match end: 8\n'
                 'seq1 2 CACACTA 8\n'
                 ' ||||| |\n'
                 'seq2 3 CACAC-A 8')
    self.assertEqual(result, alignment)
def testWikiAnswerAsDict(self):
    """
    Test the example given in Wikipedia:
    http://en.wikipedia.org/wiki/Smith%E2%80%93Waterman_algorithm
    with the return result being a dict.
    """
    seq1 = Read('seq1', 'ACACACTA')
    seq2 = Read('seq2', 'AGCACACA')
    align = LocalAlignment(seq1, seq2, match=2)
    # No resultFormat argument: the default result is a dict.
    result = align.createAlignment()
    self.assertEqual(
        {
            'cigar': '1=1I5=1D1=',
            'sequence1Start': 1,
            'sequence1End': 8,
            'sequence2Start': 1,
            'sequence2End': 8,
            'text': [
                'seq1 1 A-CACACTA 8',
                ' | ||||| |',
                'seq2 1 AGCACAC-A 8',
            ]
        },
        result
    )
def testWikiAnswerWithMatchOneAsDict(self):
    """
    Test the example given in Wikipedia
    http://en.wikipedia.org/wiki/Smith%E2%80%93Waterman_algorithm
    Wikipedia uses a match score of two, here we use a score of one.
    Get the result as a dict.
    """
    seq1 = Read('seq1', 'ACACACTA')
    seq2 = Read('seq2', 'AGCACACA')
    align = LocalAlignment(seq1, seq2, match=1)
    # No resultFormat argument: the default result is a dict.
    result = align.createAlignment()
    self.assertEqual(
        {
            'cigar': '5=1D1=',
            'sequence1Start': 2,
            'sequence1End': 8,
            'sequence2Start': 3,
            'sequence2End': 8,
            'text': [
                'seq1 2 CACACTA 8',
                ' ||||| |',
                'seq2 3 CACAC-A 8',
            ]
        },
        result
    )
| terrycojones/dark-matter | test/test_local_align.py | Python | mit | 11,532 |
# -*- coding: utf-8 -*-
import boto.sqs
import uuid
from beaver.transports.base_transport import BaseTransport
from beaver.transports.exception import TransportException
class SqsTransport(BaseTransport):
    """Beaver transport that ships formatted log lines to an AWS SQS queue."""

    def __init__(self, beaver_config, logger=None):
        """Connect to SQS and resolve the target queue.

        Raises TransportException if the connection cannot be made or the
        queue does not exist.
        """
        super(SqsTransport, self).__init__(beaver_config, logger=logger)

        self._access_key = beaver_config.get('sqs_aws_access_key')
        self._secret_key = beaver_config.get('sqs_aws_secret_key')
        self._region = beaver_config.get('sqs_aws_region')
        self._queue_name = beaver_config.get('sqs_aws_queue')

        try:
            if self._access_key is None and self._secret_key is None:
                # No explicit credentials: let boto use its own lookup chain.
                self._connection = boto.sqs.connect_to_region(self._region)
            else:
                self._connection = boto.sqs.connect_to_region(self._region,
                                                              aws_access_key_id=self._access_key,
                                                              aws_secret_access_key=self._secret_key)

            if self._connection is None:
                self._logger.warn('Unable to connect to AWS - check your AWS credentials')
                raise TransportException('Unable to connect to AWS - check your AWS credentials')

            self._queue = self._connection.get_queue(self._queue_name)

            if self._queue is None:
                raise TransportException('Unable to access queue with name {0}'.format(self._queue_name))
        except Exception as e:
            # 'except Exception, e' was Python-2-only syntax; 'as' works on
            # 2.6+ and 3.x.  str(e) replaces the deprecated e.message, which
            # is unset on many exception types.
            raise TransportException(str(e))

    def callback(self, filename, lines, **kwargs):
        """Format incoming lines and send them to SQS in batches of 10."""
        timestamp = self.get_timestamp(**kwargs)
        if kwargs.get('timestamp', False):
            del kwargs['timestamp']

        message_batch = []
        for line in lines:
            # Each batch entry is (unique id, message body, delay seconds).
            message_batch.append((uuid.uuid4(), self.format(filename, line, timestamp, **kwargs), 0))
            if len(message_batch) == 10:  # SQS can only handle up to 10 messages in batch send
                self._logger.debug('Flushing 10 messages to SQS queue')
                self._send_message_batch(message_batch)
                message_batch = []

        if len(message_batch) > 0:
            self._logger.debug('Flushing last {0} messages to SQS queue'.format(len(message_batch)))
            self._send_message_batch(message_batch)
        return True

    def _send_message_batch(self, message_batch):
        """Send one batch via write_batch, raising TransportException on failure."""
        try:
            result = self._queue.write_batch(message_batch)
            if not result:
                self._logger.error('Error occurred sending messages to SQS queue {0}. result: {1}'.format(
                    self._queue_name, result))
                raise TransportException('Error occurred sending message to queue {0}'.format(self._queue_name))
        except Exception as e:
            self._logger.exception('Exception occurred sending batch to SQS queue')
            raise TransportException(str(e))

    def interrupt(self):
        # Nothing to clean up on interrupt.
        return True

    def unhandled(self):
        # Nothing special to do for unhandled signals.
        return True
| moniker-dns/debian-beaver | beaver/transports/sqs_transport.py | Python | mit | 3,043 |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
from knack.util import CLIError
from azure.cli.core.azclierror import InvalidArgumentValueError, ArgumentUsageError
from azure.cli.core.util import is_guid
from azure.graphrbac.models import GraphErrorException
from msrestazure.azure_exceptions import CloudError
from .._client_factory import cf_synapse_role_assignments, cf_synapse_role_definitions, cf_graph_client_factory
from ..constant import ITEM_NAME_MAPPING
# List Synapse Role Assignment
def list_role_assignments(cmd, workspace_name, role=None, assignee=None, assignee_object_id=None,
                          scope=None, item=None, item_type=None):
    """List Synapse role assignments, optionally filtered by role, assignee
    (principal name or object id), scope, or a workspace item.

    Raises ArgumentUsageError when both --assignee and --assignee-object-id
    are given, or when only one of --item / --item-type is given.
    """
    if bool(assignee) and bool(assignee_object_id):
        raise ArgumentUsageError('usage error: --assignee STRING | --assignee-object-id GUID')
    if bool(item) != bool(item_type):
        raise ArgumentUsageError('usage error: --item-type STRING --item STRING')
    # Only resolve the assignee through Graph when a name (not a GUID) was given.
    return _list_role_assignments(cmd, workspace_name, role, assignee or assignee_object_id,
                                  scope, resolve_assignee=(not assignee_object_id), item=item, item_type=item_type)
def _list_role_assignments(cmd, workspace_name, role=None, assignee=None, scope=None,
                           resolve_assignee=True, item=None, item_type=None):
    """Prepare scope, role ID and resolve object ID from Graph API."""
    # Build a concrete scope string only when any scope-related input was given.
    if any([scope, item, item_type]):
        scope = _build_role_scope(workspace_name, scope, item, item_type)
    role_id = _resolve_role_id(cmd, role, workspace_name)
    # assignee may already be an object id; resolve via Graph otherwise.
    object_id = _resolve_object_id(cmd, assignee, fallback_to_object_id=True) if resolve_assignee else assignee
    client = cf_synapse_role_assignments(cmd.cli_ctx, workspace_name)
    role_assignments = client.list_role_assignments(role_id, object_id, scope).value
    return role_assignments
# Show Synapse Role Assignment By Id
def get_role_assignment_by_id(cmd, workspace_name, role_assignment_id):
    """Fetch a single Synapse role assignment by its GUID."""
    assignments_client = cf_synapse_role_assignments(cmd.cli_ctx, workspace_name)
    return assignments_client.get_role_assignment_by_id(role_assignment_id)
# Delete Synapse Role Assignment
def delete_role_assignment(cmd, workspace_name, ids=None, assignee=None, assignee_object_id=None, role=None,
                           scope=None, item=None, item_type=None):
    """Delete Synapse role assignments, either by an explicit list of ids or
    by matching the given role/assignee/scope/item filters.

    Raises ArgumentUsageError on conflicting or missing arguments and
    CLIError when the filters match nothing.
    """
    client = cf_synapse_role_assignments(cmd.cli_ctx, workspace_name)
    if not any([ids, assignee, assignee_object_id, role, scope, item, item_type]):
        # Fixed grammar ("No argument are provided") in the user-facing message.
        raise ArgumentUsageError('usage error: No arguments are provided. --assignee STRING | --ids GUID')
    if ids:
        if any([assignee, assignee_object_id, role, scope, item, item_type]):
            # The previous message named a nonexistent '--principal-type' flag
            # and spelled --assignee-object-id with underscores; it now lists
            # the actual mutually-exclusive flags (--item / --item-type).
            raise ArgumentUsageError('You should not provide --role or --assignee or --assignee-object-id '
                                     'or --scope or --item or --item-type when --ids is provided.')
        role_assignments = list_role_assignments(cmd, workspace_name, None, None, None, None, None, None)
        assignment_id_list = [x.id for x in role_assignments]
        # Validate every id first so the delete is all-or-nothing.
        for assignment_id in ids:
            if assignment_id not in assignment_id_list:
                raise ArgumentUsageError("role assignment id:'{}' doesn't exist.".format(assignment_id))
        # delete when all ids check pass
        for assignment_id in ids:
            client.delete_role_assignment_by_id(assignment_id)
        return

    role_assignments = list_role_assignments(cmd, workspace_name, role, assignee, assignee_object_id,
                                             scope, item, item_type)
    if any([scope, item, item_type]):
        scope = _build_role_scope(workspace_name, scope, item, item_type)
        # The service-side listing may be broader than the requested scope.
        role_assignments = [x for x in role_assignments if x.scope == scope]

    if role_assignments:
        for assignment in role_assignments:
            client.delete_role_assignment_by_id(assignment.id)
    else:
        raise CLIError('No matched assignments were found to delete, please provide correct --role or --assignee.'
                       'Use `az synapse role assignment list` to get role assignments.')
def create_role_assignment(cmd, workspace_name, role, assignee=None, assignee_object_id=None,
                           scope=None, assignee_principal_type=None, item_type=None, item=None, assignment_id=None):
    """Check parameters are provided correctly, then call _create_role_assignment.

    Exactly one of assignee / assignee_object_id is required; item and
    item_type must be given together; assignment_id, if given, must be a GUID.
    Creating an assignment that already exists is treated as success
    (the existing assignments are returned).
    """
    if assignment_id and not is_guid(assignment_id):
        raise InvalidArgumentValueError('usage error: --id GUID')
    if bool(assignee) == bool(assignee_object_id):
        raise ArgumentUsageError('usage error: --assignee STRING | --assignee-object-id GUID')
    if assignee_principal_type and not assignee_object_id:
        raise ArgumentUsageError('usage error: --assignee-object-id GUID [--assignee-principal-type]')
    if bool(item) != bool(item_type):
        raise ArgumentUsageError('usage error: --item-type STRING --item STRING')
    try:
        return _create_role_assignment(cmd, workspace_name, role, assignee or assignee_object_id, scope, item,
                                       item_type, resolve_assignee=(not assignee_object_id),
                                       assignee_principal_type=assignee_principal_type, assignment_id=assignment_id)
    except Exception as ex:  # pylint: disable=broad-except
        if _error_caused_by_role_assignment_exists(ex):  # for idempotent
            return list_role_assignments(cmd, workspace_name, role=role,
                                         assignee=assignee, assignee_object_id=assignee_object_id,
                                         scope=scope, item=item, item_type=item_type)
        raise
def _resolve_object_id(cmd, assignee, fallback_to_object_id=False):
    """Resolve *assignee* to an AAD object id via the Graph API.

    Tries users (UPN / mail / display name), then service principals, then
    groups; finally, if *assignee* itself is a GUID, verifies it as an object
    id. Raises CLIError on zero or multiple matches. When the Graph call
    fails and fallback_to_object_id is set, a GUID assignee is returned as-is.
    """
    if assignee is None:
        return None
    client = cf_graph_client_factory(cmd.cli_ctx)
    result = None
    try:
        result = list(client.users.list(filter="userPrincipalName eq '{0}' or mail eq '{0}' or displayName eq '{0}'"
                                        .format(assignee)))
        if not result:
            result = list(client.service_principals.list(filter="displayName eq '{}'".format(assignee)))
        if not result:
            result = list(client.groups.list(filter="mail eq '{}'".format(assignee)))
        if not result and is_guid(assignee):  # assume an object id, let us verify it
            result = _get_object_stubs(client, [assignee])

        # 2+ matches should never happen, so we only check 'no match' here
        if not result:
            raise CLIError("Cannot find user or group or service principal in graph database for '{assignee}'. "
                           "If the assignee is a principal id, make sure the corresponding principal is created "
                           "with 'az ad sp create --id {assignee}'.".format(assignee=assignee))
        if len(result) > 1:
            raise CLIError("Find more than one user or group or service principal in graph database for '{assignee}'. "
                           "Please using --assignee-object-id GUID to specify assignee accurately"
                           .format(assignee=assignee))

        return result[0].object_id
    except (CloudError, GraphErrorException):
        if fallback_to_object_id and is_guid(assignee):
            return assignee
        raise
def _get_object_stubs(graph_client, assignees):
    """Look up directory object stubs for the given ids, 1000 per request
    (the Graph getObjectsByObjectIds batch limit)."""
    from azure.graphrbac.models import GetObjectsParameters
    pending = list(assignees)  # callers could pass in a set
    stubs = []
    batch_size = 1000
    while pending:
        batch, pending = pending[:batch_size], pending[batch_size:]
        params = GetObjectsParameters(include_directory_object_references=True, object_ids=batch)
        stubs += list(graph_client.objects.get_objects_by_object_ids(params))
    return stubs
def _error_caused_by_role_assignment_exists(ex):
return getattr(ex, 'status_code', None) == 409 and 'role assignment already exists' in ex.message
def _create_role_assignment(cmd, workspace_name, role, assignee, scope=None, item=None, item_type=None,
                            resolve_assignee=True, assignee_principal_type=None, assignment_id=None):
    """Prepare scope, role ID and resolve object ID from Graph API."""
    scope = _build_role_scope(workspace_name, scope, item, item_type)
    role_id = _resolve_role_id(cmd, role, workspace_name)
    # assignee is either a name to resolve via Graph or already an object id.
    object_id = _resolve_object_id(cmd, assignee, fallback_to_object_id=True) if resolve_assignee else assignee
    assignment_client = cf_synapse_role_assignments(cmd.cli_ctx, workspace_name)
    # A caller-supplied assignment id is honoured; otherwise generate one.
    return assignment_client.create_role_assignment(assignment_id if assignment_id is not None else _gen_guid(),
                                                    role_id, object_id, scope, assignee_principal_type)
def _build_role_scope(workspace_name, scope, item, item_type):
if scope:
return scope
if item and item_type:
# workspaces/{workspaceName}/bigDataPools/{bigDataPoolName}
scope = "workspaces/" + workspace_name + "/" + item_type + "/" + item
else:
scope = "workspaces/" + workspace_name
return scope
def _resolve_role_id(cmd, role, workspace_name):
    """Translate a role name or GUID into a role definition id.

    Returns None for a falsy *role*; a GUID is returned unchanged; a name is
    matched case-insensitively against the workspace's role definitions.
    Raises CLIError when the name matches no definition.
    """
    role_id = None
    if not role:
        return role_id
    if is_guid(role):
        role_id = role
    else:
        role_definition_client = cf_synapse_role_definitions(cmd.cli_ctx, workspace_name)
        role_definition = role_definition_client.list_role_definitions()
        # Map lower-cased names to ids for a case-insensitive lookup.
        role_dict = {x.name.lower(): x.id for x in role_definition if x.name}
        if role.lower() not in role_dict:
            raise CLIError("Role '{}' doesn't exist.".format(role))
        role_id = role_dict[role.lower()]
    return role_id
def _gen_guid():
import uuid
return uuid.uuid4()
# List Synapse Role Definitions Scope
def list_scopes(cmd, workspace_name):
    """Return every scope at which Synapse role definitions can apply."""
    definitions_client = cf_synapse_role_definitions(cmd.cli_ctx, workspace_name)
    return definitions_client.list_scopes()
# List Synapse Role Definitions
def list_role_definitions(cmd, workspace_name, is_built_in=None):
    """List the workspace's role definitions, optionally filtering to built-in ones."""
    definitions_client = cf_synapse_role_definitions(cmd.cli_ctx, workspace_name)
    return definitions_client.list_role_definitions(is_built_in)
def _build_role_scope_format(scope, item_type):
if scope:
return scope
if item_type:
scope = "workspaces/{workspaceName}/" + item_type + "/" + ITEM_NAME_MAPPING[item_type]
else:
scope = "workspaces/{workspaceName}"
return scope
# Get Synapse Role Definition
def get_role_definition(cmd, workspace_name, role):
    """Show a single Synapse role definition, addressed by name or GUID."""
    resolved_id = _resolve_role_id(cmd, role, workspace_name)
    definitions_client = cf_synapse_role_definitions(cmd.cli_ctx, workspace_name)
    return definitions_client.get_role_definition_by_id(resolved_id)
| yugangw-msft/azure-cli | src/azure-cli/azure/cli/command_modules/synapse/manual/operations/accesscontrol.py | Python | mit | 11,030 |
from ..io.importer import lexicon_data_to_csvs, import_lexicon_csvs
from ..io.enrichment.lexical import enrich_lexicon_from_csv, parse_file
from .spoken import SpokenContext
class LexicalContext(SpokenContext):
    """
    Class that contains methods for dealing specifically with words
    (lexicon-level properties on the corpus hierarchy).
    """

    def enrich_lexicon(self, lexicon_data, type_data=None, case_sensitive=False):
        """
        Add lexical properties to the corpus: writes the data out as CSVs,
        imports them, and registers the new properties on the hierarchy.

        Parameters
        ----------
        lexicon_data : dict
            Mapping of word to a dict of property name -> value.
        type_data : dict
            Mapping of property name -> Python type; inferred from the first
            entry of lexicon_data when None.
        case_sensitive : bool
            Whether word matching is case sensitive; defaults to False.
        """
        if type_data is None:
            # Infer property types from an arbitrary entry's values.
            type_data = {k: type(v) for k, v in next(iter(lexicon_data.values())).items()}
        # Skip properties the hierarchy already has for the word type.
        removed = [x for x in type_data.keys() if self.hierarchy.has_type_property(self.word_name, x)]
        type_data = {k: v for k,v in type_data.items() if k not in removed}
        if not type_data:
            return
        lexicon_data_to_csvs(self, lexicon_data, case_sensitive=case_sensitive)
        import_lexicon_csvs(self, type_data, case_sensitive=case_sensitive)
        self.hierarchy.add_type_properties(self, self.word_name, type_data.items())
        self.encode_hierarchy()

    def enrich_lexicon_from_csv(self, path, case_sensitive=False):
        """
        Enrich the lexicon from a CSV file.

        Parameters
        ----------
        path : str
            Path to the CSV file.
        case_sensitive : bool
            Whether word matching is case sensitive; defaults to False.
        """
        enrich_lexicon_from_csv(self, path, case_sensitive)

    def reset_lexicon_csv(self, path):
        """
        Remove properties that were encoded via a CSV file: nulls them out on
        every word and removes them from the hierarchy.

        Parameters
        ----------
        path : str
            CSV file to get property names from.
        """
        data, type_data = parse_file(path, labels=[])
        word = getattr(self, 'lexicon_' + self.word_name)
        q = self.query_lexicon(word)
        property_names = [x for x in type_data.keys()]
        # Setting a property to None removes its value from the graph.
        q.set_properties(**{x: None for x in property_names})
        self.hierarchy.remove_type_properties(self, self.word_name, property_names)
        self.encode_hierarchy()
| PhonologicalCorpusTools/PolyglotDB | polyglotdb/corpus/lexical.py | Python | mit | 2,263 |
"""!event [num]: Displays the next upcoming H@B event."""
__match__ = r"!event( .*)"
| kvchen/keffbot-py | plugins/event.py | Python | mit | 87 |
# coding:utf-8
#
# The MIT License (MIT)
#
# Copyright (c) 2016-2021 yutiansut/QUANTAXIS
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sqlite3
import os
CACHE = None
def _init(cache_file):
    """Creates a new Cache object."""
    # Module-level singleton: subsequent get_cache() calls reuse this instance.
    global CACHE
    CACHE = Cache(cache_file)
def get_cache(config_file=None):
    """Used to retrieve the global cache object, creating it on first use."""
    # NOTE(review): the parameter is named config_file but is forwarded to
    # _init() as the cache *file path* — confirm callers pass a path here.
    if CACHE is None:
        _init(config_file)
    return CACHE
class Cache():
    """This object is used to interface with the job cache. It uses a SQLite3
    database to store the information.

    :param str cache_file: The path to the cache file. This will be created if
                           it does not already exist.
    """

    def __init__(self, cache_file):
        self.filename = cache_file
        if not os.path.isfile(self.filename):
            self._create(self.filename)
        self.conn = sqlite3.connect(self.filename)
        self.cur = self.conn.cursor()
        # Enforce the history.hash -> jobs.hash foreign key.
        self.cur.execute("PRAGMA foreign_keys = ON")

    def __del__(self):
        """Commit the changes and close the connection."""
        if getattr(self, "conn", None):
            self.conn.commit()
            self.conn.close()

    def _create(self, cache_file):
        """Create the tables needed to store the information."""
        conn = sqlite3.connect(cache_file)
        cur = conn.cursor()
        cur.execute("PRAGMA foreign_keys = ON")
        cur.execute('''
            CREATE TABLE jobs(
            hash TEXT NOT NULL UNIQUE PRIMARY KEY, description TEXT NOT NULL,
            last_run REAL, next_run REAL, last_run_result INTEGER)''')
        cur.execute('''
            CREATE TABLE history(
            hash TEXT, description TEXT, time REAL, result INTEGER,
            FOREIGN KEY(hash) REFERENCES jobs(hash))''')
        conn.commit()
        conn.close()

    def has(self, job):
        """Checks to see whether or not a job exists in the table.

        :param dict job: The job dictionary
        :returns: True if the job exists, False otherwise
        """
        # Bug fix: cursor.execute() returns the (always-truthy) cursor, so the
        # previous bool(...) made this return True for every job. The count
        # must actually be fetched and compared.
        self.cur.execute('SELECT count(*) FROM jobs WHERE hash=?', (job["id"],))
        return self.cur.fetchone()[0] > 0

    def get(self, id):
        """Retrieves the job with the selected ID.

        :param str id: The ID of the job
        :returns: The dictionary of the job if found, None otherwise
        """
        self.cur.execute("SELECT * FROM jobs WHERE hash=?", (id,))
        item = self.cur.fetchone()
        if item:
            return dict(zip(
                ("id", "description", "last-run", "next-run", "last-run-result"),
                item))
        return None

    def update(self, job):
        """Update last_run, next_run, and last_run_result for an existing job.

        :param dict job: The job dictionary
        """
        self.cur.execute('''UPDATE jobs
            SET last_run=?,next_run=?,last_run_result=? WHERE hash=?''', (
            job["last-run"], job["next-run"], job["last-run-result"], job["id"]))

    def add_job(self, job):
        """Adds a new job into the cache.

        :param dict job: The job dictionary
        :returns: True
        """
        self.cur.execute("INSERT INTO jobs VALUES(?,?,?,?,?)", (
            job["id"], job["description"], job["last-run"], job["next-run"], job["last-run-result"]))
        return True

    def add_result(self, job):
        """Adds a job run result to the history table.

        :param dict job: The job dictionary
        :returns: True
        """
        self.cur.execute(
            "INSERT INTO history VALUES(?,?,?,?)",
            (job["id"], job["description"], job["last-run"], job["last-run-result"]))
        return True
"""SCons.Variables.PathVariable
This file defines an option type for SCons implementing path settings.
To be used whenever a user-specified path override should be allowed.
Arguments to PathVariable are:
option-name = name of this option on the command line (e.g. "prefix")
option-help = help string for option
option-dflt = default value for this option
validator = [optional] validator for option value. Predefined
validators are:
PathAccept -- accepts any path setting; no validation
PathIsDir -- path must be an existing directory
PathIsDirCreate -- path must be a dir; will create
PathIsFile -- path must be a file
PathExists -- path must exist (any type) [default]
The validator is a function that is called and which
should return True or False to indicate if the path
is valid. The arguments to the validator function
are: (key, val, env). The key is the name of the
option, the val is the path specified for the option,
and the env is the env to which the Options have been
added.
Usage example:
Examples:
prefix=/usr/local
opts = Variables()
opts.Add(PathVariable('qtdir',
'where the root of Qt is installed',
qtdir, PathIsDir))
opts.Add(PathVariable('qt_includes',
'where the Qt includes are installed',
'$qtdir/includes', PathIsDirCreate))
opts.Add(PathVariable('qt_libraries',
'where the Qt library is installed',
'$qtdir/lib'))
"""
#
# Copyright (c) 2001 - 2014 The SCons Foundation
#
# Permission is hereby granted, free of charge, to any person obtaining
# a copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish,
# distribute, sublicense, and/or sell copies of the Software, and to
# permit persons to whom the Software is furnished to do so, subject to
# the following conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
# KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
# WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
__revision__ = "src/engine/SCons/Variables/PathVariable.py 2014/08/24 12:12:31 garyo"
__all__ = ['PathVariable',]
import os
import os.path
import SCons.Errors
class _PathVariableClass(object):
    """Callable factory for path-valued SCons Variables, exposing the stock
    path validators as methods."""

    def PathAccept(self, key, val, env):
        """Accepts any path, no checking done."""
        pass

    def PathIsDir(self, key, val, env):
        """Validator to check if Path is a directory."""
        if not os.path.isdir(val):
            # Distinguish "it's a file" from "it doesn't exist" in the error.
            if os.path.isfile(val):
                m = 'Directory path for option %s is a file: %s'
            else:
                m = 'Directory path for option %s does not exist: %s'
            raise SCons.Errors.UserError(m % (key, val))

    def PathIsDirCreate(self, key, val, env):
        """Validator to check if Path is a directory,
           creating it (and any missing parents) if it does not exist."""
        if os.path.isfile(val):
            m = 'Path for option %s is a file, not a directory: %s'
            raise SCons.Errors.UserError(m % (key, val))
        if not os.path.isdir(val):
            # NOTE(review): makedirs raises OSError if the directory appears
            # between the isdir check and this call — confirm acceptable.
            os.makedirs(val)

    def PathIsFile(self, key, val, env):
        """Validator to check if Path is a file."""
        if not os.path.isfile(val):
            if os.path.isdir(val):
                m = 'File path for option %s is a directory: %s'
            else:
                m = 'File path for option %s does not exist: %s'
            raise SCons.Errors.UserError(m % (key, val))

    def PathExists(self, key, val, env):
        """Validator to check if Path exists (any type)."""
        if not os.path.exists(val):
            m = 'Path for option %s does not exist: %s'
            raise SCons.Errors.UserError(m % (key, val))

    def __call__(self, key, help, default, validator=None):
        # NB: searchfunc is currently undocumented and unsupported
        """
        The input parameters describe a 'path list' option, thus they
        are returned with the correct converter and validator appended. The
        result is usable for input to opts.Add() .

        The 'default' option specifies the default path to use if the
        user does not specify an override with this option.

        validator is a validator, see this file for examples
        """
        if validator is None:
            validator = self.PathExists
        # NOTE(review): relies on SCons.Util being importable through the
        # SCons package; only SCons.Errors is imported at the top of this
        # file — confirm SCons.Util is loaded by the time this runs.
        if SCons.Util.is_List(key) or SCons.Util.is_Tuple(key):
            return (key, '%s ( /path/to/%s )' % (help, key[0]), default,
                    validator, None)
        else:
            return (key, '%s ( /path/to/%s )' % (help, key), default,
                    validator, None)
PathVariable = _PathVariableClass()
# Local Variables:
# tab-width:4
# indent-tabs-mode:nil
# End:
# vim: set expandtab tabstop=4 shiftwidth=4:
| engineer0x47/SCONS | engine/SCons/Variables/PathVariable.py | Python | mit | 5,616 |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    # Auto-generated schema migration: makes three Entry fields nullable with
    # empty-string defaults.  The b'' byte-string defaults indicate it was
    # generated under Python 2; do not hand-edit applied migrations.

    dependencies = [
        ('bibtex', '0008_entry_downloadurl'),
    ]

    operations = [
        migrations.AlterField(
            model_name='entry',
            name='downloadurl',
            field=models.CharField(default=b'', max_length=200, null=True),
        ),
        migrations.AlterField(
            model_name='entry',
            name='html',
            field=models.TextField(default=b'', null=True),
        ),
        migrations.AlterField(
            model_name='entry',
            name='imgurl',
            field=models.CharField(default=b'', max_length=100, null=True),
        ),
    ]
| RTSYork/bibtex | bibtex/migrations/0009_auto_20150720_2105.py | Python | mit | 765 |
from collections import OrderedDict, Mapping, Container
from pprint import pprint
from sys import getsizeof
def deep_compare(a, b, pointer='/'):
    """Recursively compare *a* and *b*, raising RuntimeError (via x()) at the
    first difference found.  *pointer* is a JSON-pointer-like path to the
    current location, used in the error message.  Returns None when equal.
    """
    if a == b:
        return
    if type(a) != type(b):
        reason = 'Different data types'
        extra = str((type(a), type(b)))
        x(pointer, reason, extra)
    elif type(a) in (set, frozenset):
        # Bug fixes: the pointer suffix was appended twice for sets of
        # different sizes, and unreachable code after the unconditional
        # raise tried to index the sets (sets are not indexable).
        pointer += 'set()'
        if len(a) != len(b):
            reason = 'Different number of items'
            extra = str((len(a), len(b)))
            x(pointer, reason, extra)
        # Unequal sets of equal size: report both sets.
        reason = 'Different items'
        extra = (a, b)
        x(pointer, reason, extra)
    elif type(a) in (list, tuple):
        if len(a) != len(b):
            pointer += '[]'
            reason = 'Different number of items'
            extra = str((len(a), len(b)))
            x(pointer, reason, extra)
        if sorted(a) == sorted(b):
            # Same items, different order.
            pointer += '[]'
            reason = 'Different sort order'
            extra = 'N/A'
            x(pointer, reason, extra)
        for i in range(len(a)):
            deep_compare(a[i], b[i], pointer + '[{}]'.format(i))
    elif type(a) in (dict, OrderedDict):
        if len(a) != len(b):
            pointer += '{}'
            reason = 'Different number of items'
            extra = str((len(a), len(b)))
            x(pointer, reason, extra)
        if set(a.keys()) != set(b.keys()):
            pointer += '{}'
            reason = 'Different keys'
            extra = (a.keys(), b.keys())
            x(pointer, reason, extra)
        for k in a:
            deep_compare(a[k], b[k], pointer + '[{}]'.format(k))
    else:
        # Scalars / unrecognised types that compared unequal.
        reason = 'Different objects'
        extra = (a, b)
        x(pointer, reason, extra)


def x(pointer, reason, extra):
    """Raise a RuntimeError describing a difference found by deep_compare."""
    message = 'Objects are not the same. Pointer: {}. Reason: {}. Extra: {}'
    raise RuntimeError(message.format(pointer, reason, extra))


def compare(a, b):
    """Compare *a* and *b*, printing a description of the first difference."""
    try:
        deep_compare(a, b, '/')
    except RuntimeError as e:
        # str(e) replaces the Python-2-only e.message attribute.
        pprint(str(e))
def deep_getsizeof(o, ids):
    """Find the memory footprint of a Python object

    This is a recursive function that drills down a Python object graph
    like a dictionary holding nested dictionaries with lists of lists
    and tuples and sets.

    The sys.getsizeof function does a shallow size of only. It counts each
    object inside a container as pointer only regardless of how big it
    really is.

    :param o: the object
    :param ids: set of id()s already counted (prevents double counting and
                infinite recursion on cyclic graphs)
    :return: total size in bytes
    """
    d = deep_getsizeof
    if id(o) in ids:
        return 0

    r = getsizeof(o)
    ids.add(id(o))

    # Bug fix: the second check previously read isinstance(0, unicode) —
    # the literal 0 instead of o — so unicode strings fell through and were
    # (wrongly) traversed as containers.
    if isinstance(o, str) or isinstance(o, unicode):
        return r

    if isinstance(o, Mapping):
        return r + sum(d(k, ids) + d(v, ids) for k, v in o.iteritems())

    if isinstance(o, Container):
        return r + sum(d(x, ids) for x in o)

    return r
| the-gigi/deep | deeper.py | Python | mit | 3,000 |
from django.conf.urls import url
from views import calendarpage, jsonsearch, fcdragmodify, EventManager, event_view
# One class-based view callable shared by the create/edit routes below.
event_manager = EventManager.as_view()

urlpatterns = [
    url(r'^$', calendarpage, name='events'),
    url(r'^json/', jsonsearch, name='jsonsearch'),
    url(r'^modify/', fcdragmodify, name='fcdragmodify'),
    url(r'^new/', event_manager, name='newevent'),
    url(r'^(?P<eventid>\d+)/edit/', event_manager, name='editevent'),
    # The orig-<id>/ suffix is optional (non-capturing group with '?').
    url(r'^recurring/(?P<eventid>\d+)/edit/(?:orig-(?P<originaleventid>\d+)/)?', event_manager, {'editingregularevent': False}, name='editrecurringevent'),
    # Must be last: the bare "<eventid>/" pattern would otherwise match (and
    # shadow) the more specific edit URLs above.
    url(r'^(?P<eventid>\d+)/', event_view, name='viewevent'),
]
| InfoSec-CSUSB/club-websystem | src/events/urls.py | Python | mit | 675 |
"""
Script that trains Tensorflow multitask models on QM8 dataset.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
import os
import deepchem as dc
import numpy as np
from qm8_datasets import load_qm8
np.random.seed(123)

# Load the QM8 splits and their transformers.
qm8_tasks, datasets, transformers = load_qm8()
train_dataset, valid_dataset, test_dataset = datasets

fit_transformers = [dc.trans.CoulombFitTransformer(train_dataset)]
regression_metric = [
    dc.metrics.Metric(dc.metrics.mean_absolute_error, mode="regression"),
    dc.metrics.Metric(dc.metrics.pearson_r2_score, mode="regression")
]

# Multitask regressor with three hidden layers (400/100/100 units).
model = dc.models.TensorflowMultiTaskFitTransformRegressor(
    n_tasks=len(qm8_tasks),
    n_features=[26, 26],
    learning_rate=0.001,
    momentum=.8,
    batch_size=32,
    weight_init_stddevs=[1 / np.sqrt(400), 1 / np.sqrt(100), 1 / np.sqrt(100)],
    bias_init_consts=[0., 0., 0.],
    layer_sizes=[400, 100, 100],
    dropouts=[0.01, 0.01, 0.01],
    fit_transformers=fit_transformers,
    n_evals=10,
    seed=123)

# Fit trained model
model.fit(train_dataset, nb_epoch=50)
model.save()

# Evaluate each split and report scores.
for split_name, split_dataset in (("Train", train_dataset),
                                  ("Valid", valid_dataset),
                                  ("Test", test_dataset)):
    scores = model.evaluate(split_dataset, regression_metric, transformers)
    print("%s scores [kcal/mol]" % split_name)
    print(scores)
| joegomes/deepchem | examples/qm8/qm8_tf_model.py | Python | mit | 1,512 |
from django.conf import settings
from django.contrib.auth import get_user_model
from django.core.cache import cache
from django.urls import reverse
from django.utils import timezone
from freezegun import freeze_time
from rest_framework import status, test
from rest_framework.authtoken.models import Token
from . import helpers
class TokenAuthenticationTest(test.APITransactionTestCase):
    """Tests for token authentication: issuing, lifetime, refresh and lockout."""

    def setUp(self):
        self.username = 'test'
        self.password = 'secret'
        self.auth_url = 'http://testserver' + reverse('auth-password')
        self.test_url = 'http://testserver/api/'
        get_user_model().objects.create_user(
            self.username, 'admin@example.com', self.password
        )

    def tearDown(self):
        # Failed-login throttling state lives in the cache; reset between tests.
        cache.clear()

    def test_user_can_authenticate_with_token(self):
        """A valid username/password yields a token that grants API access."""
        response = self.client.post(
            self.auth_url, data={'username': self.username, 'password': self.password}
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        token = response.data['token']
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token)
        response = self.client.get(self.test_url)
        self.assertEqual(response.status_code, status.HTTP_200_OK)

    def test_token_expires_based_on_user_token_lifetime(self):
        """Requests made after the user's own token lifetime are rejected."""
        user = get_user_model().objects.get(username=self.username)
        configured_token_lifetime = settings.WALDUR_CORE.get(
            'TOKEN_LIFETIME', timezone.timedelta(hours=1)
        )
        # Shorten the lifetime slightly so the frozen time below is past expiry.
        user_token_lifetime = configured_token_lifetime - timezone.timedelta(seconds=40)
        user.token_lifetime = user_token_lifetime.seconds
        user.save()

        response = self.client.post(
            self.auth_url, data={'username': self.username, 'password': self.password}
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        token = response.data['token']

        mocked_now = timezone.now() + user_token_lifetime
        with freeze_time(mocked_now):
            self.client.credentials(HTTP_AUTHORIZATION='Token ' + token)
            response = self.client.get(self.test_url)
            self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
            self.assertEqual(response.data['detail'], 'Token has expired.')

    def test_token_creation_time_is_updated_on_every_request(self):
        """Each authenticated request refreshes the token's created timestamp."""
        response = self.client.post(
            self.auth_url, data={'username': self.username, 'password': self.password}
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        token = response.data['token']
        created1 = Token.objects.values_list('created', flat=True).get(key=token)
        self.client.credentials(HTTP_AUTHORIZATION='Token ' + token)
        self.client.get(self.test_url)
        created2 = Token.objects.values_list('created', flat=True).get(key=token)
        self.assertTrue(created1 < created2)

    def test_account_is_blocked_after_five_failed_attempts(self):
        """After five failed logins the account is locked out temporarily."""
        for _ in range(5):
            response = self.client.post(
                self.auth_url, data={'username': self.username, 'password': 'WRONG'}
            )
            self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        # this one should fail with a different error message
        # Bug fix: the response of this sixth attempt was not captured, so the
        # assertions below inspected the fifth (pre-lockout) response instead.
        response = self.client.post(
            self.auth_url, data={'username': self.username, 'password': 'WRONG'}
        )
        self.assertEqual(
            response.data['detail'], 'Username is locked out. Try in 10 minutes.'
        )
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)

    def test_expired_token_is_recreated_on_successful_authentication(self):
        """Logging in again after expiry issues a brand new token."""
        user = get_user_model().objects.get(username=self.username)
        self.assertIsNotNone(user.token_lifetime)
        response = self.client.post(
            self.auth_url, data={'username': self.username, 'password': self.password}
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        token1 = response.data['token']

        mocked_now = timezone.now() + timezone.timedelta(seconds=user.token_lifetime)
        with freeze_time(mocked_now):
            response = self.client.post(
                self.auth_url,
                data={'username': self.username, 'password': self.password},
            )
            token2 = response.data['token']
        self.assertNotEqual(token1, token2)

    def test_not_expired_token_creation_time_is_updated_on_authentication(self):
        """Re-authenticating before expiry keeps the token but refreshes its timestamp."""
        response = self.client.post(
            self.auth_url, data={'username': self.username, 'password': self.password}
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        token1 = response.data['token']
        created1 = Token.objects.values_list('created', flat=True).get(key=token1)

        response = self.client.post(
            self.auth_url, data={'username': self.username, 'password': self.password}
        )
        token2 = response.data['token']
        created2 = Token.objects.values_list('created', flat=True).get(key=token2)

        self.assertEqual(token1, token2)
        self.assertTrue(created1 < created2)

    def test_token_never_expires_if_token_lifetime_is_none(self):
        """A user with token_lifetime=None keeps the same token indefinitely."""
        user = get_user_model().objects.get(username=self.username)
        user.token_lifetime = None
        user.save()

        response = self.client.post(
            self.auth_url, data={'username': self.username, 'password': self.password}
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        original_token = response.data['token']

        year_ahead = timezone.now() + timezone.timedelta(days=365)
        with freeze_time(year_ahead):
            response = self.client.post(
                self.auth_url,
                data={'username': self.username, 'password': self.password},
            )
            token_in_a_year = response.data['token']
        self.assertEqual(original_token, token_in_a_year)

    def test_token_created_date_is_refreshed_even_if_token_lifetime_is_none(self):
        """The created timestamp is still refreshed for never-expiring tokens."""
        response = self.client.post(
            self.auth_url, data={'username': self.username, 'password': self.password}
        )
        self.assertEqual(response.status_code, status.HTTP_200_OK)

        user = get_user_model().objects.get(username=self.username)
        original_token_lifetime = user.token_lifetime
        original_created_value = user.auth_token.created
        user.token_lifetime = None
        user.save()

        last_refresh_time = timezone.now() + timezone.timedelta(
            seconds=original_token_lifetime
        )
        with freeze_time(last_refresh_time):
            response = self.client.post(
                self.auth_url,
                data={'username': self.username, 'password': self.password},
            )
            self.assertEqual(response.status_code, status.HTTP_200_OK)
            token = response.data['token']

        user.auth_token.refresh_from_db()
        self.assertTrue(user.auth_token.created > original_created_value)

        # Restore a finite lifetime: the refreshed created date must keep the
        # token valid at last_refresh_time.
        user.token_lifetime = original_token_lifetime
        user.save()

        with freeze_time(last_refresh_time):
            self.client.credentials(HTTP_AUTHORIZATION='Token ' + token)
            response = self.client.get(self.test_url)
            self.assertEqual(response.status_code, status.HTTP_200_OK)

    @helpers.override_waldur_core_settings(AUTHENTICATION_METHODS=[])
    def test_authentication_fails_if_local_signin_is_disabled(self):
        """Password login is rejected when no authentication methods are enabled."""
        response = self.client.post(
            self.auth_url, data={'username': self.username, 'password': self.password}
        )
        self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)
        self.assertTrue(b'Authentication method is disabled.' in response.content)
| opennode/nodeconductor-assembly-waldur | src/waldur_core/core/tests/test_authentication.py | Python | mit | 7,832 |
"""Support for EcoNet products."""
from datetime import timedelta
import logging
from aiohttp.client_exceptions import ClientError
from pyeconet import EcoNetApiInterface
from pyeconet.equipment import EquipmentType
from pyeconet.errors import (
GenericHTTPError,
InvalidCredentialsError,
InvalidResponseFormat,
PyeconetError,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_EMAIL, CONF_PASSWORD, TEMP_FAHRENHEIT, Platform
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.dispatcher import dispatcher_send
from homeassistant.helpers.entity import DeviceInfo, Entity
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.typing import ConfigType
from .const import API_CLIENT, DOMAIN, EQUIPMENT
# Module-level logger for this integration.
_LOGGER = logging.getLogger(__name__)

# Platforms this integration forwards config entries to.
PLATFORMS = [
    Platform.CLIMATE,
    Platform.BINARY_SENSOR,
    Platform.SENSOR,
    Platform.WATER_HEATER,
]

# Dispatcher signal fired whenever the EcoNet API pushes an update.
PUSH_UPDATE = "econet.push_update"

# Interval used for both MQTT resubscription and periodic API refresh.
INTERVAL = timedelta(minutes=60)
async def async_setup(hass: HomeAssistant, config: ConfigType) -> bool:
    """Set up the EcoNet component."""
    # Prepare per-entry storage for API clients and their equipment.
    hass.data[DOMAIN] = {API_CLIENT: {}, EQUIPMENT: {}}
    return True
async def async_setup_entry(hass: HomeAssistant, config_entry: ConfigEntry) -> bool:
    """Set up EcoNet as config entry."""
    email = config_entry.data[CONF_EMAIL]
    password = config_entry.data[CONF_PASSWORD]

    try:
        api = await EcoNetApiInterface.login(email, password=password)
    except InvalidCredentialsError:
        # Bad credentials will not fix themselves; abort setup without retry.
        _LOGGER.error("Invalid credentials provided")
        return False
    except PyeconetError as err:
        # Other API errors are treated as transient: HA will retry setup.
        _LOGGER.error("Config entry failed: %s", err)
        raise ConfigEntryNotReady from err

    try:
        equipment = await api.get_equipment_by_type(
            [EquipmentType.WATER_HEATER, EquipmentType.THERMOSTAT]
        )
    except (ClientError, GenericHTTPError, InvalidResponseFormat) as err:
        raise ConfigEntryNotReady from err
    hass.data[DOMAIN][API_CLIENT][config_entry.entry_id] = api
    hass.data[DOMAIN][EQUIPMENT][config_entry.entry_id] = equipment

    hass.config_entries.async_setup_platforms(config_entry, PLATFORMS)

    # Start receiving push updates from the EcoNet service.
    api.subscribe()

    def update_published():
        """Handle a push update."""
        dispatcher_send(hass, PUSH_UPDATE)

    # Route every equipment push through the dispatcher signal above.
    for _eqip in equipment[EquipmentType.WATER_HEATER]:
        _eqip.set_update_callback(update_published)

    for _eqip in equipment[EquipmentType.THERMOSTAT]:
        _eqip.set_update_callback(update_published)

    async def resubscribe(now):
        """Resubscribe to the MQTT updates."""
        await hass.async_add_executor_job(api.unsubscribe)
        api.subscribe()

    async def fetch_update(now):
        """Fetch the latest changes from the API."""
        await api.refresh_equipment()

    # Resubscribe hourly; poll the API one minute after each resubscribe.
    # Both timers are cancelled when the entry is unloaded.
    config_entry.async_on_unload(async_track_time_interval(hass, resubscribe, INTERVAL))
    config_entry.async_on_unload(
        async_track_time_interval(hass, fetch_update, INTERVAL + timedelta(minutes=1))
    )

    return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
    """Unload a EcoNet config entry."""
    if not await hass.config_entries.async_unload_platforms(entry, PLATFORMS):
        return False
    # Platforms unloaded cleanly: drop the entry's API client and equipment.
    hass.data[DOMAIN][API_CLIENT].pop(entry.entry_id)
    hass.data[DOMAIN][EQUIPMENT].pop(entry.entry_id)
    return True
class EcoNetEntity(Entity):
    """Base class shared by all EcoNet entities."""

    def __init__(self, econet):
        """Store the wrapped pyeconet equipment object."""
        self._econet = econet

    async def async_added_to_hass(self):
        """Register for push updates once the entity is added."""
        await super().async_added_to_hass()
        unsubscribe = self.hass.helpers.dispatcher.async_dispatcher_connect(
            PUSH_UPDATE, self.on_update_received
        )
        self.async_on_remove(unsubscribe)

    @callback
    def on_update_received(self):
        """Write the newly pushed state to Home Assistant."""
        self.async_write_ha_state()

    @property
    def available(self):
        """Return whether the device is currently connected."""
        return self._econet.connected

    @property
    def device_info(self) -> DeviceInfo:
        """Return device registry information for this entity."""
        return DeviceInfo(
            identifiers={(DOMAIN, self._econet.device_id)},
            manufacturer="Rheem",
            name=self._econet.device_name,
        )

    @property
    def name(self):
        """Return the device name reported by the API."""
        return self._econet.device_name

    @property
    def unique_id(self):
        """Return a unique ID built from the device id and name."""
        return f"{self._econet.device_id}_{self._econet.device_name}"

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        return TEMP_FAHRENHEIT

    @property
    def should_poll(self) -> bool:
        """State arrives via push updates, so polling is disabled."""
        return False
| rohitranjan1991/home-assistant | homeassistant/components/econet/__init__.py | Python | mit | 5,194 |
# Example of managed attributes via properties
class String:
    """Data descriptor that restricts an attribute to str values."""

    def __init__(self, name):
        # Key under which the value is stored in the instance __dict__.
        self.name = name

    def __get__(self, instance, cls):
        # Accessed on the class itself: hand back the descriptor object.
        if instance is None:
            return self
        return instance.__dict__[self.name]

    def __set__(self, instance, value):
        if isinstance(value, str):
            instance.__dict__[self.name] = value
        else:
            raise TypeError('Expected a string')
class Person:
    # 'name' is managed by the String descriptor: assignments are type-checked.
    name = String('name')
    def __init__(self, name):
        self.name = name
class SubPerson(Person):
    # Wraps the inherited String descriptor in a property that logs access.
    # The setter/deleter use super(SubPerson, SubPerson): a plain super()
    # proxy does not support assignment, so the parent's descriptor must be
    # fetched from the class and its __set__/__delete__ invoked explicitly.
    @property
    def name(self):
        print('Getting name')
        return super().name
    @name.setter
    def name(self, value):
        print('Setting name to', value)
        super(SubPerson, SubPerson).name.__set__(self, value)
    @name.deleter
    def name(self):
        print('Deleting name')
        # NOTE(review): the String descriptor above defines no __delete__,
        # so invoking this deleter raises AttributeError — confirm intent.
        super(SubPerson, SubPerson).name.__delete__(self)
if __name__ == '__main__':
    # Demonstrate the type-checked attribute: valid sets succeed,
    # a non-string assignment raises TypeError.
    person = Person('Guido')
    print(person.name)
    person.name = 'Dave'
    print(person.name)
    try:
        person.name = 42
    except TypeError as e:
        print(e)
| tuanavu/python-cookbook-3rd | src/8/extending_a_property_in_a_subclass/example2.py | Python | mit | 1,089 |
#### NOTICE: THIS FILE IS AUTOGENERATED
#### MODIFICATIONS MAY BE LOST IF DONE IMPROPERLY
#### PLEASE SEE THE ONLINE DOCUMENTATION FOR EXAMPLES
from swgpy.object import *
def create(kernel):
    """Build the Tangible template for the kimogila pet deed (auto-generated file)."""
    result = Tangible()
    result.template = "object/tangible/deed/pet_deed/shared_kimogila_deed.iff"
    result.attribute_template_id = 2
    result.stfName("pet_deed","kimogila")
    #### BEGIN MODIFICATIONS ####
    # Radial menu script plus the intangible PCD and mobile the deed spawns.
    result.setStringAttribute("radial_filename", "radials/deed_datapad.py")
    result.setStringAttribute("deed_pcd", "object/intangible/pet/shared_kimogila_hue.iff")
    result.setStringAttribute("deed_mobile", "object/mobile/shared_kimogila_hue.iff")
    #### END MODIFICATIONS ####
    return result
import threading
import urllib2
import time, json
import requests
import os.path
#"284330,150810,09,1109,52,071040,17,28432,7406"
countrylist = ["brazil","canada","china","france","japan","india","mexico","russia","uk","us"]
yrs = ["2011","2012","2013","2014","2015"]
dataTable ={"284330":"gold","150810":"crude","09":"coffee","1109":"wheat","52":"cotton","071040":"corn","17":"sugar","28432":"silver","7406":"copper","271111":"natural gas"};
file_list = []
start = time.time()
urls = ["http://comtrade.un.org/api/get?max=50000&type=C&freq=A&px=HS&ps=2015,2014,2013,2012,2011&r=76&p=all&rg=2&cc=284330,150810,09,1109,52,071040,17,28432,7406,271111&fmt=json",
"http://comtrade.un.org/api/get?max=50000&type=C&freq=A&px=HS&ps=2015,2014,2013,2012,2011&r=124&p=all&rg=2&cc=284330,150810,09,1109,52,071040,17,28432,7406,271111&fmt=json",
"http://comtrade.un.org/api/get?max=50000&type=C&freq=A&px=HS&ps=2015,2014,2013,2012,2011&r=156&p=all&rg=2&cc=284330,150810,09,1109,52,071040,17,28432,7406,271111&fmt=json",
"http://comtrade.un.org/api/get?max=50000&type=C&freq=A&px=HS&ps=2015,2014,2013,2012,2011&r=251&p=all&rg=2&cc=284330,150810,09,1109,52,071040,17,28432,7406,271111&fmt=json",
"http://comtrade.un.org/api/get?max=50000&type=C&freq=A&px=HS&ps=2015,2014,2013,2012,2011&r=392&p=all&rg=2&cc=284330,150810,09,1109,52,071040,17,28432,7406,271111&fmt=json",
"http://comtrade.un.org/api/get?max=50000&type=C&freq=A&px=HS&ps=2015,2014,2013,2012,2011&r=699&p=all&rg=2&cc=284330,150810,09,1109,52,071040,17,28432,7406,271111&fmt=json",
"http://comtrade.un.org/api/get?max=50000&type=C&freq=A&px=HS&ps=2015,2014,2013,2012,2011&r=484&p=all&rg=2&cc=284330,150810,09,1109,52,071040,17,28432,7406,271111&fmt=json",
"http://comtrade.un.org/api/get?max=50000&type=C&freq=A&px=HS&ps=2015,2014,2013,2012,2011&r=643&p=all&rg=2&cc=284330,150810,09,1109,52,071040,17,28432,7406,271111&fmt=json",
"http://comtrade.un.org/api/get?max=50000&type=C&freq=A&px=HS&ps=2015,2014,2013,2012,2011&r=826&p=all&rg=2&cc=284330,150810,09,1109,52,071040,17,28432,7406,271111&fmt=json",
"http://comtrade.un.org/api/get?max=50000&type=C&freq=A&px=HS&ps=2015,2014,2013,2012,2011&r=834&p=all&rg=2&cc=284330,150810,09,1109,52,071040,17,28432,7406,271111&fmt=json"]
def fetch_url(url, i):
    """Download country i's Comtrade export data and cache it on disk as JSON."""
    file_name = "file_"+countrylist[i]+".json"
    # Appending from worker threads; list.append is atomic under the GIL.
    file_list.append(file_name)
    # NOTE(review): verify=False disables TLS certificate verification —
    # confirm this is really required for comtrade.un.org before keeping it.
    response = requests.get(url, verify=False)
    data = response.json()
    # Only write the cache file if it does not exist yet.
    if not os.path.isfile("./"+file_name):
        with open(file_name, 'w') as outfile:
            json.dump(data["dataset"], outfile)
    print "'%s\' fetched in %ss" % (url, (time.time() - start))
#
# Fetch every country's data in parallel; fetch_url fills file_list and
# writes the per-country JSON caches.
threads = [threading.Thread(target=fetch_url, args=(urls[i],i,)) for i in range(0,len(urls)) ]
for thread in threads:
    thread.start()
# NOTE(review): join() below already waits for completion; this sleep looks
# like a crude pacing delay — confirm whether it is still needed.
time.sleep(12)
for thread in threads:
    thread.join()
#final data identifier
res_data = {}
# Merge all cached files into res_data keyed by "<country><year>".
for k in range(0,len(countrylist)):
    with open(file_list[k],"r") as json_data:
        data_json = json.load(json_data)
    for j in range(0,len(data_json)):
        # Build the (country, year) key for this record.
        if data_json[j]['yr'] == 2011:
            a = (countrylist[k],"2011")
        elif data_json[j]['yr'] == 2012:
            a = (countrylist[k],"2012")
        elif data_json[j]['yr'] == 2013:
            a = (countrylist[k],"2013")
        elif data_json[j]['yr'] == 2014:
            a = (countrylist[k],"2014")
        elif data_json[j]['yr'] == 2015:
            a = (countrylist[k],"2015")
        # NOTE(review): if 'yr' is outside 2011-2015, 'a' keeps its previous
        # value and the record is filed under the wrong key. The API query
        # only requests these years, but confirm before reuse.
        # insrt the key
        a = "".join(a)
        if a not in res_data.keys():
            res_data[a] = []
        # Keep only partner countries that are themselves in countrylist.
        if data_json[j]["ptTitle"].lower() not in countrylist:
            continue
        b = {}
        b["country"] = data_json[j]["ptTitle"].lower()
        b["commodity"] = dataTable[str(data_json[j]["cmdCode"])]
        b["val"] = data_json[j]["TradeValue"]
        res_data[a].append(b)
final_file_name = "exportData.json"
# Write the merged result once; existing output is never overwritten.
if not os.path.isfile("./"+final_file_name):
    with open(final_file_name, 'w') as outfile:
        json.dump(res_data, outfile)
#for i in range(0,len(file_list)):
print "Elapsed Time: %s" % (time.time() - start)
"""Creates a user """
# :license: MIT, see LICENSE for more details.
import json
import string
import sys
import click
import SoftLayer
from SoftLayer.CLI import environment
from SoftLayer.CLI import exceptions
from SoftLayer.CLI import formatting
from SoftLayer.CLI import helpers
@click.command()
@click.argument('username')
@click.option('--email', '-e', required=True,
              help="Email address for this user. Required for creation.")
@click.option('--password', '-p', default=None, show_default=True,
              help="Password to set for this user. If no password is provided, user will be sent an email "
                   "to generate one, which expires in 24 hours. '-p generate' will create a password for you "
                   "(Requires Python 3.6+). Passwords require 8+ characters, upper and lowercase, a number "
                   "and a symbol.")
@click.option('--from-user', '-u', default=None,
              help="Base user to use as a template for creating this user. "
                   "Will default to the user running this command. Information provided in --template "
                   "supersedes this template.")
@click.option('--template', '-t', default=None,
              help="A json string describing https://softlayer.github.io/reference/datatypes/SoftLayer_User_Customer/")
@environment.pass_env
def cli(env, username, email, password, from_user, template):
    """Creates a user Users.

    Remember to set the permissions and access for this new user.

    Example::

        slcli user create my@email.com -e my@email.com -p generate -a
            -t '{"firstName": "Test", "lastName": "Testerson"}'
    """
    mgr = SoftLayer.UserManager(env.client)
    # Fields copied from the template user into the new account.
    user_mask = ("mask[id, firstName, lastName, email, companyName, address1, city, country, postalCode, "
                 "state, userStatusId, timezoneId]")
    from_user_id = None
    if from_user is None:
        # No template user supplied: use the caller's own account as template.
        user_template = mgr.get_current_user(objectmask=user_mask)
        from_user_id = user_template['id']
    else:
        from_user_id = helpers.resolve_id(mgr.resolve_ids, from_user, 'username')
        user_template = mgr.get_user(from_user_id, objectmask=user_mask)
    # If we send the ID back to the API, an exception will be thrown
    del user_template['id']
    if template is not None:
        try:
            # --template values override anything copied from the template user.
            template_object = json.loads(template)
            for key in template_object:
                user_template[key] = template_object[key]
        except ValueError as ex:
            raise exceptions.ArgumentError("Unable to parse --template. %s" % ex)
    user_template['username'] = username
    if password == 'generate':
        password = generate_password()
    user_template['email'] = email
    if not env.skip_confirmations:
        # Show the full template (including the password) before creating.
        table = formatting.KeyValueTable(['name', 'value'])
        for key in user_template:
            table.add_row([key, user_template[key]])
        table.add_row(['password', password])
        click.secho("You are about to create the following user...", fg='green')
        env.fout(table)
        if not formatting.confirm("Do you wish to continue?"):
            raise exceptions.CLIAbort("Canceling creation!")
    result = mgr.create_user(user_template, password)
    table = formatting.Table(['Username', 'Email', 'Password'])
    table.add_row([result['username'], result['email'], password])
    env.fout(table)
def generate_password():
    """Returns a 23 character random string, with 3 special characters at the end"""
    if sys.version_info > (3, 6):
        import secrets  # pylint: disable=import-error,import-outside-toplevel
        alphanumeric = string.ascii_letters + string.digits
        # 20 alphanumeric characters followed by 3 punctuation characters.
        body = ''.join(secrets.choice(alphanumeric) for _ in range(20))
        tail = ''.join(secrets.choice(string.punctuation) for _ in range(3))
        return body + tail
    raise ImportError("Generating passwords require python 3.6 or higher")
| softlayer/softlayer-python | SoftLayer/CLI/user/create.py | Python | mit | 3,968 |