| text (string, lengths 6-947k) | repo_name (string, lengths 5-100) | path (string, lengths 4-231) | language (1 class: Python) | license (15 classes) | size (int64, 6-947k) | score (float64, 0-0.34) |
|---|---|---|---|---|---|---|
from django.test import TestCase
from django.contrib.auth import authenticate
from userena.backends import UserenaAuthenticationBackend
from userena.utils import get_user_model
User = get_user_model()
class UserenaAuthenticationBackendTests(TestCase):
"""
Test the ``UserenaAuthenticationBackend`` which should return a ``User``
when supplied with a username/email and a correct password.
"""
fixtures = ['users',]
backend = UserenaAuthenticationBackend()
def test_with_username(self):
""" Test the backend when usernames are supplied. """
# Invalid usernames or passwords
invalid_data_dicts = [
# Invalid password
{'identification': 'john',
'password': 'inhalefish'},
# Invalid username
{'identification': 'alice',
'password': 'blowfish'},
]
for invalid_dict in invalid_data_dicts:
result = self.backend.authenticate(identification=invalid_dict['identification'],
password=invalid_dict['password'])
self.failIf(isinstance(result, User))
# Valid username and password
result = self.backend.authenticate(identification='john',
password='blowfish')
self.failUnless(isinstance(result, User))
def test_with_email(self):
""" Test the backend when email address is supplied """
# Invalid e-mail adressses or passwords
invalid_data_dicts = [
# Invalid password
{'identification': 'john@example.com',
'password': 'inhalefish'},
# Invalid e-mail address
{'identification': 'alice@example.com',
'password': 'blowfish'},
]
for invalid_dict in invalid_data_dicts:
result = self.backend.authenticate(identification=invalid_dict['identification'],
password=invalid_dict['password'])
self.failIf(isinstance(result, User))
# Valid e-mail address and password
result = self.backend.authenticate(identification='john@example.com',
password='blowfish')
self.failUnless(isinstance(result, User))
def test_get_user(self):
""" Test that the user is returned """
user = self.backend.get_user(1)
self.failUnlessEqual(user.username, 'john')
# None should be returned for a nonexistent id.
user = self.backend.get_user(99)
self.failIf(user)
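# Usage sketch (illustrative, not part of the test suite): with the backend
# listed in settings.AUTHENTICATION_BACKENDS, Django's authenticate() takes
# the same keyword arguments the backend accepts:
#
#     user = authenticate(identification='john@example.com', password='blowfish')
#     if user is not None:
#         pass  # credentials accepted; ``user`` is a User instance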
| moreati/django-userena | userena/tests/test_backends.py | Python | bsd-3-clause | 2,608 | 0.001917 |
#
# genxmlif, Release 0.9.0
# file: __init__.py
#
# genxmlif package file
#
# history:
# 2005-04-25 rl created
#
# Copyright (c) 2005-2008 by Roland Leuthe. All rights reserved.
#
# --------------------------------------------------------------------
# The generic XML interface is
#
# Copyright (c) 2005-2008 by Roland Leuthe
#
# By obtaining, using, and/or copying this software and/or its
# associated documentation, you agree that you have read, understood,
# and will comply with the following terms and conditions:
#
# Permission to use, copy, modify, and distribute this software and
# its associated documentation for any purpose and without fee is
# hereby granted, provided that the above copyright notice appears in
# all copies, and that both that copyright notice and this permission
# notice appear in supporting documentation, and that the name of
# the author not be used in advertising or publicity
# pertaining to distribution of the software without specific, written
# prior permission.
#
# THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD
# TO THIS SOFTWARE, INCLUDING ALL IMPLIED WARRANTIES OF MERCHANT-
# ABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR
# BE LIABLE FOR ANY SPECIAL, INDIRECT OR CONSEQUENTIAL DAMAGES OR ANY
# DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS,
# WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
# OF THIS SOFTWARE.
# --------------------------------------------------------------------
######################################################################
# PUBLIC DEFINITIONS
######################################################################
# supported XML interfaces
XMLIF_MINIDOM = "XMLIF_MINIDOM"
XMLIF_4DOM = "XMLIF_4DOM"
XMLIF_ELEMENTTREE = "XMLIF_ELEMENTTREE"
# namespace definitions
XINC_NAMESPACE = "http://www.w3.org/2001/XInclude"
# definition of genxmlif path
import os
GENXMLIF_DIR = os.path.dirname(__file__)
########################################
# central function to choose the XML interface to be used
#
def chooseXmlIf (xmlIf, verbose=0, useCaching=1, processXInclude=1):
if xmlIf == XMLIF_MINIDOM:
import xmlifMinidom
return xmlifMinidom.XmlInterfaceMinidom(verbose, useCaching, processXInclude)
elif xmlIf == XMLIF_4DOM:
import xmlif4Dom
return xmlif4Dom.XmlInterface4Dom(verbose, useCaching, processXInclude)
elif xmlIf == XMLIF_ELEMENTTREE:
import xmlifElementTree
return xmlifElementTree.XmlInterfaceElementTree(verbose, useCaching, processXInclude)
else:
raise AttributeError, "Unknown XML interface: %s" %(xmlIf)
########################################
# define own exception for GenXmlIf errors
# The following errors/exceptions are mapped to a GenxmlIf exception:
# - Expat errors
# - XInclude errors
#
class GenXmlIfError (StandardError):
pass
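# Illustrative usage (a sketch, not shipped with the package): pick the
# ElementTree binding and fall back to minidom if it is unavailable.
#
#     import genxmlif
#     try:
#         xml_if = genxmlif.chooseXmlIf(genxmlif.XMLIF_ELEMENTTREE)
#     except ImportError:
#         xml_if = genxmlif.chooseXmlIf(genxmlif.XMLIF_MINIDOM)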
| Gussy/mavlink | pymavlink/generator/lib/genxmlif/__init__.py | Python | lgpl-3.0 | 3,040 | 0.003618 |
#!/usr/bin/env python2
from distutils.core import setup
from distutils.dist import Distribution
from distutils.cmd import Command
from distutils.command.install_data import install_data
from distutils.command.build import build
from distutils.dep_util import newer
from distutils.log import warn, info, error
from distutils.errors import DistutilsFileError
import glob
import os
import sys
import subprocess
import platform
from terminatorlib.version import APP_NAME, APP_VERSION
PO_DIR = 'po'
MO_DIR = os.path.join('build', 'mo')
CSS_DIR = os.path.join('terminatorlib', 'themes')
class TerminatorDist(Distribution):
global_options = Distribution.global_options + [
("build-documentation", None, "Build the documentation"),
("install-documentation", None, "Install the documentation"),
("without-gettext", None, "Don't build/install gettext .mo files"),
("without-icon-cache", None, "Don't attempt to run gtk-update-icon-cache")]
def __init__ (self, *args):
self.without_gettext = False
self.without_icon_cache = False
Distribution.__init__(self, *args)
class BuildData(build):
def run (self):
build.run (self)
if not self.distribution.without_gettext:
# Build the translations
for po in glob.glob (os.path.join (PO_DIR, '*.po')):
lang = os.path.basename(po[:-3])
mo = os.path.join(MO_DIR, lang, 'LC_MESSAGES', 'terminator.mo')
directory = os.path.dirname(mo)
if not os.path.exists(directory):
info('creating %s' % directory)
os.makedirs(directory)
if newer(po, mo):
info('compiling %s -> %s' % (po, mo))
try:
rc = subprocess.call(['msgfmt', '-o', mo, po])
if rc != 0:
raise Warning, "msgfmt returned %d" % rc
except Exception, e:
error("Building gettext files failed. Ensure you have gettext installed. Alternatively, try setup.py --without-gettext [build|install]")
error("Error: %s" % str(e))
sys.exit(1)
TOP_BUILDDIR='.'
INTLTOOL_MERGE='intltool-merge'
desktop_in='data/terminator.desktop.in'
desktop_data='data/terminator.desktop'
rc = os.system ("C_ALL=C " + INTLTOOL_MERGE + " -d -u -c " + TOP_BUILDDIR +
"/po/.intltool-merge-cache " + TOP_BUILDDIR + "/po " +
desktop_in + " " + desktop_data)
if rc != 0:
# run the desktop_in through a command to strip the "_" characters
with open(desktop_in) as file_in, open(desktop_data, 'w') as file_data:
[file_data.write(line.lstrip('_')) for line in file_in]
appdata_in='data/terminator.appdata.xml.in'
appdata_data='data/terminator.appdata.xml'
rc = os.system ("C_ALL=C " + INTLTOOL_MERGE + " -x -u -c " + TOP_BUILDDIR +
"/po/.intltool-merge-cache " + TOP_BUILDDIR + "/po " +
appdata_in + " " + appdata_data)
if rc != 0:
# run the appdata_in through a command to strip the "_" characters
with open(appdata_in) as file_in, open(appdata_data, 'w') as file_data:
[file_data.write(line.replace('<_','<').replace('</_','</')) for line in file_in]
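# Stand-alone sketch of the msgfmt step above (hypothetical catalog paths),
# handy for compiling a single translation without running the whole build:
#
#     import subprocess
#     rc = subprocess.call(['msgfmt', '-o',
#                           'build/mo/de/LC_MESSAGES/terminator.mo', 'po/de.po'])
#     if rc != 0:
#         raise RuntimeError('msgfmt returned %d' % rc)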
class Uninstall(Command):
description = "Attempt an uninstall from an install --record file"
user_options = [('manifest=', None, 'Installation record filename')]
def initialize_options(self):
self.manifest = None
def finalize_options(self):
pass
def get_command_name(self):
return 'uninstall'
def run(self):
f = None
self.ensure_filename('manifest')
try:
try:
if not self.manifest:
raise DistutilsFileError("Pass manifest with --manifest=file")
f = open(self.manifest)
files = [file.strip() for file in f]
except IOError, e:
raise DistutilsFileError("unable to open install manifest: %s", str(e))
finally:
if f:
f.close()
for file in files:
if os.path.isfile(file) or os.path.islink(file):
info("removing %s" % repr(file))
if not self.dry_run:
try:
os.unlink(file)
except OSError, e:
warn("could not delete: %s" % repr(file))
elif not os.path.isdir(file):
info("skipping %s" % repr(file))
dirs = set()
for file in reversed(sorted(files)):
dir = os.path.dirname(file)
if dir not in dirs and os.path.isdir(dir) and len(os.listdir(dir)) == 0:
dirs.add(dir)
# Only nuke empty Python library directories, else we could destroy, e.g.,
# locale directories where we're the only app with a .mo installed.
if dir.find("site-packages/") > 0:
info("removing %s" % repr(dir))
if not self.dry_run:
try:
os.rmdir(dir)
except OSError, e:
warn("could not remove directory: %s" % str(e))
else:
info("skipping empty directory %s" % repr(dir))
class InstallData(install_data):
def run (self):
self.data_files.extend (self._find_css_files ())
self.data_files.extend (self._find_mo_files ())
install_data.run (self)
if not self.distribution.without_icon_cache:
self._update_icon_cache ()
# We should do this on uninstall too
def _update_icon_cache(self):
info("running gtk-update-icon-cache")
try:
subprocess.call(["gtk-update-icon-cache", "-q", "-f", "-t", os.path.join(self.install_dir, "share/icons/hicolor")])
except Exception, e:
warn("updating the GTK icon cache failed: %s" % str(e))
def _find_mo_files (self):
data_files = []
if not self.distribution.without_gettext:
for mo in glob.glob (os.path.join (MO_DIR, '*', 'LC_MESSAGES', 'terminator.mo')):
lang = os.path.basename(os.path.dirname(os.path.dirname(mo)))
dest = os.path.join('share', 'locale', lang, 'LC_MESSAGES')
data_files.append((dest, [mo]))
return data_files
def _find_css_files (self):
data_files = []
for css_dir in glob.glob (os.path.join (CSS_DIR, '*')):
srce = glob.glob (os.path.join(css_dir, 'gtk-3.0', 'apps', '*.css'))
dest = os.path.join('share', 'terminator', css_dir, 'gtk-3.0', 'apps')
data_files.append((dest, srce))
return data_files
class Test(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import subprocess
import sys
errno = subprocess.call(['bash', 'run_tests'])
raise SystemExit(errno)
if platform.system() in ['FreeBSD', 'OpenBSD']:
man_dir = 'man'
else:
man_dir = 'share/man'
setup(name=APP_NAME,
version=APP_VERSION,
description='Terminator, the robot future of terminals',
author='Chris Jones',
author_email='cmsj@tenshu.net',
url='https://gnometerminator.blogspot.com/p/introduction.html',
license='GNU GPL v2',
scripts=['terminator', 'remotinator'],
data_files=[
('bin', ['terminator.wrapper']),
('share/appdata', ['data/terminator.appdata.xml']),
('share/applications', ['data/terminator.desktop']),
(os.path.join(man_dir, 'man1'), ['doc/terminator.1']),
(os.path.join(man_dir, 'man5'), ['doc/terminator_config.5']),
('share/pixmaps', ['data/icons/hicolor/48x48/apps/terminator.png']),
('share/icons/hicolor/scalable/apps', glob.glob('data/icons/hicolor/scalable/apps/*.svg')),
('share/icons/hicolor/16x16/apps', glob.glob('data/icons/hicolor/16x16/apps/*.png')),
('share/icons/hicolor/22x22/apps', glob.glob('data/icons/hicolor/22x22/apps/*.png')),
('share/icons/hicolor/24x24/apps', glob.glob('data/icons/hicolor/24x24/apps/*.png')),
('share/icons/hicolor/32x32/apps', glob.glob('data/icons/hicolor/32x32/apps/*.png')),
('share/icons/hicolor/48x48/apps', glob.glob('data/icons/hicolor/48x48/apps/*.png')),
('share/icons/hicolor/16x16/actions', glob.glob('data/icons/hicolor/16x16/actions/*.png')),
('share/icons/hicolor/16x16/status', glob.glob('data/icons/hicolor/16x16/status/*.png')),
('share/icons/HighContrast/scalable/apps', glob.glob('data/icons/HighContrast/scalable/apps/*.svg')),
('share/icons/HighContrast/16x16/apps', glob.glob('data/icons/HighContrast/16x16/apps/*.png')),
('share/icons/HighContrast/22x22/apps', glob.glob('data/icons/HighContrast/22x22/apps/*.png')),
('share/icons/HighContrast/24x24/apps', glob.glob('data/icons/HighContrast/24x24/apps/*.png')),
('share/icons/HighContrast/32x32/apps', glob.glob('data/icons/HighContrast/32x32/apps/*.png')),
('share/icons/HighContrast/48x48/apps', glob.glob('data/icons/HighContrast/48x48/apps/*.png')),
('share/icons/HighContrast/16x16/actions', glob.glob('data/icons/HighContrast/16x16/actions/*.png')),
('share/icons/HighContrast/16x16/status', glob.glob('data/icons/HighContrast/16x16/status/*.png')),
],
packages=['terminatorlib', 'terminatorlib.configobj',
'terminatorlib.plugins'],
package_data={'terminatorlib': ['preferences.glade', 'layoutlauncher.glade']},
cmdclass={'build': BuildData, 'install_data': InstallData, 'uninstall': Uninstall, 'test':Test},
distclass=TerminatorDist
)
| albfan/terminator | setup.py | Python | gpl-2.0 | 9,520 | 0.013655 |
# ----------------------------------------------------------------
# Copyright 2016 Cisco Systems
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------
#
# Copied from the bigmuddy collector
#
from google.protobuf.message import Message
from google.protobuf.descriptor import FieldDescriptor
DECODE_FN_MAP = {
FieldDescriptor.TYPE_DOUBLE: float,
FieldDescriptor.TYPE_FLOAT: float,
FieldDescriptor.TYPE_INT32: int,
FieldDescriptor.TYPE_INT64: long,
FieldDescriptor.TYPE_UINT32: int,
FieldDescriptor.TYPE_UINT64: long,
FieldDescriptor.TYPE_SINT32: int,
FieldDescriptor.TYPE_SINT64: long,
FieldDescriptor.TYPE_FIXED32: int,
FieldDescriptor.TYPE_FIXED64: long,
FieldDescriptor.TYPE_SFIXED32: int,
FieldDescriptor.TYPE_SFIXED64: long,
FieldDescriptor.TYPE_BOOL: bool,
FieldDescriptor.TYPE_STRING: unicode,
FieldDescriptor.TYPE_BYTES: lambda b: bytes_to_string(b),
FieldDescriptor.TYPE_ENUM: int,
}
def bytes_to_string (bytes):
"""
Convert a byte array into a colon-separated hex string, e.g. aa:bb:cc
"""
return ":".join(["{:02x}".format(int(ord(c))) for c in bytes])
def field_type_to_fn(msg, field):
if field.type == FieldDescriptor.TYPE_MESSAGE:
# For embedded messages recursively call this function. If it is
# a repeated field return a list
result = lambda msg: proto_to_dict(msg)
elif field.type in DECODE_FN_MAP:
result = DECODE_FN_MAP[field.type]
else:
raise TypeError("Field %s.%s has unrecognised type id %d" % (
msg.__class__.__name__, field.name, field.type))
return result
def proto_to_dict(msg):
result_dict = {}
extensions = {}
for field, value in msg.ListFields():
conversion_fn = field_type_to_fn(msg, field)
# Skip extensions
if not field.is_extension:
# Repeated fields result in an array, otherwise just call the
# conversion function to store the value
if field.label == FieldDescriptor.LABEL_REPEATED:
result_dict[field.name] = [conversion_fn(v) for v in value]
else:
result_dict[field.name] = conversion_fn(value)
return result_dict
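# Usage sketch (hypothetical generated module and message class):
#
#     from telemetry_pb2 import Telemetry  # hypothetical *_pb2 module
#     msg = Telemetry()
#     msg.ParseFromString(raw_bytes)
#     as_dict = proto_to_dict(msg)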
| abhikeshav/ydk-py | core/ydk/mdt/proto_to_dict.py | Python | apache-2.0 | 2,788 | 0.001793 |
import subprocess
import math
def launch(script_name, num_partitions, num_machines, pos, coordinator, machine_name, debug=False):
if num_machines > num_partitions:
raise RuntimeError("Need more partitions than machine")
machine_size = int(math.ceil(float(num_partitions) / float(num_machines)))
start = pos * machine_size
end = min((pos+1)*machine_size, num_partitions)
processes = []
# Launch content server processes
for i in range(start, end):
print("Starting process #" + str(i))
if debug:
p = subprocess.Popen(["python3-dbg", script_name, coordinator, machine_name, str(i)])
else:
p = subprocess.Popen([script_name, coordinator, machine_name, str(i)])
processes.append(p)
while processes:
p = processes.pop()
p.wait()
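# Worked example (hypothetical arguments): 10 partitions over 3 machines
# gives machine_size == ceil(10/3) == 4, so pos 0 runs partitions 0-3,
# pos 1 runs 4-7 and pos 2 runs the remaining 8-9:
#
#     launch('content_server.py', num_partitions=10, num_machines=3, pos=2,
#            coordinator='tcp://head:5555', machine_name='node2')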
| kaimast/inanutshell | linear/common/launch.py | Python | bsd-2-clause | 842 | 0.008314 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# frc_rekt documentation build configuration file, created by
# sphinx-quickstart on Wed Apr 12 00:19:47 2017.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = ['sphinx.ext.autodoc',
'sphinx.ext.todo',
'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffixes as a list of strings:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = 'frc_rekt'
copyright = '2017, Author'
author = 'Author'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = ''
# The full version, including alpha/beta/rc tags.
release = ''
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = 'en'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# These patterns also affect html_static_path and html_extra_path
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# If true, `todo` and `todoList` produce output, else they produce nothing.
todo_include_todos = True
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
# html_theme_options = {}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# -- Options for HTMLHelp output ------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'frc_rektdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'frc_rekt.tex', 'frc\\_rekt Documentation',
'Author', 'manual'),
]
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'frc_rekt', 'frc_rekt Documentation',
[author], 1)
]
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'frc_rekt', 'frc_rekt Documentation',
author, 'frc_rekt', 'One line description of project.',
'Miscellaneous'),
]
# -- Options for Epub output ----------------------------------------------
# Bibliographic Dublin Core info.
epub_title = project
epub_author = author
epub_publisher = author
epub_copyright = copyright
# The unique identifier of the text. This can be an ISBN number
# or the project homepage.
#
# epub_identifier = ''
# A unique identification for the text.
#
# epub_uid = ''
# A list of files that should not be packed into the epub file.
epub_exclude_files = ['search.html']
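# To build the HTML docs with this configuration (run from the docs/ directory):
#
#     sphinx-build -b html . _build/html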
| jaustinpage/frc_rekt | docs/conf.py | Python | mit | 5,302 | 0.000943 |
# Lint as: python2, python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TensorFlow Lite Python metrics helper TFLiteMetrics check."""
import gc
import os
import tempfile
import time
from unittest import mock
from absl.testing import parameterized
import numpy as np
import tensorflow as tf
from tensorflow.core.framework import graph_pb2
from tensorflow.lite.python import lite
from tensorflow.lite.python import metrics_nonportable as metrics
from tensorflow.lite.python.convert import ConverterError
from tensorflow.lite.python.convert import register_custom_opdefs
from tensorflow.lite.python.metrics_wrapper import converter_error_data_pb2
from tensorflow.python.client import session
from tensorflow.python.eager import monitoring
from tensorflow.python.framework import convert_to_constants
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.framework.importer import import_graph_def
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.platform import resource_loader
from tensorflow.python.platform import test
from tensorflow.python.saved_model import saved_model
from tensorflow.python.training.tracking import tracking
class MetricsNonportableTest(test_util.TensorFlowTestCase):
def test_TFLiteMetrics_creation_no_arg_success(self):
metrics.TFLiteMetrics()
def test_TFLiteMetrics_creation_arg_success(self):
metrics.TFLiteMetrics('hash', '/path/to/model')
def test_TFLiteMetrics_creation_fails_with_only_hash(self):
with self.assertRaises(ValueError):
metrics.TFLiteMetrics(model_hash='hash')
def test_TFLiteMetrics_creation_fail2_with_only_model_path(self):
with self.assertRaises(ValueError):
metrics.TFLiteMetrics(model_path='/path/to/model')
def test_debugger_creation_counter_increase_multiple_same_topic_success(self):
try:
stub = metrics.TFLiteMetrics()
stub.increase_counter_debugger_creation()
self.assertEqual(metrics._counter_debugger_creation.get_cell().value(), 1)
stub2 = metrics.TFLiteMetrics()
stub2.increase_counter_debugger_creation()
self.assertEqual(metrics._counter_debugger_creation.get_cell().value(), 2)
del stub
gc.collect()
stub2.increase_counter_debugger_creation()
self.assertEqual(metrics._counter_debugger_creation.get_cell().value(), 3)
except:
raise Exception('No exception should be raised.')
def test_interpreter_creation_counter_increase_success(self):
stub = metrics.TFLiteMetrics()
stub.increase_counter_interpreter_creation()
self.assertEqual(
metrics._counter_interpreter_creation.get_cell('python').value(), 1)
def test_converter_attempt_counter_increase_success(self):
stub = metrics.TFLiteMetrics()
stub.increase_counter_converter_attempt()
self.assertEqual(metrics._counter_conversion_attempt.get_cell().value(), 1)
def test_converter_success_counter_increase_success(self):
stub = metrics.TFLiteMetrics()
stub.increase_counter_converter_success()
self.assertEqual(metrics._counter_conversion_success.get_cell().value(), 1)
def test_converter_params_set_success(self):
stub = metrics.TFLiteMetrics()
stub.set_converter_param('name', 'value')
self.assertEqual(
metrics._gauge_conversion_params.get_cell('name').value(), 'value')
def test_converter_params_multiple_set_success(self):
stub = metrics.TFLiteMetrics()
stub.set_converter_param('name', 'value')
stub.set_converter_param('name', 'value1')
self.assertEqual(
metrics._gauge_conversion_params.get_cell('name').value(), 'value1')
def test_converter_params_multiple_label_success(self):
stub = metrics.TFLiteMetrics()
stub.set_converter_param('name1', 'value1')
stub.set_converter_param('name2', 'value2')
self.assertEqual(
metrics._gauge_conversion_params.get_cell('name1').value(), 'value1')
self.assertEqual(
metrics._gauge_conversion_params.get_cell('name2').value(), 'value2')
def test_converter_params_set_latency(self):
stub = metrics.TFLiteMetrics()
stub.set_converter_latency(34566)
self.assertEqual(metrics._gauge_conversion_latency.get_cell().value(),
34566)
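# The tests above share one pattern: bump a metric through the public helper,
# then read it back through the module-level monitoring object, e.g.
#
#     stub = metrics.TFLiteMetrics()
#     stub.increase_counter_interpreter_creation()
#     value = metrics._counter_interpreter_creation.get_cell('python').value()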
class ConverterMetricsTest(test_util.TensorFlowTestCase):
"""Testing conversion metrics."""
def _constructGraphDef(self):
with ops.Graph().as_default():
in_tensor = array_ops.placeholder(
shape=[None, 16, 16, 3], dtype=dtypes.float32, name='in_tensor')
math_ops.add(in_tensor, in_tensor, name='add')
sess = session.Session()
return (
convert_to_constants.convert_variables_to_constants_from_session_graph(
sess, sess.graph_def, ['add']))
def test_conversion_from_constructor_success(self):
frozen_graph_def = self._constructGraphDef()
# Check metrics when conversion succeeded.
converter = lite.TFLiteConverter(frozen_graph_def, None, None,
[('in_tensor', [2, 16, 16, 3])], ['add'])
mock_metrics = mock.create_autospec(
metrics.TFLiteConverterMetrics, instance=True)
converter._tflite_metrics = mock_metrics
tflite_model = converter.convert()
self.assertIsNotNone(tflite_model)
mock_metrics.assert_has_calls([
mock.call.increase_counter_converter_attempt(),
mock.call.increase_counter_converter_success(),
mock.call.export_metrics(),
mock.call.set_converter_param('input_format', '1'),
mock.call.set_converter_param('enable_mlir_converter', 'True'),
mock.call.set_converter_param('allow_custom_ops', 'False'),
mock.call.set_converter_param('api_version', '1'),
], any_order=True) # pyformat: disable
def test_conversion_from_constructor_fail(self):
frozen_graph_def = self._constructGraphDef()
# Check metrics when conversion failed.
converter = lite.TFLiteConverter(frozen_graph_def, None, None,
[('wrong_tensor', [2, 16, 16, 3])],
['add'])
mock_metrics = mock.create_autospec(
metrics.TFLiteConverterMetrics, instance=True)
converter._tflite_metrics = mock_metrics
with self.assertRaises(ConverterError):
converter.convert()
mock_metrics.assert_has_calls([
mock.call.increase_counter_converter_attempt(),
mock.call.set_converter_param('output_format', '2'),
mock.call.set_converter_param('select_user_tf_ops', 'None'),
mock.call.set_converter_param('post_training_quantize', 'False'),
], any_order=True) # pyformat: disable
mock_metrics.increase_counter_converter_success.assert_not_called()
def _getIntegerQuantizeModel(self):
np.random.seed(0)
root = tracking.AutoTrackable()
@tf.function(
input_signature=[tf.TensorSpec(shape=[1, 5, 5, 3], dtype=tf.float32)])
def func(inp):
conv = tf.nn.conv2d(
inp, tf.ones([3, 3, 3, 16]), strides=[1, 1, 1, 1], padding='SAME')
output = tf.nn.relu(conv, name='output')
return output
def calibration_gen():
for _ in range(5):
yield [np.random.uniform(-1, 1, size=(1, 5, 5, 3)).astype(np.float32)]
root.f = func
to_save = root.f.get_concrete_function()
return (root, to_save, calibration_gen)
def test_conversion_from_frozen_graph_v2(self):
model, func, calibration_gen = self._getIntegerQuantizeModel()
quantized_converter = lite.TFLiteConverterV2.from_concrete_functions([func],
model)
mock_metrics = mock.create_autospec(
metrics.TFLiteConverterMetrics, instance=True)
quantized_converter._tflite_metrics = mock_metrics
quantized_converter.optimizations = [lite.Optimize.DEFAULT]
quantized_converter.representative_dataset = calibration_gen
quantized_tflite_model = quantized_converter.convert()
self.assertIsNotNone(quantized_tflite_model)
mock_metrics.assert_has_calls([
mock.call.increase_counter_converter_attempt(),
mock.call.increase_counter_converter_success(),
mock.call.set_converter_param(
'optimization_post_training_integer_quantize', 'True'),
mock.call.set_converter_param('inference_type', 'tf.int8'),
mock.call.set_converter_param('select_user_tf_ops', 'None'),
mock.call.set_converter_param('activations_type', 'tf.int8'),
], any_order=True) # pyformat: disable
def test_conversion_from_keras_v2(self):
x = [-1, 0, 1, 2, 3, 4]
y = [-3, -1, 1, 3, 5, 7]
model = tf.keras.models.Sequential(
[tf.keras.layers.Dense(units=1, input_shape=[1])])
model.compile(optimizer='sgd', loss='mean_squared_error')
model.fit(x, y, epochs=1)
converter = lite.TFLiteConverterV2.from_keras_model(model)
mock_metrics = mock.create_autospec(
metrics.TFLiteConverterMetrics, instance=True)
converter._tflite_metrics = mock_metrics
converter.convert()
mock_metrics.assert_has_calls([
mock.call.increase_counter_converter_attempt(),
mock.call.increase_counter_converter_success(),
mock.call.export_metrics(),
mock.call.set_converter_param('inference_type', 'tf.float32'),
mock.call.set_converter_param('target_ops', 'TFLITE_BUILTINS'),
mock.call.set_converter_param('optimization_default', 'False'),
], any_order=True) # pyformat: disable
def _createV1SavedModel(self, shape):
"""Create a simple SavedModel."""
saved_model_dir = os.path.join(self.get_temp_dir(), 'simple_savedmodel')
with tf.Graph().as_default():
with tf.compat.v1.Session() as sess:
in_tensor_1 = tf.compat.v1.placeholder(
shape=shape, dtype=tf.float32, name='inputB')
in_tensor_2 = tf.compat.v1.placeholder(
shape=shape, dtype=tf.float32, name='inputA')
variable_node = tf.Variable(1.0, name='variable_node')
out_tensor = in_tensor_1 + in_tensor_2 * variable_node
inputs = {'x': in_tensor_1, 'y': in_tensor_2}
outputs = {'z': out_tensor}
sess.run(tf.compat.v1.variables_initializer([variable_node]))
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
return saved_model_dir
def test_conversion_from_saved_model(self):
saved_model_dir = self._createV1SavedModel(shape=[1, 16, 16, 3])
converter = lite.TFLiteSavedModelConverter(saved_model_dir, set(['serve']),
['serving_default'])
converter.experimental_new_converter = True
mock_metrics = mock.create_autospec(
metrics.TFLiteConverterMetrics, instance=True)
converter._tflite_metrics = mock_metrics
time.process_time = mock.Mock(side_effect=np.arange(1, 1000, 2).tolist())
converter.convert()
mock_metrics.assert_has_calls([
mock.call.increase_counter_converter_attempt(),
mock.call.increase_counter_converter_success(),
mock.call.set_converter_latency(2000),
mock.call.export_metrics(),
mock.call.set_converter_param('enable_mlir_converter', 'True'),
], any_order=True) # pyformat: disable
def test_conversion_from_saved_model_v2(self):
saved_model_dir = self._createV1SavedModel(shape=[1, 16, 16, 3])
converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
converter.experimental_new_converter = False
mock_metrics = mock.create_autospec(
metrics.TFLiteConverterMetrics, instance=True)
converter._tflite_metrics = mock_metrics
converter.convert()
mock_metrics.assert_has_calls([
mock.call.increase_counter_converter_attempt(),
mock.call.increase_counter_converter_success(),
mock.call.export_metrics(),
mock.call.set_converter_param('enable_mlir_converter', 'False'),
mock.call.set_converter_param('api_version', '2'),
], any_order=True) # pyformat: disable
def disable_converter_counter_metrics(self, tflite_metrics):
def empty_func():
pass
tflite_metrics.increase_counter_converter_attempt = empty_func
tflite_metrics.increase_counter_converter_success = empty_func
def test_export_at_conversion_done(self):
saved_model_dir = self._createV1SavedModel(shape=[1, 16, 16, 3])
converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
tflite_metrics = converter._tflite_metrics
mock_exporter = mock.MagicMock()
tflite_metrics._metrics_exporter = mock_exporter
self.disable_converter_counter_metrics(tflite_metrics)
mock_exporter.ExportMetrics.assert_not_called()
converter.convert()
mock_exporter.ExportMetrics.assert_called_once()
tflite_metrics.__del__()
mock_exporter.ExportMetrics.assert_called_once()
def test_export_at_exit(self):
saved_model_dir = self._createV1SavedModel(shape=[1, 16, 16, 3])
converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
tflite_metrics = converter._tflite_metrics
mock_exporter = mock.MagicMock()
tflite_metrics._metrics_exporter = mock_exporter
self.disable_converter_counter_metrics(tflite_metrics)
mock_exporter.ExportMetrics.assert_not_called()
tflite_metrics.__del__()
mock_exporter.ExportMetrics.assert_called_once()
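# Pattern used throughout this class: replace the converter's metrics object
# with an autospec'd mock, convert, then assert on the recorded calls, e.g.
#
#     mock_metrics = mock.create_autospec(
#         metrics.TFLiteConverterMetrics, instance=True)
#     converter._tflite_metrics = mock_metrics
#     converter.convert()
#     mock_metrics.increase_counter_converter_attempt.assert_called()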
def mock_ngrams(data, width, axis=-1, string_separator=' ', name=None):
"""This mock Ngrams lack the width attr, causing conversion to fail."""
experimental_implements = [
'name: "tftext:Ngrams"',
'attr { key: "axis" value { i: %d } }' % axis,
'attr { key: "reduction_type" value { s: "STRING_JOIN" } }',
'attr { key: "string_separator" value { s: "%s" } }' % string_separator,
]
experimental_implements = ' '.join(experimental_implements)
@tf.function(experimental_implements=experimental_implements)
def func(data):
with ops.name_scope(name, 'NGrams', [data, width]):
data = ragged_tensor.convert_to_tensor_or_ragged_tensor(data, name='data')
slices = []
for start in range(width):
stop = None if start - width + 1 == 0 else start - width + 1
if axis >= 0:
idx = [slice(None)] * axis + [slice(start, stop)]
else:
idx = [Ellipsis, slice(start, stop)] + [slice(None)] * (-axis - 1)
slices.append(data[idx])
# Stack the slices.
stack_axis = axis + 1 if axis >= 0 else axis
windowed_data = array_ops.stack(slices, stack_axis)
return string_ops.reduce_join(
windowed_data, axis=axis, separator=string_separator)
return func(data)
class ConverterErrorMetricTest(test_util.TensorFlowTestCase,
parameterized.TestCase):
"""Testing conversion error metric."""
def setUp(self):
super(ConverterErrorMetricTest, self).setUp()
# Mock the metrics instances, except the errors gauge, so other test cases are not affected.
mock_attempt = mock.create_autospec(monitoring.Counter, instance=True)
self._counter_conversion_attempt = metrics._counter_conversion_attempt
metrics._counter_conversion_attempt = mock_attempt
mock_success = mock.create_autospec(monitoring.Counter, instance=True)
self._counter_conversion_success = metrics._counter_conversion_success
metrics._counter_conversion_success = mock_success
mock_params = mock.create_autospec(monitoring.StringGauge, instance=True)
self._gauge_conversion_params = metrics._gauge_conversion_params
metrics._gauge_conversion_params = mock_params
def tearDown(self):
super(ConverterErrorMetricTest, self).tearDown()
# Restore metrics instances.
metrics._counter_conversion_attempt = self._counter_conversion_attempt
metrics._counter_conversion_success = self._counter_conversion_success
metrics._gauge_conversion_params = self._gauge_conversion_params
def convert_and_check_location_info(self,
converter,
expected_type,
expected_sources=None):
# The custom attribute of ConverterError can't be accessed with
# assertRaises, so use a try/except block instead.
try:
tflite_model = converter.convert()
self.assertIsNone(tflite_model)
except ConverterError as converter_error:
# pylint: disable=g-assert-in-except
self.assertLen(converter_error.errors, 1)
location = converter_error.errors[0].location
self.assertEqual(location.type, expected_type)
if expected_sources:
debug_string = str(location)
for source in expected_sources:
self.assertIn(source, debug_string)
# pylint: enable=g-assert-in-except
def test_failure_at_PrepareCompositeFunctionsPass(self):
class NgramsLayer(tf.keras.layers.Layer):
def call(self, input_tensor, **kwargs):
return mock_ngrams(input_tensor, width=2, axis=-1, string_separator=' ')
# Registers a fake WhitespaceTokenizeWithOffsets so the TFText fusing logic
# is enabled on the MLIR side.
custom_opdefs_str = (
'name: \'WhitespaceTokenizeWithOffsets\' input_arg: {name: \'Input1\' '
'type: DT_FLOAT} input_arg: {name: \'Input2\' type: DT_FLOAT} '
'output_arg: {name: \'Output\' type: DT_FLOAT}')
register_custom_opdefs([custom_opdefs_str])
model = tf.keras.models.Sequential([NgramsLayer()])
model.predict(tf.constant(['test']))
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.allow_custom_ops = True
self.convert_and_check_location_info(
converter, converter_error_data_pb2.ConverterErrorData.UNKNOWNLOC)
exported_error = metrics._gauge_conversion_errors.get_cell(
'CONVERT_TF_TO_TFLITE_MODEL', 'PrepareCompositeFunctionsPass', '',
'UNKNOWN').value()
self.assertEqual(exported_error,
"\'width\' attribute is not set or not an integer\n")
def test_need_flex_ops(self):
def create_graph_with_custom_add(opname='CustomAdd'):
custom_opdefs_str = (
'name: \'' + opname +
'\' input_arg: {name: \'Input1\' type: DT_FLOAT} '
'input_arg: {name: \'Input2\' type: DT_FLOAT} output_arg: {name: '
'\'Output\' type: DT_FLOAT}')
# Create a graph that has one add op.
new_graph = graph_pb2.GraphDef()
with ops.Graph().as_default():
with session.Session() as sess:
in_tensor = array_ops.placeholder(
shape=[1, 16, 16, 3], dtype=dtypes.float32, name='input')
out_tensor = in_tensor + in_tensor
inputs = {'x': in_tensor}
outputs = {'z': out_tensor}
new_graph.CopyFrom(sess.graph_def)
# Rename Add op name to opname.
for node in new_graph.node:
if node.op.startswith('Add'):
node.op = opname
del node.attr['T']
# Register custom op defs to import modified graph def.
register_custom_opdefs([custom_opdefs_str])
return (new_graph, inputs, outputs)
new_graph, inputs, outputs = create_graph_with_custom_add()
# Import to load the custom opdef.
saved_model_dir = os.path.join(self.get_temp_dir(), 'model')
with ops.Graph().as_default():
with session.Session() as sess:
import_graph_def(new_graph, name='')
saved_model.simple_save(sess, saved_model_dir, inputs, outputs)
converter = lite.TFLiteConverterV2.from_saved_model(saved_model_dir)
self.convert_and_check_location_info(
converter,
converter_error_data_pb2.ConverterErrorData.NAMELOC,
expected_sources='add')
exported_error = metrics._gauge_conversion_errors.get_cell(
'CONVERT_TF_TO_TFLITE_MODEL', 'CONVERT_SAVED_MODEL', 'tf.CustomAdd',
'ERROR_NEEDS_CUSTOM_OPS').value()
self.assertEqual(
exported_error,
"\'tf.CustomAdd\' op is neither a custom op nor a flex op\n"
"Error code: ERROR_NEEDS_CUSTOM_OPS"
)
def test_unsupported_control_flow_v1(self):
filename = resource_loader.get_path_to_datafile(
'testdata/control_flow_v1_saved_model')
converter = lite.TFLiteConverterV2.from_saved_model(filename)
self.convert_and_check_location_info(
converter, converter_error_data_pb2.ConverterErrorData.UNKNOWNLOC)
exported_error = metrics._gauge_conversion_errors.get_cell(
'CONVERT_TF_TO_TFLITE_MODEL', 'CONVERT_SAVED_MODEL', '',
'ERROR_UNSUPPORTED_CONTROL_FLOW_V1').value()
self.assertEqual(
exported_error,
'Merge only has 4 inputs, while only merge nodes with two inputs '
'supported.\n\tFailed to functionalize Control Flow V1 ops. Consider '
'using Control Flow V2 ops instead. See https://www.tensorflow.org/'
'api_docs/python/tf/compat/v1/enable_control_flow_v2.')
def test_location_from_concrete_functions(self):
@tf.function(input_signature=[
tf.TensorSpec(shape=[None, None, 2, 3, 3], dtype=tf.complex64),
tf.TensorSpec(shape=[None, None, 1, 3, 3], dtype=tf.complex64),
])
def model(a, b):
return tf.add(a, b, name='add')
converter = tf.lite.TFLiteConverter.from_concrete_functions(
[model.get_concrete_function()], model)
self.convert_and_check_location_info(
converter,
converter_error_data_pb2.ConverterErrorData.CALLSITELOC,
expected_sources=[
'tensorflow/lite/python/metrics_nonportable_test.py',
])
def test_location_from_saved_model(self):
with tempfile.TemporaryDirectory() as tmp_dir:
class Adder(tf.Module):
@tf.function(input_signature=[
tf.TensorSpec(shape=[None, None, 2, 3, 3], dtype=tf.complex64),
tf.TensorSpec(shape=[None, None, 1, 3, 3], dtype=tf.complex64),
])
def serving_default(self, a, b):
return tf.add(a, b, name='add')
tf.saved_model.save(
Adder(),
tmp_dir,
options=tf.saved_model.SaveOptions(save_debug_info=True))
converter = tf.lite.TFLiteConverter.from_saved_model(tmp_dir)
self.convert_and_check_location_info(
converter,
converter_error_data_pb2.ConverterErrorData.CALLSITELOC,
expected_sources=[
'tensorflow/lite/python/metrics_nonportable_test.py',
])
@parameterized.named_parameters(
('_WithoutLoweringToSavedModel', False, None),
('_WithLoweringToSavedModel', True,
'tensorflow/lite/python/metrics_nonportable_test.py'))
def test_location_from_keras_model(self, lower_to_saved_model,
expected_source):
input_tensor1 = tf.keras.layers.Input(
shape=[None, None, 2, 3, 3], dtype=tf.complex64)
input_tensor2 = tf.keras.layers.Input(
shape=[None, None, 2, 3, 3], dtype=tf.complex64)
output = tf.keras.layers.Add()([input_tensor1, input_tensor2])
model = tf.keras.Model(
inputs=[input_tensor1, input_tensor2], outputs=output)
model.compile(
optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.experimental_lower_to_saved_model = lower_to_saved_model
# The location does not contain a callsite into the current file.
self.convert_and_check_location_info(
converter,
converter_error_data_pb2.ConverterErrorData.CALLSITELOC,
expected_sources=[expected_source] if expected_source else None)
if __name__ == '__main__':
test.main()
| frreiss/tensorflow-fred | tensorflow/lite/python/metrics_nonportable_test.py | Python | apache-2.0 | 24,385 | 0.004757 |
# Copyright 2015-2017 Cisco Systems, Inc.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from .linkstate import LinkState # noqa
from .node.local_router_id import LocalRouterID # noqa
from .node.name import NodeName # noqa
from .node.isisarea import ISISArea # noqa
from .node.sr_capabilities import SRCapabilities # noqa
from .node.sr_algorithm import SRAlgorithm # noqa
from .node.node_msd import NodeMSD # noqa
from .node.nodeflags import NodeFlags # noqa
from .node.opa_node_attr import OpaNodeAttr # noqa
from .node.sid_or_label import SIDorLabel # noqa
from .node.srlb import SRLB # noqa
from .link.admingroup import AdminGroup # noqa
from .link.remote_router_id import RemoteRouterID # noqa
from .link.max_bw import MaxBandwidth # noqa
from .link.max_rsv_bw import MaxResvBandwidth # noqa
from .link.unsrv_bw import UnrsvBandwidth # noqa
from .link.te_metric import TeMetric # noqa
from .link.link_name import LinkName # noqa
from .link.igp_metric import IGPMetric # noqa
from .link.adj_seg_id import AdjSegID # noqa
from .link.link_identifiers import LinkIdentifiers # noqa
from .link.link_msd import LinkMSD # noqa
from .link.lan_adj_sid import LanAdjSegID # noqa
from .link.srlg import SRLGList # noqa
from .link.mplsmask import MplsMask # noqa
from .link.protection_type import ProtectionType # noqa
from .link.opa_link_attr import OpaLinkAttr # noqa
from .link.peer_node_sid import PeerNodeSID # noqa
from .link.peer_adj_sid import PeerAdjSID # noqa
from .link.peer_set_sid import PeerSetSID # noqa
from .link.unidirect_link_delay import UnidirectLinkDelay # noqa
from .link.min_max_link_delay import MinMaxUnidirectLinkDelay # noqa
from .link.unidirect_delay_var import UnidirectDelayVar # noqa
from .link.unidirect_packet_loss import UnidirectPacketLoss # noqa
from .link.unidirect_residual_bw import UnidirectResidualBw # noqa
from .link.unidirect_avail_bw import UnidirectAvailBw # noqa
from .link.unidirect_bw_util import UnidirectBwUtil # noqa
from .prefix.prefix_metric import PrefixMetric # noqa
from .prefix.prefix_sid import PrefixSID # noqa
from .prefix.prefix_igp_attr import PrefixIGPAttr # noqa
from .prefix.src_router_id import SrcRouterID # noqa
from .prefix.igpflags import IGPFlags # noqa
from .prefix.igp_route_tag_list import IGPRouteTagList # noqa
from .prefix.ext_igp_route_tag_list import ExtIGPRouteTagList # noqa
from .prefix.ospf_forward_addr import OspfForwardingAddr # noqa
| meidli/yabgp | yabgp/message/attribute/linkstate/__init__.py | Python | apache-2.0 | 3,042 | 0 |
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
This module contains classes that help to emulate xcodebuild behavior on top of
other build systems, such as make and ninja.
"""
import gyp.common
import os.path
import re
import shlex
import subprocess
import sys
from gyp.common import GypError
class XcodeSettings(object):
"""A class that understands the gyp 'xcode_settings' object."""
# Populated lazily by _SdkPath(). Shared by all XcodeSettings, so cached
# at class-level for efficiency.
_sdk_path_cache = {}
def __init__(self, spec):
self.spec = spec
self.isIOS = False
# Per-target 'xcode_settings' are pushed down into configs earlier by gyp.
# This means self.xcode_settings[config] always contains all settings
# for that config -- the per-target settings as well. Settings that are
# the same for all configs are implicitly per-target settings.
self.xcode_settings = {}
configs = spec['configurations']
for configname, config in configs.iteritems():
self.xcode_settings[configname] = config.get('xcode_settings', {})
if self.xcode_settings[configname].get('IPHONEOS_DEPLOYMENT_TARGET',
None):
self.isIOS = True
# If you need this, speak up at http://crbug.com/122592
conditional_keys = [key for key in self.xcode_settings[configname]
if key.endswith(']')]
if conditional_keys:
print 'Warning: Conditional keys not implemented, ignoring:', \
' '.join(conditional_keys)
for key in conditional_keys:
del self.xcode_settings[configname][key]
# This is only non-None temporarily during the execution of some methods.
self.configname = None
# Used by _AdjustLibrary to match .a and .dylib entries in libraries.
self.library_re = re.compile(r'^lib([^/]+)\.(a|dylib)$')
def _Settings(self):
assert self.configname
return self.xcode_settings[self.configname]
def _Test(self, test_key, cond_key, default):
return self._Settings().get(test_key, default) == cond_key
def _Appendf(self, lst, test_key, format_str, default=None):
if test_key in self._Settings():
lst.append(format_str % str(self._Settings()[test_key]))
elif default:
lst.append(format_str % str(default))
def _WarnUnimplemented(self, test_key):
if test_key in self._Settings():
print 'Warning: Ignoring not yet implemented key "%s".' % test_key
def _IsBundle(self):
return int(self.spec.get('mac_bundle', 0)) != 0
def GetFrameworkVersion(self):
"""Returns the framework version of the current target. Only valid for
bundles."""
assert self._IsBundle()
return self.GetPerTargetSetting('FRAMEWORK_VERSION', default='A')
def GetWrapperExtension(self):
"""Returns the bundle extension (.app, .framework, .plugin, etc). Only
valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('loadable_module', 'shared_library'):
default_wrapper_extension = {
'loadable_module': 'bundle',
'shared_library': 'framework',
}[self.spec['type']]
wrapper_extension = self.GetPerTargetSetting(
'WRAPPER_EXTENSION', default=default_wrapper_extension)
return '.' + self.spec.get('product_extension', wrapper_extension)
elif self.spec['type'] == 'executable':
return '.' + self.spec.get('product_extension', 'app')
else:
assert False, "Don't know extension for '%s', target '%s'" % (
self.spec['type'], self.spec['target_name'])
def GetProductName(self):
"""Returns PRODUCT_NAME."""
return self.spec.get('product_name', self.spec['target_name'])
def GetFullProductName(self):
"""Returns FULL_PRODUCT_NAME."""
if self._IsBundle():
return self.GetWrapperName()
else:
return self._GetStandaloneBinaryPath()
def GetWrapperName(self):
"""Returns the directory name of the bundle represented by this target.
Only valid for bundles."""
assert self._IsBundle()
return self.GetProductName() + self.GetWrapperExtension()
def GetBundleContentsFolderPath(self):
"""Returns the qualified path to the bundle's contents folder. E.g.
Chromium.app/Contents or Foo.bundle/Versions/A. Only valid for bundles."""
if self.isIOS:
return self.GetWrapperName()
assert self._IsBundle()
if self.spec['type'] == 'shared_library':
return os.path.join(
self.GetWrapperName(), 'Versions', self.GetFrameworkVersion())
else:
# loadable_modules have a 'Contents' folder like executables.
return os.path.join(self.GetWrapperName(), 'Contents')
def GetBundleResourceFolder(self):
"""Returns the qualified path to the bundle's resource folder. E.g.
Chromium.app/Contents/Resources. Only valid for bundles."""
assert self._IsBundle()
if self.isIOS:
return self.GetBundleContentsFolderPath()
return os.path.join(self.GetBundleContentsFolderPath(), 'Resources')
def GetBundlePlistPath(self):
"""Returns the qualified path to the bundle's plist file. E.g.
Chromium.app/Contents/Info.plist. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('executable', 'loadable_module'):
return os.path.join(self.GetBundleContentsFolderPath(), 'Info.plist')
else:
return os.path.join(self.GetBundleContentsFolderPath(),
'Resources', 'Info.plist')
def GetProductType(self):
"""Returns the PRODUCT_TYPE of this target."""
if self._IsBundle():
return {
'executable': 'com.apple.product-type.application',
'loadable_module': 'com.apple.product-type.bundle',
'shared_library': 'com.apple.product-type.framework',
}[self.spec['type']]
else:
return {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.apple.product-type.library.dynamic',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
}[self.spec['type']]
def GetMachOType(self):
"""Returns the MACH_O_TYPE of this target."""
# Weird, but matches Xcode.
if not self._IsBundle() and self.spec['type'] == 'executable':
return ''
return {
'executable': 'mh_execute',
'static_library': 'staticlib',
'shared_library': 'mh_dylib',
'loadable_module': 'mh_bundle',
}[self.spec['type']]
def _GetBundleBinaryPath(self):
"""Returns the name of the bundle binary of by this target.
E.g. Chromium.app/Contents/MacOS/Chromium. Only valid for bundles."""
assert self._IsBundle()
if self.spec['type'] in ('shared_library',) or self.isIOS:
path = self.GetBundleContentsFolderPath()
elif self.spec['type'] in ('executable', 'loadable_module'):
path = os.path.join(self.GetBundleContentsFolderPath(), 'MacOS')
return os.path.join(path, self.GetExecutableName())
def _GetStandaloneExecutableSuffix(self):
if 'product_extension' in self.spec:
return '.' + self.spec['product_extension']
return {
'executable': '',
'static_library': '.a',
'shared_library': '.dylib',
'loadable_module': '.so',
}[self.spec['type']]
def _GetStandaloneExecutablePrefix(self):
return self.spec.get('product_prefix', {
'executable': '',
'static_library': 'lib',
'shared_library': 'lib',
# Non-bundled loadable_modules are called foo.so for some reason
# (that is, .so and no prefix) with the xcode build -- match that.
'loadable_module': '',
}[self.spec['type']])
def _GetStandaloneBinaryPath(self):
"""Returns the name of the non-bundle binary represented by this target.
E.g. hello_world. Only valid for non-bundles."""
assert not self._IsBundle()
assert self.spec['type'] in (
'executable', 'shared_library', 'static_library', 'loadable_module'), (
'Unexpected type %s' % self.spec['type'])
target = self.spec['target_name']
if self.spec['type'] == 'static_library':
if target[:3] == 'lib':
target = target[3:]
elif self.spec['type'] in ('loadable_module', 'shared_library'):
if target[:3] == 'lib':
target = target[3:]
target_prefix = self._GetStandaloneExecutablePrefix()
target = self.spec.get('product_name', target)
target_ext = self._GetStandaloneExecutableSuffix()
return target_prefix + target + target_ext
def GetExecutableName(self):
"""Returns the executable name of the bundle represented by this target.
E.g. Chromium."""
if self._IsBundle():
return self.spec.get('product_name', self.spec['target_name'])
else:
return self._GetStandaloneBinaryPath()
def GetExecutablePath(self):
"""Returns the directory name of the bundle represented by this target. E.g.
Chromium.app/Contents/MacOS/Chromium."""
if self._IsBundle():
return self._GetBundleBinaryPath()
else:
return self._GetStandaloneBinaryPath()
def GetActiveArchs(self, configname):
"""Returns the architectures this target should be built for."""
# TODO: Look at VALID_ARCHS, ONLY_ACTIVE_ARCH; possibly set
# CURRENT_ARCH / NATIVE_ARCH env vars?
return self.xcode_settings[configname].get('ARCHS', ['i386'])
def _GetSdkVersionInfoItem(self, sdk, infoitem):
job = subprocess.Popen(['xcodebuild', '-version', '-sdk', sdk, infoitem],
stdout=subprocess.PIPE)
out = job.communicate()[0]
if job.returncode != 0:
sys.stderr.write(out + '\n')
raise GypError('Error %d running xcodebuild' % job.returncode)
return out.rstrip('\n')
def _SdkPath(self):
sdk_root = self.GetPerTargetSetting('SDKROOT', default='macosx')
if sdk_root.startswith('/'):
return sdk_root
if sdk_root not in XcodeSettings._sdk_path_cache:
XcodeSettings._sdk_path_cache[sdk_root] = self._GetSdkVersionInfoItem(
sdk_root, 'Path')
return XcodeSettings._sdk_path_cache[sdk_root]
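# For reference, _GetSdkVersionInfoItem above shells out to, e.g.:
#
#     xcodebuild -version -sdk macosx Path
#
# which prints the SDK path on stdout; _SdkPath() caches the result per
# SDKROOT value in the class-level _sdk_path_cache.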
def _AppendPlatformVersionMinFlags(self, lst):
self._Appendf(lst, 'MACOSX_DEPLOYMENT_TARGET', '-mmacosx-version-min=%s')
if 'IPHONEOS_DEPLOYMENT_TARGET' in self._Settings():
# TODO: Implement this better?
sdk_path_basename = os.path.basename(self._SdkPath())
if sdk_path_basename.lower().startswith('iphonesimulator'):
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-mios-simulator-version-min=%s')
else:
self._Appendf(lst, 'IPHONEOS_DEPLOYMENT_TARGET',
'-miphoneos-version-min=%s')
def GetCflags(self, configname, arch=None):
"""Returns flags that need to be added to .c, .cc, .m, and .mm
compilations."""
# These functions (and the similar ones below) do not offer complete
# emulation of all xcode_settings keys. They're implemented on demand.
self.configname = configname
cflags = []
sdk_root = self._SdkPath()
if 'SDKROOT' in self._Settings():
cflags.append('-isysroot %s' % sdk_root)
if self._Test('CLANG_WARN_CONSTANT_CONVERSION', 'YES', default='NO'):
cflags.append('-Wconstant-conversion')
if self._Test('GCC_CHAR_IS_UNSIGNED_CHAR', 'YES', default='NO'):
cflags.append('-funsigned-char')
if self._Test('GCC_CW_ASM_SYNTAX', 'YES', default='YES'):
cflags.append('-fasm-blocks')
    if 'GCC_DYNAMIC_NO_PIC' in self._Settings():
      if self._Settings()['GCC_DYNAMIC_NO_PIC'] == 'YES':
        cflags.append('-mdynamic-no-pic')
      else:
        # TODO: In this case, it depends on the target. xcode passes
        # -mdynamic-no-pic by default for executables and possibly static libs,
        # according to mento.
        pass
if self._Test('GCC_ENABLE_PASCAL_STRINGS', 'YES', default='YES'):
cflags.append('-mpascal-strings')
self._Appendf(cflags, 'GCC_OPTIMIZATION_LEVEL', '-O%s', default='s')
if self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES'):
dbg_format = self._Settings().get('DEBUG_INFORMATION_FORMAT', 'dwarf')
if dbg_format == 'dwarf':
cflags.append('-gdwarf-2')
elif dbg_format == 'stabs':
raise NotImplementedError('stabs debug format is not supported yet.')
elif dbg_format == 'dwarf-with-dsym':
cflags.append('-gdwarf-2')
else:
raise NotImplementedError('Unknown debug format %s' % dbg_format)
if self._Settings().get('GCC_STRICT_ALIASING') == 'YES':
cflags.append('-fstrict-aliasing')
elif self._Settings().get('GCC_STRICT_ALIASING') == 'NO':
cflags.append('-fno-strict-aliasing')
if self._Test('GCC_SYMBOLS_PRIVATE_EXTERN', 'YES', default='NO'):
cflags.append('-fvisibility=hidden')
if self._Test('GCC_TREAT_WARNINGS_AS_ERRORS', 'YES', default='NO'):
cflags.append('-Werror')
if self._Test('GCC_WARN_ABOUT_MISSING_NEWLINE', 'YES', default='NO'):
cflags.append('-Wnewline-eof')
self._AppendPlatformVersionMinFlags(cflags)
# TODO:
if self._Test('COPY_PHASE_STRIP', 'YES', default='NO'):
self._WarnUnimplemented('COPY_PHASE_STRIP')
self._WarnUnimplemented('GCC_DEBUGGING_SYMBOLS')
self._WarnUnimplemented('GCC_ENABLE_OBJC_EXCEPTIONS')
# TODO: This is exported correctly, but assigning to it is not supported.
self._WarnUnimplemented('MACH_O_TYPE')
self._WarnUnimplemented('PRODUCT_TYPE')
if arch is not None:
archs = [arch]
else:
archs = self._Settings().get('ARCHS', ['i386'])
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
cflags.append('-arch ' + archs[0])
if archs[0] in ('i386', 'x86_64'):
if self._Test('GCC_ENABLE_SSE3_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse3')
if self._Test('GCC_ENABLE_SUPPLEMENTAL_SSE3_INSTRUCTIONS', 'YES',
default='NO'):
cflags.append('-mssse3') # Note 3rd 's'.
if self._Test('GCC_ENABLE_SSE41_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.1')
if self._Test('GCC_ENABLE_SSE42_EXTENSIONS', 'YES', default='NO'):
cflags.append('-msse4.2')
cflags += self._Settings().get('WARNING_CFLAGS', [])
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
cflags.append('-F' + directory.replace('$(SDKROOT)', sdk_root))
self.configname = None
return cflags
def GetCflagsC(self, configname):
"""Returns flags that need to be added to .c, and .m compilations."""
self.configname = configname
cflags_c = []
if self._Settings().get('GCC_C_LANGUAGE_STANDARD', '') == 'ansi':
cflags_c.append('-ansi')
else:
self._Appendf(cflags_c, 'GCC_C_LANGUAGE_STANDARD', '-std=%s')
cflags_c += self._Settings().get('OTHER_CFLAGS', [])
self.configname = None
return cflags_c
def GetCflagsCC(self, configname):
"""Returns flags that need to be added to .cc, and .mm compilations."""
self.configname = configname
cflags_cc = []
clang_cxx_language_standard = self._Settings().get(
'CLANG_CXX_LANGUAGE_STANDARD')
    # Note: Don't map c++0x to c++11 here, so that c++0x can still be used
    # with older clangs that don't understand c++11 yet (like Xcode 4.2's).
if clang_cxx_language_standard:
cflags_cc.append('-std=%s' % clang_cxx_language_standard)
self._Appendf(cflags_cc, 'CLANG_CXX_LIBRARY', '-stdlib=%s')
if self._Test('GCC_ENABLE_CPP_RTTI', 'NO', default='YES'):
cflags_cc.append('-fno-rtti')
if self._Test('GCC_ENABLE_CPP_EXCEPTIONS', 'NO', default='YES'):
cflags_cc.append('-fno-exceptions')
if self._Test('GCC_INLINES_ARE_PRIVATE_EXTERN', 'YES', default='NO'):
cflags_cc.append('-fvisibility-inlines-hidden')
if self._Test('GCC_THREADSAFE_STATICS', 'NO', default='YES'):
cflags_cc.append('-fno-threadsafe-statics')
# Note: This flag is a no-op for clang, it only has an effect for gcc.
if self._Test('GCC_WARN_ABOUT_INVALID_OFFSETOF_MACRO', 'NO', default='YES'):
cflags_cc.append('-Wno-invalid-offsetof')
other_ccflags = []
for flag in self._Settings().get('OTHER_CPLUSPLUSFLAGS', ['$(inherited)']):
# TODO: More general variable expansion. Missing in many other places too.
if flag in ('$inherited', '$(inherited)', '${inherited}'):
flag = '$OTHER_CFLAGS'
if flag in ('$OTHER_CFLAGS', '$(OTHER_CFLAGS)', '${OTHER_CFLAGS}'):
other_ccflags += self._Settings().get('OTHER_CFLAGS', [])
else:
other_ccflags.append(flag)
cflags_cc += other_ccflags
self.configname = None
return cflags_cc
def _AddObjectiveCGarbageCollectionFlags(self, flags):
gc_policy = self._Settings().get('GCC_ENABLE_OBJC_GC', 'unsupported')
if gc_policy == 'supported':
flags.append('-fobjc-gc')
elif gc_policy == 'required':
flags.append('-fobjc-gc-only')
def _AddObjectiveCARCFlags(self, flags):
if self._Test('CLANG_ENABLE_OBJC_ARC', 'YES', default='NO'):
flags.append('-fobjc-arc')
def GetCflagsObjC(self, configname):
"""Returns flags that need to be added to .m compilations."""
self.configname = configname
cflags_objc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objc)
self._AddObjectiveCARCFlags(cflags_objc)
self.configname = None
return cflags_objc
def GetCflagsObjCC(self, configname):
"""Returns flags that need to be added to .mm compilations."""
self.configname = configname
cflags_objcc = []
self._AddObjectiveCGarbageCollectionFlags(cflags_objcc)
self._AddObjectiveCARCFlags(cflags_objcc)
if self._Test('GCC_OBJC_CALL_CXX_CDTORS', 'YES', default='NO'):
cflags_objcc.append('-fobjc-call-cxx-cdtors')
self.configname = None
return cflags_objcc
def GetInstallNameBase(self):
"""Return DYLIB_INSTALL_NAME_BASE for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
install_base = self.GetPerTargetSetting(
'DYLIB_INSTALL_NAME_BASE',
default='/Library/Frameworks' if self._IsBundle() else '/usr/local/lib')
return install_base
def _StandardizePath(self, path):
"""Do :standardizepath processing for path."""
# I'm not quite sure what :standardizepath does. Just call normpath(),
# but don't let @executable_path/../foo collapse to foo.
if '/' in path:
prefix, rest = '', path
if path.startswith('@'):
prefix, rest = path.split('/', 1)
rest = os.path.normpath(rest) # :standardizepath
path = os.path.join(prefix, rest)
    return path
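  # Illustrative behavior (hedged sketch):
  #   >>> self._StandardizePath('@executable_path/./foo')
  #   '@executable_path/foo'
  #   >>> self._StandardizePath('@executable_path/../foo')  # '..' survives
  #   '@executable_path/../foo'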
def GetInstallName(self):
"""Return LD_DYLIB_INSTALL_NAME for this target."""
# Xcode sets this for shared_libraries, and for nonbundled loadable_modules.
if (self.spec['type'] != 'shared_library' and
(self.spec['type'] != 'loadable_module' or self._IsBundle())):
return None
default_install_name = \
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)/$(EXECUTABLE_PATH)'
install_name = self.GetPerTargetSetting(
'LD_DYLIB_INSTALL_NAME', default=default_install_name)
# Hardcode support for the variables used in chromium for now, to
# unblock people using the make build.
if '$' in install_name:
assert install_name in ('$(DYLIB_INSTALL_NAME_BASE:standardizepath)/'
'$(WRAPPER_NAME)/$(PRODUCT_NAME)', default_install_name), (
'Variables in LD_DYLIB_INSTALL_NAME are not generally supported '
'yet in target \'%s\' (got \'%s\')' %
(self.spec['target_name'], install_name))
install_name = install_name.replace(
'$(DYLIB_INSTALL_NAME_BASE:standardizepath)',
self._StandardizePath(self.GetInstallNameBase()))
if self._IsBundle():
# These are only valid for bundles, hence the |if|.
install_name = install_name.replace(
'$(WRAPPER_NAME)', self.GetWrapperName())
install_name = install_name.replace(
'$(PRODUCT_NAME)', self.GetProductName())
else:
assert '$(WRAPPER_NAME)' not in install_name
assert '$(PRODUCT_NAME)' not in install_name
install_name = install_name.replace(
'$(EXECUTABLE_PATH)', self.GetExecutablePath())
return install_name
def _MapLinkerFlagFilename(self, ldflag, gyp_to_build_path):
"""Checks if ldflag contains a filename and if so remaps it from
gyp-directory-relative to build-directory-relative."""
# This list is expanded on demand.
# They get matched as:
# -exported_symbols_list file
# -Wl,exported_symbols_list file
# -Wl,exported_symbols_list,file
    LINKER_FILE = r'(\S+)'
    WORD = r'\S+'
linker_flags = [
['-exported_symbols_list', LINKER_FILE], # Needed for NaCl.
['-unexported_symbols_list', LINKER_FILE],
['-reexported_symbols_list', LINKER_FILE],
['-sectcreate', WORD, WORD, LINKER_FILE], # Needed for remoting.
]
for flag_pattern in linker_flags:
regex = re.compile('(?:-Wl,)?' + '[ ,]'.join(flag_pattern))
m = regex.match(ldflag)
if m:
ldflag = ldflag[:m.start(1)] + gyp_to_build_path(m.group(1)) + \
ldflag[m.end(1):]
# Required for ffmpeg (no idea why they don't use LIBRARY_SEARCH_PATHS,
# TODO(thakis): Update ffmpeg.gyp):
if ldflag.startswith('-L'):
ldflag = '-L' + gyp_to_build_path(ldflag[len('-L'):])
return ldflag
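  # Illustrative remap (a hedged sketch; the paths are hypothetical):
  #   >>> self._MapLinkerFlagFilename('-Wl,-exported_symbols_list,sym.exp',
  #   ...                             lambda p: 'out/Release/' + p)
  #   '-Wl,-exported_symbols_list,out/Release/sym.exp'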
def GetLdflags(self, configname, product_dir, gyp_to_build_path, arch=None):
"""Returns flags that need to be passed to the linker.
Args:
configname: The name of the configuration to get ld flags for.
product_dir: The directory where products such static and dynamic
libraries are placed. This is added to the library search path.
gyp_to_build_path: A function that converts paths relative to the
        current gyp file to paths relative to the build directory.
"""
self.configname = configname
ldflags = []
# The xcode build is relative to a gyp file's directory, and OTHER_LDFLAGS
# can contain entries that depend on this. Explicitly absolutify these.
for ldflag in self._Settings().get('OTHER_LDFLAGS', []):
ldflags.append(self._MapLinkerFlagFilename(ldflag, gyp_to_build_path))
if self._Test('DEAD_CODE_STRIPPING', 'YES', default='NO'):
ldflags.append('-Wl,-dead_strip')
if self._Test('PREBINDING', 'YES', default='NO'):
ldflags.append('-Wl,-prebind')
self._Appendf(
ldflags, 'DYLIB_COMPATIBILITY_VERSION', '-compatibility_version %s')
self._Appendf(
ldflags, 'DYLIB_CURRENT_VERSION', '-current_version %s')
self._AppendPlatformVersionMinFlags(ldflags)
if 'SDKROOT' in self._Settings():
ldflags.append('-isysroot ' + self._SdkPath())
for library_path in self._Settings().get('LIBRARY_SEARCH_PATHS', []):
ldflags.append('-L' + gyp_to_build_path(library_path))
if 'ORDER_FILE' in self._Settings():
ldflags.append('-Wl,-order_file ' +
'-Wl,' + gyp_to_build_path(
self._Settings()['ORDER_FILE']))
if arch is not None:
archs = [arch]
else:
archs = self._Settings().get('ARCHS', ['i386'])
if len(archs) != 1:
# TODO: Supporting fat binaries will be annoying.
self._WarnUnimplemented('ARCHS')
archs = ['i386']
ldflags.append('-arch ' + archs[0])
# Xcode adds the product directory by default.
ldflags.append('-L' + product_dir)
install_name = self.GetInstallName()
if install_name:
ldflags.append('-install_name ' + install_name.replace(' ', r'\ '))
for rpath in self._Settings().get('LD_RUNPATH_SEARCH_PATHS', []):
ldflags.append('-Wl,-rpath,' + rpath)
config = self.spec['configurations'][self.configname]
framework_dirs = config.get('mac_framework_dirs', [])
for directory in framework_dirs:
ldflags.append('-F' + directory.replace('$(SDKROOT)', self._SdkPath()))
self.configname = None
return ldflags
def GetLibtoolflags(self, configname):
"""Returns flags that need to be passed to the static linker.
Args:
configname: The name of the configuration to get ld flags for.
"""
self.configname = configname
libtoolflags = []
for libtoolflag in self._Settings().get('OTHER_LDFLAGS', []):
libtoolflags.append(libtoolflag)
# TODO(thakis): ARCHS?
self.configname = None
return libtoolflags
def GetPerTargetSettings(self):
"""Gets a list of all the per-target settings. This will only fetch keys
whose values are the same across all configurations."""
first_pass = True
result = {}
for configname in sorted(self.xcode_settings.keys()):
if first_pass:
result = dict(self.xcode_settings[configname])
first_pass = False
else:
for key, value in self.xcode_settings[configname].iteritems():
if key not in result:
continue
elif result[key] != value:
del result[key]
return result
def GetPerTargetSetting(self, setting, default=None):
"""Tries to get xcode_settings.setting from spec. Assumes that the setting
has the same value in all configurations and throws otherwise."""
first_pass = True
result = None
for configname in sorted(self.xcode_settings.keys()):
if first_pass:
result = self.xcode_settings[configname].get(setting, None)
first_pass = False
else:
assert result == self.xcode_settings[configname].get(setting, None), (
"Expected per-target setting for '%s', got per-config setting "
"(target %s)" % (setting, spec['target_name']))
if result is None:
return default
return result
def _GetStripPostbuilds(self, configname, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
neccessary to strip this target's binary. These should be run as postbuilds
before the actual postbuilds run."""
self.configname = configname
result = []
if (self._Test('DEPLOYMENT_POSTPROCESSING', 'YES', default='NO') and
self._Test('STRIP_INSTALLED_PRODUCT', 'YES', default='NO')):
default_strip_style = 'debugging'
if self._IsBundle():
default_strip_style = 'non-global'
elif self.spec['type'] == 'executable':
default_strip_style = 'all'
strip_style = self._Settings().get('STRIP_STYLE', default_strip_style)
strip_flags = {
'all': '',
'non-global': '-x',
'debugging': '-S',
}[strip_style]
explicit_strip_flags = self._Settings().get('STRIPFLAGS', '')
if explicit_strip_flags:
strip_flags += ' ' + _NormalizeEnvVarReferences(explicit_strip_flags)
if not quiet:
result.append('echo STRIP\\(%s\\)' % self.spec['target_name'])
result.append('strip %s %s' % (strip_flags, output_binary))
self.configname = None
return result
def _GetDebugInfoPostbuilds(self, configname, output, output_binary, quiet):
"""Returns a list of shell commands that contain the shell commands
neccessary to massage this target's debug information. These should be run
as postbuilds before the actual postbuilds run."""
self.configname = configname
# For static libraries, no dSYMs are created.
result = []
if (self._Test('GCC_GENERATE_DEBUGGING_SYMBOLS', 'YES', default='YES') and
self._Test(
'DEBUG_INFORMATION_FORMAT', 'dwarf-with-dsym', default='dwarf') and
self.spec['type'] != 'static_library'):
if not quiet:
result.append('echo DSYMUTIL\\(%s\\)' % self.spec['target_name'])
result.append('dsymutil %s -o %s' % (output_binary, output + '.dSYM'))
self.configname = None
return result
def GetTargetPostbuilds(self, configname, output, output_binary, quiet=False):
"""Returns a list of shell commands that contain the shell commands
to run as postbuilds for this target, before the actual postbuilds."""
# dSYMs need to build before stripping happens.
return (
self._GetDebugInfoPostbuilds(configname, output, output_binary, quiet) +
self._GetStripPostbuilds(configname, output_binary, quiet))
def _AdjustLibrary(self, library):
if library.endswith('.framework'):
l = '-framework ' + os.path.splitext(os.path.basename(library))[0]
else:
m = self.library_re.match(library)
if m:
l = '-l' + m.group(1)
else:
l = library
return l.replace('$(SDKROOT)', self._SdkPath())
def AdjustLibraries(self, libraries):
"""Transforms entries like 'Cocoa.framework' in libraries into entries like
'-framework Cocoa', 'libcrypto.dylib' into '-lcrypto', etc.
"""
libraries = [ self._AdjustLibrary(library) for library in libraries]
return libraries
class MacPrefixHeader(object):
"""A class that helps with emulating Xcode's GCC_PREFIX_HEADER feature.
This feature consists of several pieces:
* If GCC_PREFIX_HEADER is present, all compilations in that project get an
additional |-include path_to_prefix_header| cflag.
* If GCC_PRECOMPILE_PREFIX_HEADER is present too, then the prefix header is
instead compiled, and all other compilations in the project get an
additional |-include path_to_compiled_header| instead.
+ Compiled prefix headers have the extension gch. There is one gch file for
every language used in the project (c, cc, m, mm), since gch files for
different languages aren't compatible.
+ gch files themselves are built with the target's normal cflags, but they
obviously don't get the |-include| flag. Instead, they need a -x flag that
describes their language.
+ All o files in the target need to depend on the gch file, to make sure
it's built before any o file is built.
This class helps with some of these tasks, but it needs help from the build
system for writing dependencies to the gch files, for writing build commands
for the gch files, and for figuring out the location of the gch files.
"""
def __init__(self, xcode_settings,
gyp_path_to_build_path, gyp_path_to_build_output):
"""If xcode_settings is None, all methods on this class are no-ops.
Args:
gyp_path_to_build_path: A function that takes a gyp-relative path,
and returns a path relative to the build directory.
gyp_path_to_build_output: A function that takes a gyp-relative path and
a language code ('c', 'cc', 'm', or 'mm'), and that returns a path
to where the output of precompiling that path for that language
should be placed (without the trailing '.gch').
"""
# This doesn't support per-configuration prefix headers. Good enough
# for now.
self.header = None
self.compile_headers = False
if xcode_settings:
self.header = xcode_settings.GetPerTargetSetting('GCC_PREFIX_HEADER')
self.compile_headers = xcode_settings.GetPerTargetSetting(
'GCC_PRECOMPILE_PREFIX_HEADER', default='NO') != 'NO'
self.compiled_headers = {}
if self.header:
if self.compile_headers:
for lang in ['c', 'cc', 'm', 'mm']:
self.compiled_headers[lang] = gyp_path_to_build_output(
self.header, lang)
self.header = gyp_path_to_build_path(self.header)
def _CompiledHeader(self, lang, arch):
assert self.compile_headers
h = self.compiled_headers[lang]
if arch:
h += '.' + arch
return h
def GetInclude(self, lang, arch=None):
"""Gets the cflags to include the prefix header for language |lang|."""
if self.compile_headers and lang in self.compiled_headers:
return '-include %s' % self._CompiledHeader(lang, arch)
elif self.header:
return '-include %s' % self.header
else:
return ''
def _Gch(self, lang, arch):
"""Returns the actual file name of the prefix header for language |lang|."""
assert self.compile_headers
return self._CompiledHeader(lang, arch) + '.gch'
def GetObjDependencies(self, sources, objs, arch=None):
"""Given a list of source files and the corresponding object files, returns
a list of (source, object, gch) tuples, where |gch| is the build-directory
relative path to the gch file each object file depends on. |compilable[i]|
has to be the source file belonging to |objs[i]|."""
if not self.header or not self.compile_headers:
return []
result = []
for source, obj in zip(sources, objs):
ext = os.path.splitext(source)[1]
lang = {
'.c': 'c',
'.cpp': 'cc', '.cc': 'cc', '.cxx': 'cc',
'.m': 'm',
'.mm': 'mm',
}.get(ext, None)
if lang:
result.append((source, obj, self._Gch(lang, arch)))
return result
def GetPchBuildCommands(self, arch=None):
"""Returns [(path_to_gch, language_flag, language, header)].
|path_to_gch| and |header| are relative to the build directory.
"""
if not self.header or not self.compile_headers:
return []
return [
(self._Gch('c', arch), '-x c-header', 'c', self.header),
(self._Gch('cc', arch), '-x c++-header', 'cc', self.header),
(self._Gch('m', arch), '-x objective-c-header', 'm', self.header),
(self._Gch('mm', arch), '-x objective-c++-header', 'mm', self.header),
]
def MergeGlobalXcodeSettingsToSpec(global_dict, spec):
"""Merges the global xcode_settings dictionary into each configuration of the
target represented by spec. For keys that are both in the global and the local
  xcode_settings dict, the local key gets precedence.
"""
# The xcode generator special-cases global xcode_settings and does something
# that amounts to merging in the global xcode_settings into each local
# xcode_settings dict.
global_xcode_settings = global_dict.get('xcode_settings', {})
for config in spec['configurations'].values():
if 'xcode_settings' in config:
new_settings = global_xcode_settings.copy()
new_settings.update(config['xcode_settings'])
config['xcode_settings'] = new_settings
def IsMacBundle(flavor, spec):
"""Returns if |spec| should be treated as a bundle.
Bundles are directories with a certain subdirectory structure, instead of
just a single file. Bundle rules do not produce a binary but also package
resources into that directory."""
is_mac_bundle = (int(spec.get('mac_bundle', 0)) != 0 and flavor == 'mac')
if is_mac_bundle:
assert spec['type'] != 'none', (
'mac_bundle targets cannot have type none (target "%s")' %
spec['target_name'])
return is_mac_bundle
def GetMacBundleResources(product_dir, xcode_settings, resources):
"""Yields (output, resource) pairs for every resource in |resources|.
Only call this for mac bundle targets.
Args:
product_dir: Path to the directory containing the output bundle,
relative to the build directory.
xcode_settings: The XcodeSettings of the current target.
resources: A list of bundle resources, relative to the build directory.
"""
dest = os.path.join(product_dir,
xcode_settings.GetBundleResourceFolder())
for res in resources:
output = dest
# The make generator doesn't support it, so forbid it everywhere
    # to keep the generators more interchangeable.
assert ' ' not in res, (
"Spaces in resource filenames not supported (%s)" % res)
# Split into (path,file).
res_parts = os.path.split(res)
# Now split the path into (prefix,maybe.lproj).
lproj_parts = os.path.split(res_parts[0])
# If the resource lives in a .lproj bundle, add that to the destination.
if lproj_parts[1].endswith('.lproj'):
output = os.path.join(output, lproj_parts[1])
output = os.path.join(output, res_parts[1])
# Compiled XIB files are referred to by .nib.
if output.endswith('.xib'):
output = output[0:-3] + 'nib'
yield output, res
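# Illustrative mappings (a hedged sketch; resource names are hypothetical):
#   'en.lproj/Localizable.strings' -> <resource folder>/en.lproj/Localizable.strings
#   'MainMenu.xib'                 -> <resource folder>/MainMenu.nib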
def GetMacInfoPlist(product_dir, xcode_settings, gyp_path_to_build_path):
"""Returns (info_plist, dest_plist, defines, extra_env), where:
* |info_plist| is the source plist path, relative to the
build directory,
* |dest_plist| is the destination plist path, relative to the
build directory,
* |defines| is a list of preprocessor defines (empty if the plist
      shouldn't be preprocessed),
* |extra_env| is a dict of env variables that should be exported when
invoking |mac_tool copy-info-plist|.
Only call this for mac bundle targets.
Args:
product_dir: Path to the directory containing the output bundle,
relative to the build directory.
xcode_settings: The XcodeSettings of the current target.
gyp_to_build_path: A function that converts paths relative to the
      current gyp file to paths relative to the build directory.
"""
info_plist = xcode_settings.GetPerTargetSetting('INFOPLIST_FILE')
if not info_plist:
return None, None, [], {}
# The make generator doesn't support it, so forbid it everywhere
  # to keep the generators more interchangeable.
assert ' ' not in info_plist, (
"Spaces in Info.plist filenames not supported (%s)" % info_plist)
info_plist = gyp_path_to_build_path(info_plist)
# If explicitly set to preprocess the plist, invoke the C preprocessor and
# specify any defines as -D flags.
if xcode_settings.GetPerTargetSetting(
'INFOPLIST_PREPROCESS', default='NO') == 'YES':
# Create an intermediate file based on the path.
defines = shlex.split(xcode_settings.GetPerTargetSetting(
'INFOPLIST_PREPROCESSOR_DEFINITIONS', default=''))
else:
defines = []
dest_plist = os.path.join(product_dir, xcode_settings.GetBundlePlistPath())
extra_env = xcode_settings.GetPerTargetSettings()
return info_plist, dest_plist, defines, extra_env
def _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
additional_settings=None):
"""Return the environment variables that Xcode would set. See
http://developer.apple.com/library/mac/#documentation/DeveloperTools/Reference/XcodeBuildSettingRef/1-Build_Setting_Reference/build_setting_ref.html#//apple_ref/doc/uid/TP40003931-CH3-SW153
for a full list.
Args:
xcode_settings: An XcodeSettings object. If this is None, this function
returns an empty dict.
built_products_dir: Absolute path to the built products dir.
srcroot: Absolute path to the source root.
configuration: The build configuration name.
additional_settings: An optional dict with more values to add to the
result.
"""
if not xcode_settings: return {}
# This function is considered a friend of XcodeSettings, so let it reach into
# its implementation details.
spec = xcode_settings.spec
# These are filled in on a as-needed basis.
env = {
'BUILT_PRODUCTS_DIR' : built_products_dir,
'CONFIGURATION' : configuration,
'PRODUCT_NAME' : xcode_settings.GetProductName(),
# See /Developer/Platforms/MacOSX.platform/Developer/Library/Xcode/Specifications/MacOSX\ Product\ Types.xcspec for FULL_PRODUCT_NAME
'SRCROOT' : srcroot,
'SOURCE_ROOT': '${SRCROOT}',
# This is not true for static libraries, but currently the env is only
# written for bundles:
'TARGET_BUILD_DIR' : built_products_dir,
'TEMP_DIR' : '${TMPDIR}',
}
if xcode_settings.GetPerTargetSetting('SDKROOT'):
env['SDKROOT'] = xcode_settings._SdkPath()
else:
env['SDKROOT'] = ''
if spec['type'] in (
'executable', 'static_library', 'shared_library', 'loadable_module'):
env['EXECUTABLE_NAME'] = xcode_settings.GetExecutableName()
env['EXECUTABLE_PATH'] = xcode_settings.GetExecutablePath()
env['FULL_PRODUCT_NAME'] = xcode_settings.GetFullProductName()
mach_o_type = xcode_settings.GetMachOType()
if mach_o_type:
env['MACH_O_TYPE'] = mach_o_type
env['PRODUCT_TYPE'] = xcode_settings.GetProductType()
if xcode_settings._IsBundle():
env['CONTENTS_FOLDER_PATH'] = \
xcode_settings.GetBundleContentsFolderPath()
env['UNLOCALIZED_RESOURCES_FOLDER_PATH'] = \
xcode_settings.GetBundleResourceFolder()
env['INFOPLIST_PATH'] = xcode_settings.GetBundlePlistPath()
env['WRAPPER_NAME'] = xcode_settings.GetWrapperName()
install_name = xcode_settings.GetInstallName()
if install_name:
env['LD_DYLIB_INSTALL_NAME'] = install_name
install_name_base = xcode_settings.GetInstallNameBase()
if install_name_base:
env['DYLIB_INSTALL_NAME_BASE'] = install_name_base
if not additional_settings:
additional_settings = {}
else:
# Flatten lists to strings.
for k in additional_settings:
if not isinstance(additional_settings[k], str):
additional_settings[k] = ' '.join(additional_settings[k])
additional_settings.update(env)
for k in additional_settings:
additional_settings[k] = _NormalizeEnvVarReferences(additional_settings[k])
return additional_settings
def _NormalizeEnvVarReferences(str):
"""Takes a string containing variable references in the form ${FOO}, $(FOO),
or $FOO, and returns a string with all variable references in the form ${FOO}.
"""
# $FOO -> ${FOO}
str = re.sub(r'\$([a-zA-Z_][a-zA-Z0-9_]*)', r'${\1}', str)
# $(FOO) -> ${FOO}
matches = re.findall(r'(\$\(([a-zA-Z0-9\-_]+)\))', str)
for match in matches:
to_replace, variable = match
assert '$(' not in match, '$($(FOO)) variables not supported: ' + match
str = str.replace(to_replace, '${' + variable + '}')
return str
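# Illustrative normalization (hedged sketch):
#   >>> _NormalizeEnvVarReferences('$FOO $(BAR) ${BAZ}')
#   '${FOO} ${BAR} ${BAZ}'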
def ExpandEnvVars(string, expansions):
"""Expands ${VARIABLES}, $(VARIABLES), and $VARIABLES in string per the
expansions list. If the variable expands to something that references
another variable, this variable is expanded as well if it's in env --
until no variables present in env are left."""
for k, v in reversed(expansions):
string = string.replace('${' + k + '}', v)
string = string.replace('$(' + k + ')', v)
string = string.replace('$' + k, v)
return string
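# Illustrative expansion (hedged; the expansion list is hypothetical):
#   >>> ExpandEnvVars('${OUT}/$(NAME)', [('OUT', 'build'), ('NAME', 'app')])
#   'build/app'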
def _TopologicallySortedEnvVarKeys(env):
"""Takes a dict |env| whose values are strings that can refer to other keys,
  for example env['foo'] = '${bar} and ${baz}'. Returns a list L of all keys of
env such that key2 is after key1 in L if env[key2] refers to env[key1].
Throws an Exception in case of dependency cycles.
"""
# Since environment variables can refer to other variables, the evaluation
# order is important. Below is the logic to compute the dependency graph
# and sort it.
regex = re.compile(r'\$\{([a-zA-Z0-9\-_]+)\}')
def GetEdges(node):
    # Use a definition of edges such that user_of_variable -> used_variable.
# This happens to be easier in this case, since a variable's
# definition contains all variables it references in a single string.
# We can then reverse the result of the topological sort at the end.
# Since: reverse(topsort(DAG)) = topsort(reverse_edges(DAG))
matches = set([v for v in regex.findall(env[node]) if v in env])
for dependee in matches:
assert '${' not in dependee, 'Nested variables not supported: ' + dependee
return matches
try:
# Topologically sort, and then reverse, because we used an edge definition
# that's inverted from the expected result of this function (see comment
# above).
order = gyp.common.TopologicallySorted(env.keys(), GetEdges)
order.reverse()
return order
except gyp.common.CycleError, e:
raise GypError(
'Xcode environment variables are cyclically dependent: ' + str(e.nodes))
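# Illustrative ordering (a hedged sketch): 'B' must come before the 'A' that
# refers to it.
#   >>> _TopologicallySortedEnvVarKeys({'A': '${B}/x', 'B': 'base'})
#   ['B', 'A']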
def GetSortedXcodeEnv(xcode_settings, built_products_dir, srcroot,
configuration, additional_settings=None):
env = _GetXcodeEnv(xcode_settings, built_products_dir, srcroot, configuration,
additional_settings)
return [(key, env[key]) for key in _TopologicallySortedEnvVarKeys(env)]
def GetSpecPostbuildCommands(spec, quiet=False):
"""Returns the list of postbuilds explicitly defined on |spec|, in a form
executable by a shell."""
postbuilds = []
for postbuild in spec.get('postbuilds', []):
if not quiet:
postbuilds.append('echo POSTBUILD\\(%s\\) %s' % (
spec['target_name'], postbuild['postbuild_name']))
postbuilds.append(gyp.common.EncodePOSIXShellList(postbuild['action']))
return postbuilds
|
MIPS/external-chromium_org-tools-gyp
|
pylib/gyp/xcode_emulation.py
|
Python
|
bsd-3-clause
| 44,910
| 0.007459
|
import time
from random import random
import traceback
WARN_TIME = 3600.
def wait(check, timeout=None, delay=0.5, *args, **kwargs):
    """Repeatedly call check(*args, **kwargs) until it returns without
    raising, sleeping a jittered delay (in seconds) between attempts. If
    timeout (in seconds) is set and exceeded, the last exception is
    re-raised."""
    start_time = time.time()
    warned = False
    while True:
        try:
            result = check(*args, **kwargs)
            break
        except Exception as exc:
            if timeout and (time.time() - start_time) > timeout:
                raise exc
            #if (time.time()-start_time) > WARN_TIME and not warned:
                #print "wait(): warning, waiting for a long time - \n%s" % traceback.format_exc()
                #warned = True
            time.sleep(delay * (1. + 0.2 * random()))
    return result
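# Illustrative usage (a hedged sketch; the check function is hypothetical):
#   def file_ready():
#       return open('/tmp/flag')  # raises IOError until the file exists
#   handle = wait(file_ready, timeout=10., delay=0.5)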
#class wait(object):
#def __init__(self,block=True, timeout=None, interval=1.0):
#self.block = block
#self.timeout = timeout
#self.interval = interval
#self._start_time = time.time()
#def __iter__(self):
#return self
#def next(self):
#if self.timeout and (time.time()-self._start_time) > self.timeout:
#raise StopIteration
#time.sleep(self.interval)
#try:
#return True
#except:
#print 'caught'
#for now in wait(timeout=None):
#print 'hello!'
|
aerialhedgehog/VyPy
|
trunk/VyPy/tools/wait.py
|
Python
|
bsd-3-clause
| 1,311
| 0.039664
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import division
import collections
from registration.models import RegistrationProfile
from django.conf import settings
from django.contrib.sites.requests import RequestSite
from django.core.exceptions import ValidationError
from django.core.urlresolvers import reverse
from django.db.models.expressions import RawSQL
from django.db.models.functions import Length
from django.http import HttpResponseRedirect
from django.http.request import QueryDict
from django.shortcuts import render, get_object_or_404
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _
from opentreemap.util import json_from_request, dotted_split
from treemap.decorators import get_instance_or_404
from treemap.images import save_image_from_request
from treemap.util import package_field_errors
from treemap.models import User, Favorite, MapFeaturePhoto, InstanceUser
from treemap.lib.user import get_audits, get_user_instances, get_audits_params
USER_PROFILE_FIELDS = collections.OrderedDict([
('first_name',
{'label': _('First Name'),
'identifier': 'user.first_name',
'visibility': 'public'}),
('last_name',
{'label': _('Last Name'),
'identifier': 'user.last_name',
'visibility': 'public'}),
('organization',
{'label': _('Organization'),
'identifier': 'user.organization',
'visibility': 'public'}),
('make_info_public',
{'label': _('Make Info Visible'),
'identifier': 'user.make_info_public',
'visibility': 'private',
'template': "treemap/field/make_info_public_div.html"}),
('email',
{'label': _('Email'),
'identifier': 'user.email',
'visibility': 'private'}),
('allow_email_contact',
{'label': _('Email Updates'),
'identifier': 'user.allow_email_contact',
'visibility': 'private',
'template': "treemap/field/email_subscription_div.html"})
])
def user_audits(request, username):
user = get_object_or_404(User, username=username)
instance_id = request.GET.get('instance_id', None)
instance = (get_instance_or_404(pk=instance_id)
if instance_id else None)
params = get_audits_params(request)
return get_audits(request.user, instance, request.GET.copy(), user=user,
**params)
def instance_user_audits(request, instance_url_name, username):
instance = get_instance_or_404(url_name=instance_url_name)
return HttpResponseRedirect(
reverse('user_audits', kwargs={'username': username})
+ '?instance_id=%s' % instance.pk)
def update_user(request, user):
new_values = json_from_request(request) or {}
for key in new_values:
try:
model, field = dotted_split(key, 2, cls=ValueError)
if model != 'user':
raise ValidationError(
'All fields should be prefixed with "user."')
if field not in USER_PROFILE_FIELDS:
raise ValidationError(field + ' is not an updatable field')
except ValueError:
raise ValidationError('All fields should be prefixed with "user."')
setattr(user, field, new_values[key])
try:
user.save()
return {"ok": True}
except ValidationError as ve:
raise ValidationError(package_field_errors('user', ve))
def upload_user_photo(request, user):
"""
Saves a user profile photo whose data is in the request.
    The caller or decorator is responsible for ensuring request.user == user
"""
user.photo, user.thumbnail = save_image_from_request(
request, name_prefix="user-%s" % user.pk, thumb_size=(85, 85))
user.save_with_user(request.user)
return {'url': user.thumbnail.url}
def instance_user(request, instance_url_name, username):
instance = get_instance_or_404(url_name=instance_url_name)
url = reverse('user', kwargs={'username': username}) +\
'?instance_id=%s' % instance.pk
return HttpResponseRedirect(url)
def profile_to_user(request):
if request.user and request.user.username:
return HttpResponseRedirect('/users/%s/' % request.user.username)
else:
return HttpResponseRedirect(settings.LOGIN_URL)
def forgot_username(request):
user_email = request.POST['email']
if not user_email:
raise ValidationError({
'user.email': [_('Email field is required')]
})
users = User.objects.filter(email=user_email)
# Don't reveal if we don't have that email, to prevent email harvesting
if len(users) == 1:
user = users[0]
password_reset_url = request.build_absolute_uri(
reverse('auth_password_reset'))
subject = _('Account Recovery')
body = render_to_string('treemap/partials/forgot_username_email.txt',
{'user': user,
'password_url': password_reset_url})
user.email_user(subject, body, settings.DEFAULT_FROM_EMAIL)
return {'email': user_email}
def resend_activation_email_page(request):
return {'username': request.GET.get('username')}
def resend_activation_email(request):
username = request.POST['username']
def error(error):
return render(request, 'treemap/resend_activation_email.html',
{'username': username, 'error': error})
if not username:
return error(_('Username field is required'))
    users = User.objects.filter(username=username)
if len(users) != 1:
return error(_('There is no user with that username'))
user = users[0]
if user.is_active:
return error(_('This user has already been verified'))
success = RegistrationProfile.objects.resend_activation_mail(
users[0].email, RequestSite(request), request)
if not success:
return error(_('Unable to resend activation email'))
return {'user': user}
def _small_feature_photo_url(feature):
feature = feature.cast_to_subtype()
if feature.is_plot:
tree = feature.current_tree()
if tree:
photos = tree.photos()
else:
photos = MapFeaturePhoto.objects.none()
else:
photos = feature.photos()
if len(photos) > 0:
return photos[0].thumbnail.url
else:
return None
def user(request, username):
user = get_object_or_404(User, username=username)
instance_id = request.GET.get('instance_id', None)
instance = (get_instance_or_404(pk=instance_id)
if instance_id else None)
query_vars = QueryDict(mutable=True)
if instance_id:
query_vars['instance_id'] = instance_id
audit_dict = get_audits(request.user, instance, query_vars,
user=user, should_count=True)
reputation = user.get_reputation(instance) if instance else None
favorites_qs = Favorite.objects.filter(user=user).order_by('-created')
favorites = [{
'map_feature': f.map_feature,
'title': f.map_feature.title(),
'instance': f.map_feature.instance,
'address': f.map_feature.address_full,
'photo': _small_feature_photo_url(f.map_feature)
} for f in favorites_qs]
public_fields = []
private_fields = []
for field in USER_PROFILE_FIELDS.values():
field_tuple = (field['label'], field['identifier'],
field.get('template', "treemap/field/div.html"))
        if field['visibility'] == 'public' and user.make_info_public:
public_fields.append(field_tuple)
else:
private_fields.append(field_tuple)
return {'user': user,
'its_me': user.id == request.user.id,
'reputation': reputation,
'instance_id': instance_id,
'instances': get_user_instances(request.user, user, instance),
'total_edits': audit_dict['total_count'],
'audits': audit_dict['audits'],
'next_page': audit_dict['next_page'],
'public_fields': public_fields,
'private_fields': private_fields,
'favorites': favorites}
def users(request, instance):
max_items = request.GET.get('max_items', None)
query = request.GET.get('q', None)
users_qs = InstanceUser.objects \
.filter(instance=instance)\
.order_by('user__username')\
.values('user_id', 'user__username',
'user__first_name', 'user__last_name',
'user__make_info_public')
if query:
users_qs = users_qs.filter(user__username__icontains=query)\
.order_by(
RawSQL('treemap_user.username ILIKE %s OR NULL', (query,)),
RawSQL('treemap_user.username ILIKE %s OR NULL',
(query + '%',)),
Length('user__username'),
'user__username'
)
if max_items:
users_qs = users_qs[:int(max_items)]
def annotate_user_dict(udict):
user = {
'id': udict['user_id'],
'username': udict['user__username'],
'first_name': '',
'last_name': ''
}
if udict['user__make_info_public']:
user['first_name'] = udict['user__first_name']
user['last_name'] = udict['user__last_name']
return user
return [annotate_user_dict(user) for user in users_qs]
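# Each entry returned above has this shape (a hedged summary; the values are
# hypothetical):
#   {'id': 1, 'username': 'jdoe', 'first_name': 'Jane', 'last_name': 'Doe'}
# with the name fields blanked unless the user made their info public.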
|
maurizi/otm-core
|
opentreemap/treemap/views/user.py
|
Python
|
agpl-3.0
| 9,600
| 0
|
import re
import json
import sqlite3
import nltk
stop = nltk.corpus.stopwords.words("english")
stop.append('rt')
contractions = []
with open('contractions.txt', 'rb') as f:
contractions = [c.strip() for c in f.readlines()]
lemmatizer = nltk.stem.wordnet.WordNetLemmatizer()
tokenizer = nltk.tokenize.RegexpTokenizer(r'\w+')
url_regex = re.compile(r"http[s]?[^\s]*")
contractions_regex = re.compile("|".join(contractions))
first_tweet = None
con = sqlite3.connect("D:/TWEETS/2014-11-05-22-45-54.db")
c = con.cursor()
with open("out.csv", "wb") as out_file:
for row in c.execute("SELECT tweet FROM tweets"):
if first_tweet is None:
first_tweet = row[0]
j = json.loads(row[0])
tweet_id = j['id']
timestamp = j['timestamp_ms']
text = j['text']
text = text.lower()
text = url_regex.sub('', text)
text = contractions_regex.sub('', text)
all_tokens = tokenizer.tokenize(text)
tokens = []
for token in all_tokens:
token = lemmatizer.lemmatize(token)
if token not in stop:
tokens.append(token)
#items = [str(id), json.dumps(text)] + [token.encode('utf8') for token in tokens]
items = [str(tweet_id), str(timestamp)] + [token.encode('utf8') for token in tokens]
out_file.write(" ".join(items) + "\n")
with open("tweet.json", "wb") as f:
f.write(first_tweet)
con.close()
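# Each row written to out.csv is space-separated (a hedged summary):
#   <tweet_id> <timestamp_ms> <lemmatized, stopword-filtered tokens...>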
|
maroy/TSTA
|
cse-581-project-2/src/extract_keywords.py
|
Python
|
mit
| 1,499
| 0.002668
|
# Copyright (c) 2013 Alon Swartz <alon@turnkeylinux.org>
#
# This file is part of OctoHub.
#
# OctoHub is free software; you can redistribute it and/or modify it under the
# terms of the GNU General Public License as published by the Free Software
# Foundation; either version 3 of the License, or (at your option) any later
# version.
import simplejson as json
class ResponseError(Exception):
"""Accessible attributes: error
error (AttrDict): Parsed error response
"""
def __init__(self, error):
Exception.__init__(self, error)
self.error = error
def __str__(self):
return json.dumps(self.error, indent=1)
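# Illustrative usage (hedged; a plain dict stands in for the parsed AttrDict):
#   raise ResponseError({'message': 'Not Found', 'status': '404'})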
class OctoHubError(Exception):
pass
|
bmya/odoo-support
|
adhoc_modules_server/octohub/exceptions.py
|
Python
|
lgpl-3.0
| 701
| 0.002853
|
from django.db import models
from olc_webportalv2.users.models import User
from django.contrib.postgres.fields.jsonb import JSONField
import os
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
# Create your models here.
def validate_fastq(fieldfile):
filename = os.path.basename(fieldfile.name)
if filename.endswith('.fastq.gz') or filename.endswith('.fastq'):
print('File extension for {} confirmed valid'.format(filename))
else:
        raise ValidationError(
            _('%(filename)s does not end with .fastq or .fastq.gz'),
            params={'filename': filename},
        )
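# Illustrative attachment to a model field (a hedged sketch):
#   file_R1 = models.FileField(upload_to='%Y%m%d%s', validators=[validate_fastq])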
class ProjectMulti(models.Model):
user = models.ForeignKey(User, on_delete=models.CASCADE)
project_title = models.CharField(max_length=256)
description = models.CharField(max_length=200, blank=True)
date = models.DateTimeField(auto_now_add=True)
forward_id = models.CharField(max_length=256, default='_R1')
reverse_id = models.CharField(max_length=256, default='_R2')
def __str__(self):
return self.project_title
class Sample(models.Model):
project = models.ForeignKey(ProjectMulti, on_delete=models.CASCADE, related_name='samples')
file_R1 = models.FileField(upload_to='%Y%m%d%s', blank=True)
file_R2 = models.FileField(upload_to='%Y%m%d%s', blank=True)
file_fasta = models.FileField(upload_to='%Y%m%d%s', blank=True)
title = models.CharField(max_length=200, blank=True)
genesippr_status = models.CharField(max_length=128,
default="Unprocessed")
sendsketch_status = models.CharField(max_length=128,
default="Unprocessed")
confindr_status = models.CharField(max_length=128,
default="Unprocessed")
genomeqaml_status = models.CharField(max_length=128,
default="Unprocessed")
amr_status = models.CharField(max_length=128,
default="Unprocessed")
def __str__(self):
return self.title
class GenomeQamlResult(models.Model):
class Meta:
verbose_name_plural = "GenomeQAML Results"
sample = models.ForeignKey(Sample, on_delete=models.CASCADE, related_name='genomeqaml_result')
predicted_class = models.CharField(max_length=128, default='N/A')
percent_fail = models.CharField(max_length=128, default='N/A')
percent_pass = models.CharField(max_length=128, default='N/A')
percent_reference = models.CharField(max_length=118, default='N/A')
def __str__(self):
return '{}'.format(self.sample)
class SendsketchResult(models.Model):
class Meta:
verbose_name_plural = "Sendsketch Results"
def __str__(self):
return 'pk {}: Rank {}: Sample {}'.format(self.pk, self.rank, self.sample.pk)
sample = models.ForeignKey(Sample, on_delete=models.CASCADE)
rank = models.CharField(max_length=8, default='N/A')
wkid = models.CharField(max_length=256, default='N/A')
kid = models.CharField(max_length=256, default='N/A')
ani = models.CharField(max_length=256, default='N/A')
complt = models.CharField(max_length=256, default='N/A')
contam = models.CharField(max_length=256, default='N/A')
matches = models.CharField(max_length=256, default='N/A')
unique = models.CharField(max_length=256, default='N/A')
nohit = models.CharField(max_length=256, default='N/A')
taxid = models.CharField(max_length=256, default='N/A')
gsize = models.CharField(max_length=256, default='N/A')
gseqs = models.CharField(max_length=256, default='N/A')
taxname = models.CharField(max_length=256, default='N/A')
class GenesipprResults(models.Model):
# For admin panel
def __str__(self):
return '{}'.format(self.sample)
    # TODO: Accommodate seqID
sample = models.ForeignKey(Sample, on_delete=models.CASCADE, related_name='genesippr_results')
# genesippr.csv
strain = models.CharField(max_length=256, default="N/A")
genus = models.CharField(max_length=256, default="N/A")
# STEC
serotype = models.CharField(max_length=256, default="N/A")
o26 = models.CharField(max_length=256, default="N/A")
o45 = models.CharField(max_length=256, default="N/A")
o103 = models.CharField(max_length=256, default="N/A")
o111 = models.CharField(max_length=256, default="N/A")
o121 = models.CharField(max_length=256, default="N/A")
o145 = models.CharField(max_length=256, default="N/A")
o157 = models.CharField(max_length=256, default="N/A")
uida = models.CharField(max_length=256, default="N/A")
eae = models.CharField(max_length=256, default="N/A")
eae_1 = models.CharField(max_length=256, default="N/A")
vt1 = models.CharField(max_length=256, default="N/A")
vt2 = models.CharField(max_length=256, default="N/A")
vt2f = models.CharField(max_length=256, default="N/A")
# listeria
igs = models.CharField(max_length=256, default="N/A")
hlya = models.CharField(max_length=256, default="N/A")
inlj = models.CharField(max_length=256, default="N/A")
# salmonella
inva = models.CharField(max_length=256, default="N/A")
stn = models.CharField(max_length=256, default="N/A")
def inva_number(self):
return float(self.inva.split('%')[0])
def uida_number(self):
return float(self.uida.split('%')[0])
def vt1_number(self):
return float(self.vt1.split('%')[0])
def vt2_number(self):
return float(self.vt2.split('%')[0])
def vt2f_number(self):
return float(self.vt2f.split('%')[0])
def eae_number(self):
return float(self.eae.split('%')[0])
def eae_1_number(self):
return float(self.eae_1.split('%')[0])
def hlya_number(self):
return float(self.hlya.split('%')[0])
def igs_number(self):
return float(self.igs.split('%')[0])
def inlj_number(self):
return float(self.inlj.split('%')[0])
class Meta:
verbose_name_plural = "Genesippr Results"
class GenesipprResultsSixteens(models.Model):
class Meta:
verbose_name_plural = "SixteenS Results"
def __str__(self):
return '{}'.format(self.sample)
sample = models.ForeignKey(Sample, on_delete=models.CASCADE, related_name='sixteens_results')
# sixteens_full.csv
strain = models.CharField(max_length=256, default="N/A")
gene = models.CharField(max_length=256, default="N/A")
percentidentity = models.CharField(max_length=256, default="N/A")
genus = models.CharField(max_length=256, default="N/A")
foldcoverage = models.CharField(max_length=256, default="N/A")
@property
def gi_accession(self):
# Split by | delimiter, pull second element which should be the GI#
gi_accession = self.gene.split('|')[1]
return gi_accession
class GenesipprResultsGDCS(models.Model):
class Meta:
verbose_name_plural = "GDCS Results"
def __str__(self):
return '{}'.format(self.sample)
sample = models.ForeignKey(Sample, on_delete=models.CASCADE, related_name='gdcs_results')
# GDCS.csv
strain = models.CharField(max_length=256, default="N/A")
genus = models.CharField(max_length=256, default="N/A")
matches = models.CharField(max_length=256, default="N/A")
meancoverage = models.CharField(max_length=128, default="N/A")
passfail = models.CharField(max_length=16, default="N/A")
allele_dict = JSONField(blank=True, null=True, default=dict)
class ConFindrResults(models.Model):
class Meta:
verbose_name_plural = 'Confindr Results'
def __str__(self):
return '{}'.format(self.sample)
sample = models.ForeignKey(Sample, on_delete=models.CASCADE, related_name='confindr_results')
strain = models.CharField(max_length=256, default="N/A")
genera_present = models.CharField(max_length=256, default="N/A")
contam_snvs = models.CharField(max_length=256, default="N/A")
contaminated = models.CharField(max_length=256, default="N/A")
class GenesipprResultsSerosippr(models.Model):
class Meta:
verbose_name_plural = "Serosippr Results"
def __str__(self):
return '{}'.format(self.sample)
sample = models.ForeignKey(Sample, on_delete=models.CASCADE)
class AMRResult(models.Model):
class Meta:
verbose_name_plural = 'AMR Results'
def __str__(self):
return '{}'.format(self.sample)
sample = models.ForeignKey(Sample, on_delete=models.CASCADE, related_name='amr_results')
results_dict = JSONField(blank=True, null=True, default=dict)
species = models.CharField(max_length=88, default='N/A')
|
forestdussault/olc_webportalv2
|
olc_webportalv2/new_multisample/models.py
|
Python
|
mit
| 8,667
| 0.000923
|
# Copyright 2016 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ament_flake8.main import main_with_errors
def test_flake8():
rc, errors = main_with_errors(argv=[])
assert rc == 0, \
'Found %d code style errors / warnings:\n' % len(errors) + \
'\n'.join(errors)
|
ros2/launch
|
launch_testing/test/launch_testing/test_flake8.py
|
Python
|
apache-2.0
| 830
| 0
|
import os
from setuptools import setup, find_packages
from jw2html import VERSION
setup(
name='JW2HTML',
version=VERSION,
description='JW2HTML converts an issue of the Jungle World from the website to a single HTML file to be used for epub conversion by e.g. calibre.',
long_description='Alas, there is no epub version of the Jungle World, http://jungle-world.com . Hence this little module to download the current issue and pack it into one HTML file which can then be converted to epub (using e.g. http://calibre-ebook.com). It also downloads the cover image for easy inclusion when creating the book in calibre.',
license='GPL',
keywords='jungle world newspaper html epub convert',
url='https://github.com/marmorkuchen/jw2html',
author='marmorkuchen',
author_email='marmorkuchen@kodeaffe.de',
packages=find_packages(),
include_package_data=True,
data_files=[
('doc', ['README.rst', 'LICENSE']),
(os.path.join(os.sep, 'etc'), ['jw2html.ini',]),
],
entry_points={
'console_scripts': [
'jw2html = jw2html:main',
]
},
install_requires=[
'beautifulsoup4',
],
)
|
marmorkuchen/jw2html
|
setup.py
|
Python
|
gpl-3.0
| 1,182
| 0.002538
|
# coding: utf8
from functools import wraps
from logging import getLogger
logger = getLogger(__name__)
__author__ = 'marcos.costa'
class request_logger(object):
def __init__(self, method=None):
self.method = method
def __call__(self, func):
method = self.method
if method is None:
method = func.func_name
@wraps(func)
def wrapper(instance, request, *args, **kwargs):
response = func(instance, request, *args, **kwargs)
msg = ("\nCalled method: {method}\nrequest: {request}"
"\nresponse: {response}").format(method=method,
request=request,
response=response)
logger.info(msg)
return response
return wrapper
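# Illustrative usage (hedged; the service class and gateway call are
# hypothetical):
#   class SoapClient(object):
#       @request_logger('Authorize')
#       def authorize(self, request):
#           return call_gateway(request)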
|
GTACSolutions/python-braspag
|
python_braspag/decorators.py
|
Python
|
apache-2.0
| 849
| 0.001178
|
from werkzeug.local import LocalProxy
from . import extension_access
def extension_access_proxy(name):
return LocalProxy(lambda: getattr(extension_access, name, None))
# Mostly for backwards compatibility
cache = extension_access_proxy("cache")
mongo = extension_access_proxy("mongo")
mail = extension_access_proxy("mail")
admin = extension_access_proxy("admin")
rest_api = extension_access_proxy("rest_api")
markdown = extension_access_proxy("markdown")
assets = extension_access_proxy("assets")
|
JunctionAt/JunctionWWW
|
blueprints/base.py
|
Python
|
agpl-3.0
| 504
| 0.001984
|
import AI.pad
import AI.state
import AI.state_manager
class Character:
def __init__(self, pad_path):
self.action_list = []
self.last_action = 0
self.pad = AI.pad.Pad(pad_path)
self.state = AI.state.State()
#Set False to enable character selection
self.test_mode = True
self.sm = AI.state_manager.StateManager(self.state, self.test_mode)
#test_mode = False, Selects character each run
def make_action(self, mm):
if self.state.menu == AI.state.Menu.Game:
self.advance()
elif self.state.menu == AI.state.Menu.Characters:
mm.pick_fox(self.state, self.pad)
elif self.state.menu == AI.state.Menu.Stages:
self.pad.tilt_stick(AI.pad.Stick.C, 0.5, 0.5)
elif self.state.menu == AI.state.Menu.PostGame:
mm.press_start_lots(self.state, self.pad)
#test_mode = True, AI starts fighting each run, saves time during testing
def make_action_test(self, mm):
if self.state.menu == AI.state.Menu.Game:
self.advance()
elif self.state.menu == AI.state.Menu.PostGame:
mm.press_start_lots(self.state, self.pad)
#implemented by each character to decide what to do
#includes some states where each character will respond the same
def logic(self):
if AI.state.is_spawning(self.state.players[2].action_state):
self.tilt_stick(60, 'DOWN')
self.tilt_stick(3, None)
#compare AI's current state
def compare_AI_state(self, test_state):
return self.state.players[2].action_state is test_state
#compare P1 current state
def compare_P1_state(self, test_state):
return self.state.players[0].action_state is test_state
#executes button presses defined in action_list, runs logic() once list is empty
def advance(self):
while self.action_list:
wait, func, args = self.action_list[0]
if self.state.frame - self.last_action < wait:
return
else:
self.action_list.pop(0)
if func is not None:
func(*args)
self.last_action = self.state.frame
else:
self.logic()
'''Methods simulate controller input; appends necessary tuple to action_list'''
def press_button(self, wait, button):
self.action_list.append((wait, self.pad.press_button, [button]))
def release_button(self, wait, button):
self.action_list.append((wait, self.pad.release_button, [button]))
def tilt_stick(self, wait, direction):
        if direction == 'UP':
            self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.MAIN, 0.5, 1.0]))
        elif direction == 'DOWN':
            self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.MAIN, 0.5, 0.0]))
        elif direction == 'DOWN_LEFT':
            self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.MAIN, 0.25, 0.25]))
        elif direction == 'DOWN_RIGHT':
            self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.MAIN, 0.75, 0.25]))
        elif direction == 'RIGHT':
            self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.MAIN, 1.0, 0.5]))
        elif direction == 'LEFT':
            self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.MAIN, 0.0, 0.5]))
        elif direction is None:
            self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.MAIN, 0.5, 0.5]))
def tilt_c_stick(self, wait, direction):
        if direction == 'UP':
            self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.C, 0.5, 1.0]))
        elif direction == 'DOWN':
            self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.C, 0.5, 0.0]))
        elif direction == 'RIGHT':
            self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.C, 1.0, 0.5]))
        elif direction == 'LEFT':
            self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.C, 0.0, 0.5]))
        elif direction is None:
            self.action_list.append((wait, self.pad.tilt_stick, [AI.pad.Stick.C, 0.5, 0.5]))
def press_trigger(self, wait, amount):
self.action_list.append((wait, self.pad.press_trigger, [AI.pad.Trigger.L, amount]))
def wait(self, wait):
self.action_list.append((wait, None, []))
'''Execute actions shared among all characters'''
def style(self, wait):
pass
def side_b(self, wait):
self.tilt_stick(wait, 'RIGHT')
self.press_button(1, AI.pad.Button.B)
self.release_button(2, AI.pad.Button.B)
self.tilt_stick(2, None)
def shield(self, wait, length):
self.press_trigger(wait, 0.3)
self.press_trigger(length, 0.0)
def dashdance(self, wait, length):
self.wait(wait)
for _ in range(length):
self.tilt_stick(4, 'LEFT')
self.tilt_stick(4, 'RIGHT')
self.tilt_stick(1, None)
def shorthop(self, wait):
self.press_button(wait, AI.pad.Button.X)
self.release_button(1, AI.pad.Button.X)
'''Execute similar actions that is dependent on character frame data'''
def wavedash(self, wait, direction, wait_airdodge):
self.tilt_stick(wait, direction)
self.shorthop(1)
self.press_button(wait_airdodge, AI.pad.Button.L)
self.release_button(2, AI.pad.Button.L)
self.tilt_stick(1, None)
def shorthop_nair(self, wait, wait_attack, wait_ff):
self.shorthop(wait)
self.press_button(wait_attack, AI.pad.Button.A)
self.release_button(1, AI.pad.Button.A)
self.tilt_stick(wait_ff, 'DOWN')
self.tilt_stick(3, None)
self.press_trigger(2, 0.5)
self.press_trigger(1, 0.0)
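# Illustrative usage from a subclass (hedged; 'Fox' and the pad path are
# hypothetical):
#   fox = Fox('/path/to/dolphin/pipe')  # Fox subclasses Character, defines logic()
#   fox.wavedash(5, 'LEFT', 3)          # queue a left wavedash, airdodging on frame 3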
|
alex-zoltowski/SSBM-AI
|
AI/Characters/character.py
|
Python
|
gpl-3.0
| 5,818
| 0.004641
|
import sys
# Split a file of bracketed reviews into two outputs: the first [...] block
# on each line holds real reviews, the second [...] block holds fake ones.
folder_loc = sys.argv[1]
filename = sys.argv[2]
fileobj = open(folder_loc + filename)
flag = 0
real_rev = open(folder_loc + "real_" + filename, "w+")
fake_rev = open(folder_loc + "fake_" + filename, "w+")
for line in fileobj:
    for i in range(len(line)):
        if (line[i] == '[' and flag == 0): # beginning of real reviews
            flag = 1
        elif (line[i-1] == ']' and flag == 1): # end of real reviews
            flag = 2
        elif (line[i] == '[' and flag == 2): # beginning of fake reviews
            flag = 3
        elif (line[i-1] == ']' and flag == 3): # end of fake reviews
            flag = 4
        if (flag == 1):
            real_rev.write(line[i])
        elif (flag == 3):
            fake_rev.write(line[i])
# close the handles so both output files are flushed to disk
fileobj.close()
real_rev.close()
fake_rev.close()
|
JoshuaEbenezer/SNLP-reviews
|
SNLP/src/read_file.py
|
Python
|
gpl-3.0
| 795
| 0.023899
|
# -*- coding: utf-8 -*-
"""tests decoration handling functions that are used by checks"""
from translate.filters import decoration
def test_spacestart():
"""test operation of spacestart()"""
assert decoration.spacestart(" Start") == " "
assert decoration.spacestart(u"\u0020\u00a0Start") == u"\u0020\u00a0"
# non-breaking space
assert decoration.spacestart(u"\u00a0\u202fStart") == u"\u00a0\u202f"
# Some exotic spaces
assert decoration.spacestart(u"\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200aStart") == u"\u2000\u2001\u2002\u2003\u2004\u2005\u2006\u2007\u2008\u2009\u200a"
def test_isvalidaccelerator():
"""test the isvalidaccelerator() function"""
# Mostly this tests the old code path where acceptlist is None
assert not decoration.isvalidaccelerator(u"")
assert decoration.isvalidaccelerator(u"a")
assert decoration.isvalidaccelerator(u"1")
assert not decoration.isvalidaccelerator(u"ḽ")
# Test new code path where we actually have an acceptlist
assert decoration.isvalidaccelerator(u"a", u"aeiou")
assert decoration.isvalidaccelerator(u"ḽ", u"ḓṱḽṋṅ")
assert not decoration.isvalidaccelerator(u"a", u"ḓṱḽṋṅ")
def test_find_marked_variables():
"""check that we can identify variables correctly, the first returned
value is the start location, the second returned value is the actual
    variable sans decorations"""
variables = decoration.findmarkedvariables("The <variable> string", "<", ">")
assert variables == [(4, "variable")]
variables = decoration.findmarkedvariables("The $variable string", "$", 1)
assert variables == [(4, "v")]
variables = decoration.findmarkedvariables("The $variable string", "$", None)
assert variables == [(4, "variable")]
variables = decoration.findmarkedvariables("The $variable string", "$", 0)
assert variables == [(4, "")]
variables = decoration.findmarkedvariables("The &variable; string", "&", ";")
assert variables == [(4, "variable")]
variables = decoration.findmarkedvariables("The &variable.variable; string", "&", ";")
assert variables == [(4, "variable.variable")]
def test_getnumbers():
"""test operation of getnumbers()"""
assert decoration.getnumbers(u"") == []
assert decoration.getnumbers(u"No numbers") == []
assert decoration.getnumbers(u"Nine 9 nine") == ["9"]
assert decoration.getnumbers(u"Two numbers: 2 and 3") == ["2", "3"]
assert decoration.getnumbers(u"R5.99") == ["5.99"]
# TODO fix these so that we are able to consider locale specific numbers
#assert decoration.getnumbers(u"R5,99") == ["5.99"]
#assert decoration.getnumbers(u"1\u00a0000,99") == ["1000.99"]
assert decoration.getnumbers(u"36°") == [u"36°"]
assert decoration.getnumbers(u"English 123, Bengali \u09e7\u09e8\u09e9") == [u"123", u"\u09e7\u09e8\u09e9"]
def test_getfunctions():
"""test operation of getfunctions()"""
assert decoration.getfunctions(u"") == []
assert decoration.getfunctions(u"There is no function") == []
assert decoration.getfunctions(u"Use the getfunction() function.") == ["getfunction()"]
assert decoration.getfunctions(u"Use the getfunction1() function or the getfunction2() function.") == ["getfunction1()", "getfunction2()"]
assert decoration.getfunctions(u"The module.getfunction() method") == ["module.getfunction()"]
assert decoration.getfunctions(u"The module->getfunction() method") == ["module->getfunction()"]
assert decoration.getfunctions(u"The module::getfunction() method") == ["module::getfunction()"]
assert decoration.getfunctions(u"The function().function() function") == ["function().function()"]
assert decoration.getfunctions(u"Deprecated, use function().") == ["function()"]
assert decoration.getfunctions(u"Deprecated, use function() or other().") == ["function()", "other()"]
|
bluemini/kuma
|
vendor/packages/translate/filters/test_decoration.py
|
Python
|
mpl-2.0
| 3,921
| 0.004108
|
# POS Census v0.1
# Copyright (c) 2012 Andrew Austin
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Names and data specific to Eve Online are Copyright CCP Games H.F.
import eveapi
import csv
import sqlite3
# Put your API information here
keyID = XXXXXXX
vCode = 'XXXXXXXXXXXXXXXXXXXXXXXXXXXXXX'
# Put the system ID you want to take a census of here
systemID = XXXXXXXXX
def build_database():
"""
Build a sqlite3 database from the mapDenormalize.csv data file.
"""
print "Building database...\n"
conn = sqlite3.Connection("mapData.db")
c = conn.cursor()
c.execute('''CREATE TABLE mapDenormalize (id int, name text)''')
reader = csv.reader(open('mapDenormalize.csv'))
# Skip the header row
next(reader)
# Build a list from which we'll populate the sqlite DB
records = []
for row in reader:
records.append((int(row[0]), row[11]))
print "Inserting %s rows to mapData.db..." % len(records)
c.executemany("INSERT INTO mapDenormalize VALUES (?,?)", records)
conn.commit()
conn.close()
class POS:
"""
A POS object, contains a location string, and lists of chas and smas.
The lists of chas and smas are lists of (itemID, name) tuples.
"""
    def __init__(self, name, location, x, y, z, smas=None, chas=None):
        self.name = name
        self.location = location
        # avoid mutable default arguments: a shared [] default would be
        # reused across every POS instance
        self.smas = smas if smas is not None else []
        self.chas = chas if chas is not None else []
self.x = x
self.y = y
self.z = z
def report(self):
"""
Output the report for this POS.
"""
print "*****************************"
print "POS: %s at %s" % (self.name, self.location)
print "\t %s CHAs:" % len(self.chas)
for cha in self.chas:
print "\t \t itemID: %s \t Name: %s" % (cha[0], cha[1])
print "\t %s SMAs:" % len(self.smas)
for sma in self.smas:
print "\t \t itemID: %s \t Name: %s" % (sma[0], sma[1])
print "*****************************"
def is_owner(self, x, y, z):
"""
        Returns True if the given x, y, z coordinates fall within an
        axis-aligned box extending 350km from the POS on each axis.
"""
minx = self.x - 350000
maxx = self.x + 350000
miny = self.y - 350000
maxy = self.y + 350000
minz = self.z - 350000
maxz = self.z + 350000
return minx <= x <= maxx and miny <= y <= maxy and minz <= z <= maxz
def generate_report():
"""
Main entry point for the program.
    Generates POS objects from the StarbaseList API and populates them
using AssetList and Locations API calls.
"""
api = eveapi.EVEAPIConnection()
auth = api.auth(keyID=keyID, vCode=vCode)
conn = sqlite3.Connection('mapData.db')
c = conn.cursor()
print "Downloading Corporation Asset List..."
assets = auth.corp.AssetList()
print "Downloading Starbase List..."
starbases = auth.corp.StarbaseList()
rawCHAList = []
rawSMAList = []
poslist = []
for asset in assets.assets:
if asset.locationID == systemID:
if asset.typeID == 17621:
rawCHAList.append(asset.itemID)
if asset.typeID == 12237:
rawSMAList.append(asset.itemID)
print "Building POS List..."
for pos in starbases.starbases:
locationapi = auth.corp.Locations(IDs=pos.itemID).locations[0]
        # use a parameterised query rather than string interpolation
        moon = c.execute("SELECT name FROM mapDenormalize WHERE id = ?", (pos.moonID,)).fetchone()[0]
poslist.append(POS(name=locationapi.itemName,
location=moon, smas=[], chas=[], x=locationapi.x,
y=locationapi.y, z=locationapi.z))
print "Processing SMAs..."
for sma in rawSMAList:
locationapi = auth.corp.Locations(IDs=sma).locations[0]
x = locationapi.x
y = locationapi.y
z = locationapi.z
name = locationapi.itemName
for pos in poslist:
if pos.is_owner(x=x, y=y, z=z):
pos.smas.append((sma, name))
print "Processing CHAs..."
for cha in rawCHAList:
locationapi = auth.corp.Locations(IDs=cha).locations[0]
x = locationapi.x
y = locationapi.y
z = locationapi.z
name = locationapi.itemName
for pos in poslist:
if pos.is_owner(x=x, y=y, z=z):
pos.chas.append((cha, name))
print "Displaying Report..."
for pos in poslist:
pos.report()
# Make sure we enter at generate_report()
if __name__ == "__main__":
generate_report()
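# Ownership sketch (not part of the original script; coordinates are
# illustrative, in meters): is_owner() is an axis-aligned bounding-box test,
# so a POS claims any structure within 350km of it on every axis.
#
#     tower = POS(name='Home', location='Moon 1', x=0, y=0, z=0)
#     tower.is_owner(x=100000, y=-200000, z=300000)   # True
#     tower.is_owner(x=400000, y=0, z=0)              # False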
|
marbindrakon/eve-poscensus
|
census.py
|
Python
|
gpl-3.0
| 5,093
| 0.001767
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import sys
from nose.plugins.skip import SkipTest
if sys.version_info < (2, 7):
raise SkipTest("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_vcmp_guest import Parameters
from library.modules.bigip_vcmp_guest import ModuleManager
from library.modules.bigip_vcmp_guest import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
try:
from ansible.modules.network.f5.bigip_vcmp_guest import Parameters
from ansible.modules.network.f5.bigip_vcmp_guest import ModuleManager
from ansible.modules.network.f5.bigip_vcmp_guest import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
except ImportError:
raise SkipTest("F5 Ansible modules require the f5-sdk Python library")
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
path = os.path.join(fixture_path, name)
if path in fixture_data:
return fixture_data[path]
with open(path) as f:
data = f.read()
try:
data = json.loads(data)
except Exception:
pass
fixture_data[path] = data
return data
class TestParameters(unittest.TestCase):
def test_module_parameters(self):
args = dict(
initial_image='BIGIP-12.1.0.1.0.1447-HF1.iso',
mgmt_network='bridged',
mgmt_address='1.2.3.4/24',
vlans=[
'vlan1',
'vlan2'
]
)
p = Parameters(params=args)
assert p.initial_image == 'BIGIP-12.1.0.1.0.1447-HF1.iso'
assert p.mgmt_network == 'bridged'
def test_module_parameters_mgmt_bridged_without_subnet(self):
args = dict(
mgmt_network='bridged',
mgmt_address='1.2.3.4'
)
p = Parameters(params=args)
assert p.mgmt_network == 'bridged'
assert p.mgmt_address == '1.2.3.4/32'
def test_module_parameters_mgmt_address_cidr(self):
args = dict(
mgmt_network='bridged',
mgmt_address='1.2.3.4/24'
)
p = Parameters(params=args)
assert p.mgmt_network == 'bridged'
assert p.mgmt_address == '1.2.3.4/24'
def test_module_parameters_mgmt_address_subnet(self):
args = dict(
mgmt_network='bridged',
mgmt_address='1.2.3.4/255.255.255.0'
)
p = Parameters(params=args)
assert p.mgmt_network == 'bridged'
assert p.mgmt_address == '1.2.3.4/24'
def test_module_parameters_mgmt_route(self):
args = dict(
mgmt_route='1.2.3.4'
)
p = Parameters(params=args)
assert p.mgmt_route == '1.2.3.4'
def test_module_parameters_vcmp_software_image_facts(self):
# vCMP images may include a forward slash in their names. This is probably
# related to the slots on the system, but it is not a valid value to specify
# that slot when providing an initial image
args = dict(
initial_image='BIGIP-12.1.0.1.0.1447-HF1.iso/1',
)
p = Parameters(params=args)
assert p.initial_image == 'BIGIP-12.1.0.1.0.1447-HF1.iso/1'
def test_api_parameters(self):
args = dict(
initialImage="BIGIP-tmos-tier2-13.1.0.0.0.931.iso",
managementGw="2.2.2.2",
managementIp="1.1.1.1/24",
managementNetwork="bridged",
state="deployed",
vlans=[
"/Common/vlan1",
"/Common/vlan2"
]
)
p = Parameters(params=args)
assert p.initial_image == 'BIGIP-tmos-tier2-13.1.0.0.0.931.iso'
assert p.mgmt_route == '2.2.2.2'
assert p.mgmt_address == '1.1.1.1/24'
assert '/Common/vlan1' in p.vlans
assert '/Common/vlan2' in p.vlans
class TestManager(unittest.TestCase):
def setUp(self):
self.spec = ArgumentSpec()
self.patcher1 = patch('time.sleep')
self.patcher1.start()
def tearDown(self):
self.patcher1.stop()
def test_create_vlan(self, *args):
set_module_args(dict(
name="guest1",
mgmt_network="bridged",
mgmt_address="10.10.10.10/24",
initial_image="BIGIP-13.1.0.0.0.931.iso",
server='localhost',
password='password',
user='admin'
))
module = AnsibleModule(
argument_spec=self.spec.argument_spec,
supports_check_mode=self.spec.supports_check_mode
)
# Override methods to force specific logic in the module to happen
mm = ModuleManager(module=module)
mm.create_on_device = Mock(return_value=True)
mm.exists = Mock(return_value=False)
mm.is_deployed = Mock(side_effect=[False, True, True, True, True])
mm.deploy_on_device = Mock(return_value=True)
results = mm.exec_module()
assert results['changed'] is True
assert results['name'] == 'guest1'
|
alexlo03/ansible
|
test/units/modules/network/f5/test_bigip_vcmp_guest.py
|
Python
|
gpl-3.0
| 5,742
| 0.000522
|
from __future__ import division, print_function
from os.path import join, split, dirname
import os
import sys
from distutils.dep_util import newer
from distutils.msvccompiler import get_build_version as get_msvc_build_version
def needs_mingw_ftime_workaround():
    # We need the mingw workaround for _ftime if the msvc runtime version is
    # 8.0 or above (checked below as msver >= 8) and we build with mingw ...
# ... but we can't easily detect compiler version outside distutils command
# context, so we will need to detect in randomkit whether we build with gcc
msver = get_msvc_build_version()
if msver and msver >= 8:
return True
return False
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration, get_mathlibs
config = Configuration('random', parent_package, top_path)
def generate_libraries(ext, build_dir):
config_cmd = config.get_config_cmd()
libs = get_mathlibs()
tc = testcode_wincrypt()
if config_cmd.try_run(tc):
libs.append('Advapi32')
ext.libraries.extend(libs)
return None
# enable unix large file support on 32 bit systems
# (64 bit off_t, lseek -> lseek64 etc.)
defs = [('_FILE_OFFSET_BITS', '64'),
('_LARGEFILE_SOURCE', '1'),
('_LARGEFILE64_SOURCE', '1'),
]
if needs_mingw_ftime_workaround():
defs.append(("NPY_NEEDS_MINGW_TIME_WORKAROUND", None))
libs = []
# Configure mtrand
try:
import cffi
have_cffi = True
except ImportError:
have_cffi = False
if have_cffi:
#create the dll/so for the cffi version
if sys.platform == 'win32':
libs.append('Advapi32')
defs.append(('_MTRAND_DLL',None))
config.add_shared_library('_mtrand',
sources=[join('mtrand', x) for x in
['randomkit.c', 'distributions.c', 'initarray.c']],
build_info = {
'libraries': libs,
'depends': [join('mtrand', '*.h'),
],
'macros': defs,
}
)
else:
config.add_extension('mtrand',
sources=[join('mtrand', x) for x in
['mtrand.c', 'randomkit.c', 'initarray.c',
'distributions.c']]+[generate_libraries],
libraries=libs,
depends=[join('mtrand', '*.h'),
join('mtrand', '*.pyx'),
join('mtrand', '*.pxi'),],
define_macros=defs,
)
config.add_data_files(('.', join('mtrand', 'randomkit.h')))
config.add_data_dir('tests')
return config
def testcode_wincrypt():
return """\
/* check to see if _WIN32 is defined */
int main(int argc, char *argv[])
{
#ifdef _WIN32
return 0;
#else
return 1;
#endif
}
"""
if __name__ == '__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
|
NextThought/pypy-numpy
|
numpy/random/setup.py
|
Python
|
bsd-3-clause
| 3,228
| 0.006196
|
# -*- coding: UTF-8 -*-
"""
Options quotes.
"""
import os
import json
import logging
from datetime import datetime
import collections
import requests
from landscape.finance import consts, database, dates, utils
from landscape.finance.volatility import math
OptionQuote = collections.namedtuple(
'OptionQuote',
'symbol type expiration strike date time bid ask stock iv_bid iv_ask')
CBOE_URL = 'http://www.cboe.com/DelayedQuote/QuoteTableDownload.aspx'
current_dir = os.path.dirname(__file__)
with open(os.path.join(current_dir, 'data/cboe_headers.json')) as f:
CBOE_HEADERS = json.load(f)
with open(os.path.join(current_dir, 'data/cboe_post_data.json')) as f:
CBOE_POST_DATA = json.load(f)
CBOE_POST_DATA_TICKER_KEY = 'ctl00$ctl00$AllContent$ContentMain$' \
'QuoteTableDownloadCtl1$txtTicker'
MONTHS = {'Jan': 1, 'Feb': 2, 'Mar': 3, 'Apr': 4, 'May': 5, 'Jun': 6,
'Jul': 7, 'Aug': 8, 'Sep': 9, 'Oct': 10, 'Nov': 11, 'Dec': 12}
SKIP_SYMBOLS = ['SPXW', 'SPXQ', 'SPY7', 'SPYJ', 'VXX2']
def save_quote(db, quote):
"""Saves quote to database"""
expiration = database.encode_date(quote.expiration)
date = database.encode_date(quote.date)
time = database.encode_time(quote.time)
db.execute('UPDATE options SET time=?, bid=?, ask=?, stock=?, ' \
'iv_bid=?, iv_ask=? ' \
'WHERE symbol=? AND type=? AND expiration=? ' \
'AND strike=? AND date=?;', [time, quote.bid, quote.ask,
quote.stock, quote.iv_bid, quote.iv_ask, quote.symbol,
quote.type, expiration, quote.strike, date])
if db.rowcount == 0:
db.execute('INSERT INTO options VALUES ' \
'(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);',
[quote.symbol, quote.type, expiration, quote.strike,
date, time, quote.bid, quote.ask, quote.stock,
quote.iv_bid, quote.iv_ask])
def quote_factory(_, row):
"""Converts row to quote"""
symbol, type_, expiration, strike, date, time, \
bid, ask, stock, iv_bid, iv_ask = row
expiration = database.decode_date(expiration)
date = database.decode_date(date)
time = database.decode_time(time)
return OptionQuote(symbol, type_, expiration, strike, date, time,
bid, ask, stock, iv_bid, iv_ask)
def _fetch_data(symbol):
"""Fetches realtime (delayed) options quotes from CBOE as a raw text.
Args:
symbol (str): Symbol to fetch.
Returns:
str: Raw quotes, or None if failed.
"""
logger = logging.getLogger(__name__)
logger.info('Fetching options quotes from CBOE for %s ...', symbol)
data = dict(CBOE_POST_DATA)
data[CBOE_POST_DATA_TICKER_KEY] = symbol
response = requests.post(CBOE_URL, data=data, headers=CBOE_HEADERS)
if response.status_code == 200:
return response.text
else:
logger.error('Cannot fetch options quotes from CBOE for %s', symbol)
def _parse_data(symbol, data, is_eod, db_name=None, timestamp=None):
"""Parses realtime (delayed) options quotes from CBOE and saves to
database.
Args:
symbol (str): Symbol.
data (str): Raw quotes for the symbol.
is_eod (bool): If True: mark received quotes as EOD (time=None),
if False: store actual time.
db_name (str): Optional database name.
timestamp (datetime): Optional datetime for the data.
Returns:
list: List of OptionQuote objects.
"""
logger = logging.getLogger(__name__)
if timestamp is None:
timestamp = dates.get_database_timestamp()
date = timestamp.date()
time = None if is_eod else timestamp.time()
quotes = []
stock_price = None
expirations = dates.get_expirations(symbol)
with database.connect_db(db_name) as db:
for line in data.splitlines():
values = line.strip().split(',')
if (len(values) == 4) and (stock_price is None):
stock_price = utils.to_float(values[1])
continue
if len(values) != 15:
continue
if values[0] == 'Calls' or values[0].find('-') >= 0:
continue
code_values = values[0].split(' ')
if len(code_values) != 4:
continue
position = code_values[3].find(code_values[0])
if code_values[3][1:position] in SKIP_SYMBOLS:
continue
expiration_year = 2000 + int(code_values[0])
expiration_month = MONTHS[code_values[1]]
expiration_day = int(code_values[3][position + 2:position + 4])
expiration = datetime(expiration_year, expiration_month,
expiration_day).date()
if expiration not in expirations:
continue
strike = utils.to_float(code_values[2])
for type_, bid, ask in [
(consts.CALL, values[3], values[4]),
(consts.PUT, values[10], values[11]),
]:
bid = utils.to_float(bid)
ask = utils.to_float(ask)
quote = OptionQuote(
symbol, type_, expiration, strike, date, time,
bid, ask, stock_price, None, None)
iv_bid = math.calc_iv(quote, bid) * 100
iv_ask = math.calc_iv(quote, ask) * 100
quote = OptionQuote(
symbol, type_, expiration, strike, date, time,
bid, ask, stock_price, iv_bid, iv_ask)
save_quote(db, quote)
quotes.append(quote)
logger.info('... quotes parsed: %d', len(quotes))
return quotes
def fetch_realtime(symbol, db_name=None):
"""Fetches realtime (delayed) options quotes from CBOE and saves to
database.
Args:
symbol (str): Symbol to fetch.
db_name (str): Optional database name.
Returns:
list: list of OptionQuote objects
"""
data = _fetch_data(symbol)
return _parse_data(symbol, data, False, db_name) if data else []
def fetch_historical(symbol, db_name=None):
"""Actually stores realtime data to database.
There's no free EOD options quotes provider so you need to call this method
at the end of each business day.
Args:
symbol: Symbol to fetch.
db_name (str): Optional database name.
Returns:
Number of quotes fetched.
"""
data = _fetch_data(symbol)
return len(_parse_data(symbol, data, True, db_name)) if data else 0
def query_historical(symbol, date, db_name=None):
"""Queries historical quotes from local database for given symbol and date.
Mimics fetch_realtime.
Args:
symbol (str): Stock symbol.
date (date): Date to query.
db_name (str): Optional database name.
Returns:
See fetch_realtime.
"""
with database.connect_db(db_name) as db:
db.row_factory = quote_factory
db.execute('SELECT * FROM options WHERE symbol=? AND date=?;',
[symbol, database.encode_date(date)])
return db.fetchall()
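# Usage sketch (not part of the original module; the symbol is illustrative):
# fetch delayed quotes once, then read them back from the local database.
#
#     quotes = fetch_realtime('SPX')                  # parse CBOE data + save
#     same = query_historical('SPX', quotes[0].date)  # read back
#
# Note that save_quote() is a manual upsert: it tries an UPDATE keyed on
# (symbol, type, expiration, strike, date) first, and only INSERTs when
# db.rowcount shows that no existing row was touched.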
|
zzzoidberg/landscape
|
finance/quotes/options.py
|
Python
|
mit
| 7,159
| 0.000978
|
import string
# Manages Local "database" for ZBWarDrive:
# This keeps track of current ZBWarDrive and Sniffing Device State.
# It is different from the online logging database.
class ZBScanDB:
"""
API to interact with the "database" storing information
for the zbscanning program.
"""
def __init__(self):
self.channels = {11:None, 12:None, 13:None, 14:None, 15:None, 16:None, 17:None, 18:None, 19:None, 20:None, 21:None, 22:None, 23:None, 24:None, 25:None, 26:None}
# Devices is indexed by deviceId and stores a 4-tuple of device string, device serial, current status, and current channel
self.devices = {}
def close(self):
pass
# Add a new devices to the DB
def store_devices(self, devid, devstr, devserial):
self.devices[devid] = (devstr, devserial, 'Free', None)
# Returns the devid of a device marked 'Free',
# or None if there are no Free devices in the DB.
def get_devices_nextFree(self):
for devid, dev in self.devices.items():
if dev[2] == 'Free':
return devid
def update_devices_status(self, devid, newstatus):
if devid not in self.devices:
return None
(devstr, devserial, _, chan) = self.devices[devid]
self.devices[devid] = (devstr, devserial, newstatus, chan)
def update_devices_start_capture(self, devid, channel):
if devid not in self.devices:
return None
(devstr, devserial, _, _) = self.devices[devid]
self.devices[devid] = (devstr, devserial, "Capture", channel)
# Add a new network to the DB
def store_networks(self, key, spanid, source, channel, packet):
if channel not in self.channels:
return None
# TODO note this only stores the most recent in the channel
self.channels[channel] = (key, spanid, source, packet)
# Return the channel of the network identified by key,
# or None if it doesn't exist in the DB.
def get_networks_channel(self, key):
#print "Looking up channel for network with key of %s" % (key)
        # iterate over (channel, data) pairs; iterating the dict directly
        # yields only keys, and unseen channels still map to None
        for chan, data in self.channels.items():
            if data is not None and data[0] == key:
                return chan
return None
def channel_status_logging(self, chan):
'''
Returns False if we have not seen the network or are not currently
logging it's channel, and returns True if we are currently logging it.
@return boolean
'''
if chan == None: raise Exception("None given for channel number")
elif chan not in self.channels: raise Exception("Invalid channel")
for dev in self.devices.values():
if dev[3] == chan and dev[2] == 'Capture':
return True
return False
# end of ZBScanDB class
def toHex(data):
    """Return the lowercase hex representation of a byte string."""
    return ''.join(["%02x" % ord(x) for x in data])
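# Usage sketch (not part of the original module; ids and strings are
# illustrative): register a capture device, claim it, start a capture on
# channel 15, then confirm that the channel is being logged.
#
#     db = ZBScanDB()
#     db.store_devices('dev0', 'RZUSBstick', 'SN12345')
#     devid = db.get_devices_nextFree()           # -> 'dev0'
#     db.update_devices_start_capture(devid, 15)
#     db.channel_status_logging(15)               # -> True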
|
JonathonReinhart/killerbee
|
killerbee/zbwardrive/db.py
|
Python
|
bsd-3-clause
| 2,845
| 0.009842
|
"""Support for interfacing with Monoprice 6 zone home audio controller."""
import logging
import voluptuous as vol
from homeassistant.components.media_player import MediaPlayerDevice, PLATFORM_SCHEMA
from homeassistant.components.media_player.const import (
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_NAME,
CONF_PORT,
STATE_OFF,
STATE_ON,
)
import homeassistant.helpers.config_validation as cv
from .const import DOMAIN, SERVICE_RESTORE, SERVICE_SNAPSHOT
_LOGGER = logging.getLogger(__name__)
SUPPORT_MONOPRICE = (
SUPPORT_VOLUME_MUTE
| SUPPORT_VOLUME_SET
| SUPPORT_VOLUME_STEP
| SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_SELECT_SOURCE
)
ZONE_SCHEMA = vol.Schema({vol.Required(CONF_NAME): cv.string})
SOURCE_SCHEMA = vol.Schema({vol.Required(CONF_NAME): cv.string})
CONF_ZONES = "zones"
CONF_SOURCES = "sources"
DATA_MONOPRICE = "monoprice"
# Valid zone ids: 11-16 or 21-26 or 31-36
ZONE_IDS = vol.All(
vol.Coerce(int),
vol.Any(
vol.Range(min=11, max=16), vol.Range(min=21, max=26), vol.Range(min=31, max=36)
),
)
# Valid source ids: 1-6
SOURCE_IDS = vol.All(vol.Coerce(int), vol.Range(min=1, max=6))
MEDIA_PLAYER_SCHEMA = vol.Schema({ATTR_ENTITY_ID: cv.comp_entity_ids})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_PORT): cv.string,
vol.Required(CONF_ZONES): vol.Schema({ZONE_IDS: ZONE_SCHEMA}),
vol.Required(CONF_SOURCES): vol.Schema({SOURCE_IDS: SOURCE_SCHEMA}),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Monoprice 6-zone amplifier platform."""
port = config.get(CONF_PORT)
from serial import SerialException
from pymonoprice import get_monoprice
try:
monoprice = get_monoprice(port)
except SerialException:
_LOGGER.error("Error connecting to Monoprice controller")
return
sources = {
source_id: extra[CONF_NAME] for source_id, extra in config[CONF_SOURCES].items()
}
hass.data[DATA_MONOPRICE] = []
for zone_id, extra in config[CONF_ZONES].items():
_LOGGER.info("Adding zone %d - %s", zone_id, extra[CONF_NAME])
hass.data[DATA_MONOPRICE].append(
MonopriceZone(monoprice, sources, zone_id, extra[CONF_NAME])
)
add_entities(hass.data[DATA_MONOPRICE], True)
def service_handle(service):
"""Handle for services."""
entity_ids = service.data.get(ATTR_ENTITY_ID)
if entity_ids:
devices = [
device
for device in hass.data[DATA_MONOPRICE]
if device.entity_id in entity_ids
]
else:
devices = hass.data[DATA_MONOPRICE]
for device in devices:
if service.service == SERVICE_SNAPSHOT:
device.snapshot()
elif service.service == SERVICE_RESTORE:
device.restore()
hass.services.register(
DOMAIN, SERVICE_SNAPSHOT, service_handle, schema=MEDIA_PLAYER_SCHEMA
)
hass.services.register(
DOMAIN, SERVICE_RESTORE, service_handle, schema=MEDIA_PLAYER_SCHEMA
)
class MonopriceZone(MediaPlayerDevice):
"""Representation of a Monoprice amplifier zone."""
def __init__(self, monoprice, sources, zone_id, zone_name):
"""Initialize new zone."""
self._monoprice = monoprice
# dict source_id -> source name
self._source_id_name = sources
# dict source name -> source_id
self._source_name_id = {v: k for k, v in sources.items()}
# ordered list of all source names
self._source_names = sorted(
self._source_name_id.keys(), key=lambda v: self._source_name_id[v]
)
self._zone_id = zone_id
self._name = zone_name
self._snapshot = None
self._state = None
self._volume = None
self._source = None
self._mute = None
def update(self):
"""Retrieve latest state."""
state = self._monoprice.zone_status(self._zone_id)
if not state:
return False
self._state = STATE_ON if state.power else STATE_OFF
self._volume = state.volume
self._mute = state.mute
idx = state.source
if idx in self._source_id_name:
self._source = self._source_id_name[idx]
else:
self._source = None
return True
@property
def name(self):
"""Return the name of the zone."""
return self._name
@property
def state(self):
"""Return the state of the zone."""
return self._state
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
if self._volume is None:
return None
return self._volume / 38.0
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._mute
@property
def supported_features(self):
"""Return flag of media commands that are supported."""
return SUPPORT_MONOPRICE
@property
def media_title(self):
"""Return the current source as medial title."""
return self._source
@property
def source(self):
"""Return the current input source of the device."""
return self._source
@property
def source_list(self):
"""List of available input sources."""
return self._source_names
def snapshot(self):
"""Save zone's current state."""
self._snapshot = self._monoprice.zone_status(self._zone_id)
def restore(self):
"""Restore saved state."""
if self._snapshot:
self._monoprice.restore_zone(self._snapshot)
self.schedule_update_ha_state(True)
def select_source(self, source):
"""Set input source."""
if source not in self._source_name_id:
return
idx = self._source_name_id[source]
self._monoprice.set_source(self._zone_id, idx)
def turn_on(self):
"""Turn the media player on."""
self._monoprice.set_power(self._zone_id, True)
def turn_off(self):
"""Turn the media player off."""
self._monoprice.set_power(self._zone_id, False)
def mute_volume(self, mute):
"""Mute (true) or unmute (false) media player."""
self._monoprice.set_mute(self._zone_id, mute)
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._monoprice.set_volume(self._zone_id, int(volume * 38))
def volume_up(self):
"""Volume up the media player."""
if self._volume is None:
return
self._monoprice.set_volume(self._zone_id, min(self._volume + 1, 38))
def volume_down(self):
"""Volume down media player."""
if self._volume is None:
return
self._monoprice.set_volume(self._zone_id, max(self._volume - 1, 0))
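# Example configuration.yaml entry accepted by PLATFORM_SCHEMA above (a
# sketch; the port, zone names and source names are illustrative):
#
#     media_player:
#       - platform: monoprice
#         port: /dev/ttyUSB0
#         zones:
#           11:
#             name: Living room
#           12:
#             name: Kitchen
#         sources:
#           1:
#             name: Sonos
#           2:
#             name: TV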
|
joopert/home-assistant
|
homeassistant/components/monoprice/media_player.py
|
Python
|
apache-2.0
| 7,123
| 0.000421
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-13 18:08
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='ProxyGrantingTicket',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('session_key', models.CharField(blank=True, max_length=255, null=True)),
('pgtiou', models.CharField(blank=True, max_length=255, null=True)),
('pgt', models.CharField(blank=True, max_length=255, null=True)),
('date', models.DateTimeField(auto_now_add=True)),
('user', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='SessionTicket',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('session_key', models.CharField(max_length=255)),
('ticket', models.CharField(max_length=255)),
],
),
migrations.AlterUniqueTogether(
name='proxygrantingticket',
unique_together=set([('session_key', 'user')]),
),
]
|
bgroff/django-cas-ng
|
django_cas_ng/migrations/0001_initial.py
|
Python
|
mit
| 1,628
| 0.003686
|
from numpy.testing import *
import numpy as np
import lulu
import lulu.connected_region_handler as crh
class TestLULU:
img = np.zeros((5, 5)).astype(int)
img[0, 0:5] = 0
img[:, 4] = 1
img[1:3, 1:4] = 2
"""
[[0 0 0 0 1]
[0 2 2 2 1]
[0 2 2 2 1]
[0 0 0 0 1]
[0 0 0 0 1]]
"""
def test_connected_regions(self):
labels, regions = lulu.connected_regions(self.img)
assert_array_equal(labels, self.img)
assert_equal(len(regions), 3)
crh.set_value(regions[0], 5)
assert_array_equal(crh.todense(regions[0]),
[[5, 5, 5, 5, 0],
[5, 0, 0, 0, 0],
[5, 0, 0, 0, 0],
[5, 5, 5, 5, 0],
[5, 5, 5, 5, 0]])
assert_array_equal(crh.todense(regions[1]),
[[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1],
[0, 0, 0, 0, 1]])
assert_array_equal(crh.todense(regions[2]),
[[0, 0, 0, 0, 0],
[0, 2, 2, 2, 0],
[0, 2, 2, 2, 0],
[0, 0, 0, 0, 0],
[0, 0, 0, 0, 0]])
class TestReconstruction:
def test_basic(self):
img = np.random.randint(255, size=(200, 200))
pulses = lulu.decompose(img)
img_, areas, area_count = lulu.reconstruct(pulses, img.shape)
        # Check the percentage mismatch first so that, on failure, we can
        # see how many pixels mismatch as a percent of the total nr of pixels
        assert_equal(np.sum(img_ != img) / float(np.prod(img.shape)) * 100,
                     0, "Percentage mismatch =")
        assert_array_equal(img_, img)
if __name__ == "__main__":
run_module_suite()
|
stefanv/lulu
|
lulu/tests/test_lulu.py
|
Python
|
bsd-3-clause
| 1,934
| 0.001551
|
#!/usr/bin/env python
from hts_barcode_checker import Taxon, TaxonDB
import logging, datetime, argparse, sqlite3
# NCBI taxonomy tree database 10.6084/m9.figshare.4620733
parser = argparse.ArgumentParser(description = 'Create a table containing the CITES species')
parser.add_argument('-db', '--CITES_db', metavar='CITES database name', dest='db',type=str,
help='Name and path to the output location for the CITES database')
parser.add_argument('-csv', '--CITES_dump', metavar='CITES CSV dump', dest='dmp', type=str,
help='Location of the CSV dump downloaded from CITES')
parser.add_argument('-ncbi', '--NCBI_taxonomy', metavar='NCBI taxonomy tree database', dest='n', type=str,
help='Location of sqlite database with NCBI taxonomy tree')
parser.add_argument('-l', '--logging', metavar='log level', dest='l', type=str,
help = 'Set log level to: debug, info, warning (default) or critical see readme for more details.', default='warning')
parser.add_argument('-lf', '--log_file', metavar='log file', dest='lf', type=str,
help = 'Path to the log file')
args = parser.parse_args()
def main ():
# configure logger
log_level = getattr(logging, args.l.upper(), None)
log_format = '%(funcName)s [%(lineno)d]: %(levelname)s: %(message)s'
if not isinstance(log_level, int):
        raise ValueError('Invalid log level: %s' % args.l)
    # -lf has no default, so an unset log file arrives as None
    if not args.lf:
logging.basicConfig(format=log_format, level=log_level)
else:
logging.basicConfig(filename=args.lf, filemode='a', format=log_format, level=log_level)
# instantiate DB object, parse CITES dump
db = TaxonDB(date=str(datetime.datetime.now()))
db.from_dump(args.dmp)
# configure local sqlite database
conn = sqlite3.connect(args.n)
curr = conn.cursor()
# iterate over parsed taxa, resolve NCBI taxid and expand higher taxa
counter = 1
expanded = []
for taxon in db.taxa:
taxon.tnrs(cursor=curr)
result = taxon.expand(cursor=curr)
for taxid in result.keys():
expanded.append(Taxon(
appendix=taxon.appendix,
name=taxon.name,
description=taxon.description,
footnotes=taxon.footnotes,
ncbi={taxid:result[taxid]}
))
logging.info('%d/%d' % ( counter, len(db.taxa) ))
counter += 1
# write output
for taxon in expanded:
db.taxa.append(taxon)
handle = open(args.db, 'w')
db.to_csv(handle)
handle.close()
if __name__ == "__main__":
main()
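# Invocation sketch (file paths are illustrative; flags as defined by the
# argparse configuration above):
#
#     python Parse_CITES.py -db cites_table.csv -csv cites_dump.csv \
#         -ncbi ncbi_taxonomy.sqlite -l info -lf parse_cites.log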
|
naturalis/HTS-barcode-checker
|
src/Parse_CITES.py
|
Python
|
bsd-3-clause
| 2,371
| 0.033319
|
#!/usr/bin/env python3
# Copyright 2015 gRPC authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the appropriate JSON data for LB interop test scenarios."""
import json
import os
import yaml
all_scenarios = []
# TODO(https://github.com/grpc/grpc-go/issues/2347): enable
# client_falls_back_because_no_backends_* scenarios for Java/Go.
# TODO(https://github.com/grpc/grpc-java/issues/4887): enable
# *short_stream* scenarios for Java.
# TODO(https://github.com/grpc/grpc-java/issues/4912): enable
# Java TLS tests involving TLS to the balancer.
def server_sec(transport_sec):
if transport_sec == 'google_default_credentials':
return 'alts', 'alts', 'tls'
return transport_sec, transport_sec, transport_sec
def generate_no_balancer_because_lb_a_record_returns_nx_domain():
all_configs = []
for transport_sec in [
'insecure', 'alts', 'tls', 'google_default_credentials'
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
config = {
'name':
'no_balancer_because_lb_a_record_returns_nx_domain_%s' %
transport_sec,
'skip_langs': [],
'transport_sec':
transport_sec,
'balancer_configs': [],
'backend_configs': [],
'fallback_configs': [{
'transport_sec': fallback_sec,
}],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_no_balancer_because_lb_a_record_returns_nx_domain()
def generate_no_balancer_because_lb_a_record_returns_no_data():
all_configs = []
for transport_sec in [
'insecure', 'alts', 'tls', 'google_default_credentials'
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
config = {
'name':
'no_balancer_because_lb_a_record_returns_no_data_%s' %
transport_sec,
'skip_langs': [],
'transport_sec':
transport_sec,
'balancer_configs': [],
'backend_configs': [],
'fallback_configs': [{
'transport_sec': fallback_sec,
}],
'cause_no_error_no_data_for_balancer_a_record':
True,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_no_balancer_because_lb_a_record_returns_no_data()
def generate_client_referred_to_backend():
all_configs = []
for balancer_short_stream in [True, False]:
for transport_sec in [
'insecure', 'alts', 'tls', 'google_default_credentials'
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = []
if transport_sec == 'tls':
skip_langs += ['java']
if balancer_short_stream:
skip_langs += ['java']
config = {
'name':
'client_referred_to_backend_%s_short_stream_%s' %
(transport_sec, balancer_short_stream),
'skip_langs':
skip_langs,
'transport_sec':
transport_sec,
'balancer_configs': [{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
}],
'backend_configs': [{
'transport_sec': backend_sec,
}],
'fallback_configs': [],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_referred_to_backend()
def generate_client_referred_to_backend_fallback_broken():
all_configs = []
for balancer_short_stream in [True, False]:
for transport_sec in ['alts', 'tls', 'google_default_credentials']:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = []
if transport_sec == 'tls':
skip_langs += ['java']
if balancer_short_stream:
skip_langs += ['java']
config = {
'name':
'client_referred_to_backend_fallback_broken_%s_short_stream_%s'
% (transport_sec, balancer_short_stream),
'skip_langs':
skip_langs,
'transport_sec':
transport_sec,
'balancer_configs': [{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
}],
'backend_configs': [{
'transport_sec': backend_sec,
}],
'fallback_configs': [{
'transport_sec': 'insecure',
}],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_referred_to_backend_fallback_broken()
def generate_client_referred_to_backend_multiple_backends():
all_configs = []
for balancer_short_stream in [True, False]:
for transport_sec in [
'insecure', 'alts', 'tls', 'google_default_credentials'
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = []
if transport_sec == 'tls':
skip_langs += ['java']
if balancer_short_stream:
skip_langs += ['java']
config = {
'name':
'client_referred_to_backend_multiple_backends_%s_short_stream_%s'
% (transport_sec, balancer_short_stream),
'skip_langs':
skip_langs,
'transport_sec':
transport_sec,
'balancer_configs': [{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
}],
'backend_configs': [{
'transport_sec': backend_sec,
}, {
'transport_sec': backend_sec,
}, {
'transport_sec': backend_sec,
}, {
'transport_sec': backend_sec,
}, {
'transport_sec': backend_sec,
}],
'fallback_configs': [],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_referred_to_backend_multiple_backends()
def generate_client_falls_back_because_no_backends():
all_configs = []
for balancer_short_stream in [True, False]:
for transport_sec in [
'insecure', 'alts', 'tls', 'google_default_credentials'
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = ['go', 'java']
if transport_sec == 'tls':
skip_langs += ['java']
if balancer_short_stream:
skip_langs += ['java']
config = {
'name':
'client_falls_back_because_no_backends_%s_short_stream_%s' %
(transport_sec, balancer_short_stream),
'skip_langs':
skip_langs,
'transport_sec':
transport_sec,
'balancer_configs': [{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
}],
'backend_configs': [],
'fallback_configs': [{
'transport_sec': fallback_sec,
}],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_falls_back_because_no_backends()
def generate_client_falls_back_because_balancer_connection_broken():
all_configs = []
for transport_sec in ['alts', 'tls', 'google_default_credentials']:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = []
if transport_sec == 'tls':
skip_langs = ['java']
config = {
'name':
'client_falls_back_because_balancer_connection_broken_%s' %
transport_sec,
'skip_langs':
skip_langs,
'transport_sec':
transport_sec,
'balancer_configs': [{
'transport_sec': 'insecure',
'short_stream': False,
}],
'backend_configs': [],
'fallback_configs': [{
'transport_sec': fallback_sec,
}],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_falls_back_because_balancer_connection_broken()
def generate_client_referred_to_backend_multiple_balancers():
all_configs = []
for balancer_short_stream in [True, False]:
for transport_sec in [
'insecure', 'alts', 'tls', 'google_default_credentials'
]:
balancer_sec, backend_sec, fallback_sec = server_sec(transport_sec)
skip_langs = []
if transport_sec == 'tls':
skip_langs += ['java']
if balancer_short_stream:
skip_langs += ['java']
config = {
'name':
'client_referred_to_backend_multiple_balancers_%s_short_stream_%s'
% (transport_sec, balancer_short_stream),
'skip_langs':
skip_langs,
'transport_sec':
transport_sec,
'balancer_configs': [
{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
},
{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
},
{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
},
{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
},
{
'transport_sec': balancer_sec,
'short_stream': balancer_short_stream,
},
],
'backend_configs': [{
'transport_sec': backend_sec,
},],
'fallback_configs': [],
'cause_no_error_no_data_for_balancer_a_record':
False,
}
all_configs.append(config)
return all_configs
all_scenarios += generate_client_referred_to_backend_multiple_balancers()
print(yaml.dump({
    'lb_interop_test_scenarios': all_scenarios,
}))
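# Shape sketch of one emitted scenario (illustrative values; yaml.dump sorts
# keys alphabetically when rendering):
#
#     lb_interop_test_scenarios:
#     - backend_configs:
#       - {transport_sec: insecure}
#       balancer_configs:
#       - {short_stream: false, transport_sec: insecure}
#       cause_no_error_no_data_for_balancer_a_record: false
#       fallback_configs: []
#       name: client_referred_to_backend_insecure_short_stream_False
#       skip_langs: []
#       transport_sec: insecure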
|
ctiller/grpc
|
tools/run_tests/lb_interop_tests/gen_build_yaml.py
|
Python
|
apache-2.0
| 12,124
| 0.000577
|
# postgresql/ext.py
# Copyright (C) 2005-2015 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from ...sql import expression
from ...sql import elements
from ...sql import functions
from ...sql.schema import ColumnCollectionConstraint
from .array import ARRAY
class aggregate_order_by(expression.ColumnElement):
"""Represent a Postgresql aggregate order by expression.
E.g.::
from sqlalchemy.dialects.postgresql import aggregate_order_by
expr = func.array_agg(aggregate_order_by(table.c.a, table.c.b.desc()))
stmt = select([expr])
would represent the expression::
SELECT array_agg(a ORDER BY b DESC) FROM table;
Similarly::
expr = func.string_agg(
table.c.a,
aggregate_order_by(literal_column("','"), table.c.a)
)
stmt = select([expr])
Would represent::
SELECT string_agg(a, ',' ORDER BY a) FROM table;
.. versionadded:: 1.1
.. seealso::
:class:`.array_agg`
"""
__visit_name__ = 'aggregate_order_by'
def __init__(self, target, order_by):
self.target = elements._literal_as_binds(target)
self.order_by = elements._literal_as_binds(order_by)
def self_group(self, against=None):
return self
def get_children(self, **kwargs):
return self.target, self.order_by
def _copy_internals(self, clone=elements._clone, **kw):
self.target = clone(self.target, **kw)
self.order_by = clone(self.order_by, **kw)
@property
def _from_objects(self):
return self.target._from_objects + self.order_by._from_objects
class ExcludeConstraint(ColumnCollectionConstraint):
"""A table-level EXCLUDE constraint.
Defines an EXCLUDE constraint as described in the `postgres
documentation`__.
__ http://www.postgresql.org/docs/9.0/\
static/sql-createtable.html#SQL-CREATETABLE-EXCLUDE
"""
__visit_name__ = 'exclude_constraint'
where = None
def __init__(self, *elements, **kw):
"""
:param \*elements:
A sequence of two tuples of the form ``(column, operator)`` where
column must be a column name or Column object and operator must
be a string containing the operator to use.
:param name:
Optional, the in-database name of this constraint.
:param deferrable:
Optional bool. If set, emit DEFERRABLE or NOT DEFERRABLE when
issuing DDL for this constraint.
:param initially:
Optional string. If set, emit INITIALLY <value> when issuing DDL
for this constraint.
:param using:
Optional string. If set, emit USING <index_method> when issuing DDL
for this constraint. Defaults to 'gist'.
:param where:
Optional string. If set, emit WHERE <predicate> when issuing DDL
for this constraint.
"""
columns = []
render_exprs = []
self.operators = {}
expressions, operators = zip(*elements)
for (expr, column, strname, add_element), operator in zip(
self._extract_col_expression_collection(expressions),
operators
):
if add_element is not None:
columns.append(add_element)
name = column.name if column is not None else strname
if name is not None:
# backwards compat
self.operators[name] = operator
expr = expression._literal_as_text(expr)
render_exprs.append(
(expr, name, operator)
)
self._render_exprs = render_exprs
ColumnCollectionConstraint.__init__(
self,
*columns,
name=kw.get('name'),
deferrable=kw.get('deferrable'),
initially=kw.get('initially')
)
self.using = kw.get('using', 'gist')
where = kw.get('where')
if where is not None:
self.where = expression._literal_as_text(where)
def copy(self, **kw):
elements = [(col, self.operators[col])
for col in self.columns.keys()]
c = self.__class__(*elements,
name=self.name,
deferrable=self.deferrable,
initially=self.initially)
c.dispatch._update(self.dispatch)
return c
def array_agg(*arg, **kw):
"""Postgresql-specific form of :class:`.array_agg`, ensures
return type is :class:`.postgresql.ARRAY` and not
the plain :class:`.types.ARRAY`.
.. versionadded:: 1.1
"""
kw['type_'] = ARRAY(functions._type_from_args(arg))
return functions.func.array_agg(*arg, **kw)
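# Usage sketch for ExcludeConstraint (not part of the original module; the
# table and column names are illustrative). This forbids two rows for the
# same room whose time ranges overlap:
#
#     from sqlalchemy import Column, Integer, MetaData, Table
#     from sqlalchemy.dialects.postgresql import ExcludeConstraint, TSRANGE
#     metadata = MetaData()
#     booking = Table(
#         'room_booking', metadata,
#         Column('room', Integer),
#         Column('during', TSRANGE),
#         ExcludeConstraint(('room', '='), ('during', '&&'), using='gist'),
#     )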
|
ThiefMaster/sqlalchemy
|
lib/sqlalchemy/dialects/postgresql/ext.py
|
Python
|
mit
| 4,889
| 0.000205
|
#
"""test_python_compat - Python output compatibility tests"""
# Copyright © 2012-2018 James Rowe <jnrowe@gmail.com>
#
# SPDX-License-Identifier: GPL-3.0-or-later
#
# This file is part of versionah.
#
# versionah is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# versionah is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# versionah. If not, see <http://www.gnu.org/licenses/>.
from os import getenv
from shutil import which
from subprocess import PIPE, call
from pytest import mark, skip
from versionah.cmdline import CliVersion
@mark.requires_exec
@mark.requires_write
@mark.parametrize('interp', [
'python2.6',
'python2.7',
'python3.2',
'python3.3',
])
def test_python_compatibility(interp, tmpdir):
if not which(interp):
skip('Interpreter {!r} unavailable'.format(interp))
file_loc = tmpdir.join('test_wr.py').strpath
CliVersion('1.0.1').write(file_loc, 'py')
retval = call([interp, '-W', 'all', file_loc], stdout=PIPE,
stderr=PIPE)
assert retval == 0
# Test interps not available on travis-ci.org, but available on all our test
# machines
@mark.skipif(getenv('TRAVIS_PYTHON_VERSION'), reason='Unavailable on travis')
@mark.requires_exec
@mark.requires_write
@mark.parametrize('interp', [
'python2.4',
'python2.5',
'python3.1',
'python3.4',
])
def test_python_compatibility_extra(interp, tmpdir):
    if not which(interp):
        skip('Interpreter {!r} unavailable'.format(interp))
    # tmpdir must be passed through: test_python_compatibility requires it
    test_python_compatibility(interp, tmpdir)
|
JNRowe/versionah
|
tests/test_python_compat.py
|
Python
|
gpl-3.0
| 1,950
| 0
|
''' Test bug 389: http://bugs.openbossa.org/show_bug.cgi?id=389'''
import sys
import unittest
from helper import UsesQApplication
from PySide import QtCore,QtGui
class BugTest(UsesQApplication):
def testCase(self):
s = QtGui.QWidget().style()
i = s.standardIcon(QtGui.QStyle.SP_TitleBarMinButton)
self.assertEqual(type(i), QtGui.QIcon)
if __name__ == '__main__':
unittest.main()
|
enthought/pyside
|
tests/QtGui/bug_389.py
|
Python
|
lgpl-2.1
| 414
| 0.007246
|
from django.contrib import admin
from models import *
from cards.actions import export_as_xls
class ScanAdmin(admin.ModelAdmin):
list_filter = ['readerLocation', 'added']
search_fields = ['card__code']
# Register your models here.
admin.site.register(Batch)
admin.site.register(Card)
admin.site.register(Reader)
admin.site.register(Location)
admin.site.register(ReaderLocation)
admin.site.register(Scan, ScanAdmin)
class MyAdmin(admin.ModelAdmin):
actions = [export_as_xls]
admin.site.add_action(export_as_xls)
|
i-DAT-Qualia/Card-Backend
|
cards/admin.py
|
Python
|
apache-2.0
| 528
| 0.007576
|
from django.contrib.auth.backends import ModelBackend
from django.contrib.sites.models import Site
from socialregistration.contrib.twitter.models import TwitterProfile
class TwitterAuth(ModelBackend):
def authenticate(self, twitter_id=None):
try:
return TwitterProfile.objects.get(
twitter_id=twitter_id,
site=Site.objects.get_current()
).user
except TwitterProfile.DoesNotExist:
return None
|
lgapontes/django-socialregistration
|
socialregistration/contrib/twitter/auth.py
|
Python
|
mit
| 487
| 0.002053
|
import _plotly_utils.basevalidators
class SizeValidator(_plotly_utils.basevalidators.AnyValidator):
def __init__(self, plotly_name="size", parent_name="histogram2d.xbins", **kwargs):
super(SizeValidator, self).__init__(
plotly_name=plotly_name,
parent_name=parent_name,
edit_type=kwargs.pop("edit_type", "calc"),
**kwargs
)
|
plotly/plotly.py
|
packages/python/plotly/plotly/validators/histogram2d/xbins/_size.py
|
Python
|
mit
| 394
| 0.002538
|
# Build reference and alternate FASTA files for a list of variants.
# info_mappable_50.txt holds one header per variant in its first column;
# the second underscore-separated field of each header encodes "ref>alt".
# seqList_mappable_50.fa holds each variant's sequence with 7 bp of
# flanking context on either side.
info=[]
for line in open('./info_mappable_50.txt').read().rstrip().split('\n'):
    a=line.split('\t')
    info.append(a[0])
line_out=''
line_out2=''
seq=''
index=0
for line in open('./seqList_mappable_50.fa'):
    if line[0]=='>':
        if seq:
            # sanity check: the bases between the 7 bp flanks must match
            # the expected reference allele
            replace=seq[7:-7]
            if replace!=ref:
                print header
            # splice the alternate allele between the flanks
            seq_alt=seq[:7]+alt+seq[-7:]
            line_out+='>'+header+'_ref'+'\n'+seq+'\n'
            line_out2+='>'+header+'_alt'+'\n'+seq_alt+'\n'
        header=info[index]
        ref_alt=header.split('_')[1]
        [ref,alt]=ref_alt.split('>')
        index+=1
        seq=''
        # progress marker: integer and float division agree only when
        # index is an exact multiple of 1,000,000
        if index/1000000==index/1000000.0:
            print index
    else:
        seq+=line.split('\n')[0]
# flush the final record, which has no following '>' line to trigger it
if seq:
    replace=seq[7:-7]
    if replace!=ref:
        print header
    seq_alt=seq[:7]+alt+seq[-7:]
    line_out+='>'+header+'_ref'+'\n'+seq+'\n'
    line_out2+='>'+header+'_alt'+'\n'+seq_alt+'\n'
open('./seqList_mappable_50_ref.fa','wb').write(line_out)
open('./seqList_mappable_50_alt.fa','wb').write(line_out2)
|
wzhang1984/Noncoding-tumor-mutation-paper
|
motif_analysis/ref2alt.py
|
Python
|
gpl-3.0
| 1,057
| 0.02649
|
#!/usr/bin/env python
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "clone.settings")
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
|
sigma-geosistemas/clone
|
src/manage.py
|
Python
|
lgpl-3.0
| 248
| 0
|
# coding: utf8
# OeQ autogenerated lookup function for 'Window/Wall Ratio East in correlation to year of construction, based on the source data of the survey for the "German Building Typology" developed by the "Institut für Wohnen und Umwelt", Darmstadt/Germany, 2011-2013'
import math
import numpy as np
import oeqLookuptable as oeq
def get(*xin):
l_lookup = oeq.lookuptable(
[
1849,0.031,
1850,0.031,
1851,0.03,
1852,0.027,
1853,0.024,
1854,0.025,
1855,0.03,
1856,0.042,
1857,0.06,
1858,0.082,
1859,0.105,
1860,0.128,
1861,0.15,
1862,0.168,
1863,0.18,
1864,0.18,
1865,0.18,
1866,0.18,
1867,0.18,
1868,0.179,
1869,0.179,
1870,0.179,
1871,0.18,
1872,0.18,
1873,0.18,
1874,0.18,
1875,0.18,
1876,0.18,
1877,0.18,
1878,0.18,
1879,0.18,
1880,0.18,
1881,0.18,
1882,0.18,
1883,0.18,
1884,0.18,
1885,0.18,
1886,0.18,
1887,0.18,
1888,0.18,
1889,0.18,
1890,0.18,
1891,0.18,
1892,0.18,
1893,0.18,
1894,0.18,
1895,0.18,
1896,0.18,
1897,0.18,
1898,0.18,
1899,0.18,
1900,0.18,
1901,0.18,
1902,0.18,
1903,0.18,
1904,0.18,
1905,0.18,
1906,0.18,
1907,0.18,
1908,0.179,
1909,0.179,
1910,0.179,
1911,0.18,
1912,0.18,
1913,0.18,
1914,0.18,
1915,0.18,
1916,0.168,
1917,0.15,
1918,0.128,
1919,0.105,
1920,0.082,
1921,0.06,
1922,0.042,
1923,0.03,
1924,0.025,
1925,0.024,
1926,0.027,
1927,0.03,
1928,0.031,
1929,0.031,
1930,0.031,
1931,0.03,
1932,0.03,
1933,0.03,
1934,0.03,
1935,0.03,
1936,0.03,
1937,0.03,
1938,0.03,
1939,0.03,
1940,0.03,
1941,0.03,
1942,0.03,
1943,0.03,
1944,0.03,
1945,0.03,
1946,0.029,
1947,0.026,
1948,0.02,
1949,0.012,
1950,0.003,
1951,0,
1952,0,
1953,0,
1954,0.014,
1955,0.036,
1956,0.062,
1957,0.09,
1958,0.118,
1959,0.144,
1960,0.165,
1961,0.18,
1962,0.18,
1963,0.18,
1964,0.18,
1965,0.18,
1966,0.173,
1967,0.166,
1968,0.158,
1969,0.15,
1970,0.141,
1971,0.133,
1972,0.125,
1973,0.12,
1974,0.118,
1975,0.117,
1976,0.118,
1977,0.12,
1978,0.121,
1979,0.12,
1980,0.113,
1981,0.1,
1982,0.078,
1983,0.053,
1984,0.028,
1985,0.01,
1986,0.002,
1987,0.002,
1988,0.006,
1989,0.01,
1990,0.011,
1991,0.01,
1992,0.009,
1993,0.01,
1994,0.013,
1995,0.019,
1996,0.025,
1997,0.03,
1998,0.034,
1999,0.036,
2000,0.038,
2001,0.04,
2002,0.043,
2003,0.045,
2004,0.048,
2005,0.05,
2006,0.051,
2007,0.051,
2008,0.05,
2009,0.05,
2010,0.05,
2011,0.05,
2012,0.05,
2013,0.05,
2014,0.05,
2015,0.05,
2016,0.05,
2017,0.05,
2018,0.05,
2019,0.05,
2020,0.05,
2021,0.05])
return(l_lookup.lookup(xin))
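# Usage sketch: look up the east-facing window/wall ratio for a given year
# of construction. Per the table above, a building from 1975 should come
# out near 0.117 (exact behaviour depends on oeqLookuptable's lookup()).
#
#     ratio = get(1975)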
|
UdK-VPT/Open_eQuarter
|
mole/stat_corr/window_wall_ratio_east_MFH_by_building_age_lookup.py
|
Python
|
gpl-2.0
| 2,380
| 0.147121
|
try:
from django.contrib.auth.tests.utils import skipIfCustomUser
except ImportError:
def skipIfCustomUser(wrapped):
return wrapped
|
maroux/django-oauth2-provider
|
provider/compat/__init__.py
|
Python
|
mit
| 148
| 0
|
# Copyright (c) 2016, LE GOFF Vincent
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of ytranslate nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""This file contains the SharpEditor class."""
import wx
from ytranslate.tools import t
class SharpEditor(wx.Panel):
"""SharpScript editor panel.
This panel can be added to dialogs that have to support SharpScript
editing. At the top left of the panel is an optional text field
for editing SharpScript directly. To its right is a list of
functions already associated with this entry, followed by the
edit and remove buttons and a second list of new functions to add.
"""
def __init__(self, dialog, engine, sharp, object, attribute,
text=False, escape=False):
"""Creates the frame.
Arguments:
dialog: the parent dialog.
engine: the game engine.
sharp: the SharpScript engine.
object: the object containing the field to be edited.
attribute: the attribute's name of the object to edit.
text (defaults to False): should a text field be added?
escape (defaults to False): if True, the #send calls are removed.
For instance, if the SharpEditor is to modify a trigger's
"action" attribute, then the trigger is the object and
"action" is the attribute's name.
"""
wx.Panel.__init__(self, dialog)
self.engine = engine
self.sharp_engine = sharp
self.object = object
self.attribute = attribute
self.text = None
self.escape = escape
script = getattr(self.object, self.attribute)
self.functions = sorted(sharp.functions.values(),
key=lambda function: function.name)
self.functions = [f for f in self.functions if f.description]
# Shape
sizer = wx.BoxSizer(wx.VERTICAL)
top = wx.BoxSizer(wx.HORIZONTAL)
bottom = wx.BoxSizer(wx.HORIZONTAL)
self.SetSizer(sizer)
# Insert a text field
if text:
s_text = wx.BoxSizer(wx.VERTICAL)
l_text = wx.StaticText(self, label=t("common.action"))
t_text = wx.TextCtrl(self, value=script, style=wx.TE_MULTILINE)
self.text = t_text
s_text.Add(l_text)
s_text.Add(t_text)
top.Add(s_text)
# List of current functions
self.existing = wx.ListCtrl(self,
style=wx.LC_REPORT | wx.LC_SINGLE_SEL)
self.existing.InsertColumn(0, t("common.action"))
# Buttons
self.edit = wx.Button(self, label=t("ui.button.edit"))
self.remove = wx.Button(self, label=t("ui.button.remove"))
top.Add(self.existing)
top.Add(self.edit)
top.Add(self.remove)
self.populate_existing()
# List of functions
self.choices = wx.ListCtrl(self, style=wx.LC_REPORT | wx.LC_SINGLE_SEL)
self.choices.InsertColumn(0, t("common.description"))
self.populate_list()
bottom.Add(self.choices)
# Add button
self.add = wx.Button(self, label=t("ui.button.add_action"))
bottom.Add(self.add)
# Event binding
self.add.Bind(wx.EVT_BUTTON, self.OnAdd)
self.edit.Bind(wx.EVT_BUTTON, self.OnEdit)
self.remove.Bind(wx.EVT_BUTTON, self.OnRemove)
def populate_list(self):
"""Populate the list with function names."""
self.choices.DeleteAllItems()
for function in self.functions:
try:
description = t("sharp.{name}.description".format(
name=function.name))
except ValueError:
description = function.description
self.choices.Append((description, ))
self.choices.Select(0)
self.choices.Focus(0)
def populate_existing(self):
"""Populate the list with existing functions."""
self.existing.DeleteAllItems()
script = getattr(self.object, self.attribute)
if self.text:
self.text.SetValue(script)
lines = self.sharp_engine.format(script, return_str=False)
for line in lines:
self.existing.Append((line, ))
self.existing.Select(0)
self.existing.Focus(0)
if lines:
self.existing.Enable()
self.edit.Enable()
self.remove.Enable()
else:
self.existing.Disable()
self.edit.Disable()
self.remove.Disable()
def OnAdd(self, e):
"""The 'add' button is pressed."""
index = self.choices.GetFirstSelected()
try:
function = self.functions[index]
except IndexError:
wx.MessageBox(t("ui.message.sharp.missing"),
t("ui.message.error"), wx.OK | wx.ICON_ERROR)
else:
dialog = AddEditFunctionDialog(self.engine, self.sharp_engine,
function, self.object, self.attribute, escape=self.escape)
dialog.ShowModal()
self.populate_existing()
self.existing.SetFocus()
def OnEdit(self, e):
"""The 'edit' button is pressed."""
index = self.existing.GetFirstSelected()
script = getattr(self.object, self.attribute)
lines = self.sharp_engine.format(script, return_str=False)
try:
line = lines[index]
except IndexError:
wx.MessageBox(t("ui.message.sharp.missing"),
t("ui.message.error"), wx.OK | wx.ICON_ERROR)
else:
name, arguments, flags = self.sharp_engine.extract_arguments(line)
function = self.sharp_engine.functions[name[1:]]
dialog = AddEditFunctionDialog(self.engine, self.sharp_engine,
function, self.object, self.attribute, index,
escape=self.escape)
dialog.ShowModal()
self.populate_existing()
self.existing.SetFocus()
def OnRemove(self, e):
"""The 'remove' button is pressed."""
index = self.existing.GetFirstSelected()
script = getattr(self.object, self.attribute)
lines = self.sharp_engine.format(script, return_str=False)
try:
line = lines[index]
except IndexError:
wx.MessageBox(t("ui.message.sharp.missing"),
t("ui.message.error"), wx.OK | wx.ICON_ERROR)
else:
value = wx.MessageBox(t("ui.message.sharp.remove",
line=line), t("ui.alert.confirm"),
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_QUESTION)
if value == wx.YES:
del lines[index]
content = "\n".join(lines)
setattr(self.object, self.attribute, content)
self.populate_existing()
self.existing.SetFocus()
class AddEditFunctionDialog(wx.Dialog):
"""Add or edit a function."""
def __init__(self, engine, sharp_engine, function, object, attribute,
index=-1, escape=False):
super(AddEditFunctionDialog, self).__init__(None,
title=t("common.action"))
self.engine = engine
self.sharp_engine = sharp_engine
self.world = sharp_engine.world
self.function = function
self.object = object
self.attribute = attribute
self.index = index
self.escape = escape
arguments = []
flags = {}
if index >= 0:
script = getattr(self.object, self.attribute)
lines = self.sharp_engine.format(script, return_str=False)
line = lines[index]
function, arguments, flags = self.sharp_engine.extract_arguments(line)
# Dialog
sizer = wx.BoxSizer(wx.VERTICAL)
self.top = wx.BoxSizer(wx.VERTICAL)
buttons = self.CreateButtonSizer(wx.OK | wx.CANCEL)
self.SetSizer(sizer)
# Add the function-specific configuration
sizer.Add(self.top)
self.function.display(self, *arguments, **flags)
sizer.Add(buttons)
# Event binding
self.Bind(wx.EVT_BUTTON, self.OnOk, id=wx.ID_OK)
self.Bind(wx.EVT_BUTTON, self.OnCancel, id=wx.ID_CANCEL)
def OnOk(self, e):
"""The 'OK' button is pressed."""
arguments = self.function.complete(self)
if arguments is not None:
function = "#" + self.function.name
lines = (((function, ) + arguments), )
line = self.sharp_engine.format(lines)
# Add to the entire content
lines = self.sharp_engine.format(getattr(self.object,
self.attribute), return_str=False)
if self.index >= 0:
lines[self.index] = line
else:
lines.append(line)
if self.escape:
print("escaping lines")
for i, line in enumerate(lines):
if line.startswith("#send "):
line = line[6:]
if line.startswith("{"):
line = line[1:-1]
lines[i] = line
content = "\n".join(lines)
setattr(self.object, self.attribute, content)
self.Destroy()
def OnCancel(self, e):
"""The 'cancel' button is pressed."""
self.Destroy()
|
vlegoff/cocomud
|
src/ui/sharp_editor.py
|
Python
|
bsd-3-clause
| 10,805
| 0.001574
|
try:
import exceptions
except ImportError: # Python 3
import builtins as exceptions
class ObjectToReturn:
def __init__(self, name):
self.name = name
def __str__(self):
return self.name
def exception(self, name, msg=""):
exception = getattr(exceptions, name)
raise exception(msg)
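# A minimal usage sketch (names are illustrative): the test suite can ask the
# returned object to raise an arbitrary built-in exception by name.
#
#   obj = ObjectToReturn('example')
#   str(obj)                              # -> 'example'
#   obj.exception('ValueError', 'boom')   # raises ValueError('boom')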
|
userzimmermann/robotframework-python3
|
atest/testresources/testlibs/objecttoreturn.py
|
Python
|
apache-2.0
| 343
| 0.011662
|
# encoding: utf-8
"""
Test lldb data formatter subsystem.
"""
from __future__ import print_function
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test import lldbutil
from ObjCDataFormatterTestCase import ObjCDataFormatterTestCase
class ObjCDataFormatterNSError(ObjCDataFormatterTestCase):
@skipUnlessDarwin
def test_nserror_with_run_command(self):
"""Test formatters for NSError."""
self.appkit_tester_impl(self.nserror_data_formatter_commands)
def nserror_data_formatter_commands(self):
self.expect(
'frame variable nserror', substrs=['domain: @"Foobar" - code: 12'])
self.expect(
'frame variable nserrorptr',
substrs=['domain: @"Foobar" - code: 12'])
self.expect(
'frame variable nserror->_userInfo', substrs=['2 key/value pairs'])
self.expect(
'frame variable nserror->_userInfo --ptr-depth 1 -d run-target',
substrs=['@"a"', '@"b"', "1", "2"])
|
llvm-mirror/lldb
|
packages/Python/lldbsuite/test/functionalities/data-formatter/data-formatter-objc/TestDataFormatterObjCNSError.py
|
Python
|
apache-2.0
| 1,050
| 0
|
from app import write_entries
import datetime
import random
ts = datetime.datetime.now().strftime("%Y-%m-%d%H:%M-%S")
offset = random.randrange(0, 1475)
print("Enter user=%s" % ts)
print("Enter email=%s" % offset)
prime = write_entries.delay(ts, offset)
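# write_entries appears to be a Celery-style task here: .delay(...) enqueues
# the call asynchronously and returns an AsyncResult-like handle.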
|
aseemm/flask-template
|
wentries.py
|
Python
|
bsd-3-clause
| 256
| 0
|
from enum import Enum
class ImageAlignType(Enum):
"""Image alignment"""
Default = 1
Left = 2
Right = 3
Center = 4
|
jablonskim/jupyweave
|
jupyweave/settings/align_types.py
|
Python
|
mit
| 137
| 0
|
import decor
from flask import Blueprint, redirect, request, url_for
import os, json
def construct_bp(gcal, JSON_DENT):
ALLOWED_ORIGIN = "*"
# JSON_DENT = 4
gcal_api = Blueprint('gcal_api', __name__, url_prefix="/gcal")
# GOOGLE CALENDAR API Routes
# Authentication routes
@gcal_api.route('/auth2callback')
def gauth_callback():
return redirect(gcal.auth_callback(request.args.get('code')))
@gcal_api.route('/gauth')
def gauth_call():
return redirect(gcal.get_auth_uri())
@gcal_api.route('/isauth')
def gauth_isauth():
return json.dumps({'is_needed': not gcal.need_auth()})
@gcal_api.route('/istblexist')
def gauth_istblex():
return json.dumps({'is_exist': gcal.if_cal_tbl()})
@gcal_api.route('/deauth')
def gauth_deauth():
return redirect(gcal.deauth_usr())
# Get todays events
@gcal_api.route('/today', methods=['GET','OPTIONS'])
@decor.crossdomain(origin=ALLOWED_ORIGIN)
def gcal_today():
return json.dumps(gcal.get_today(), indent=JSON_DENT)
# Get calendars
@gcal_api.route('/calendars', methods=['GET','OPTIONS'])
@decor.crossdomain(origin=ALLOWED_ORIGIN)
def gcal_cals():
return json.dumps(gcal.get_cals(), indent=JSON_DENT)
# Save calendars
@gcal_api.route('/add/calendars', methods=['POST','OPTIONS'])
@decor.crossdomain(origin=ALLOWED_ORIGIN)
def gcal_save_cals():
# print request.form.getlist('ids[]')
gcal.add_cals(request.form.getlist('ids[]'))
# print request.form
redirect(url_for('setcal'))
# return json.dumps(gcal.get_ucals(), indent=JSON_DENT)
return '<meta http-equiv="refresh" content ="0; URL=http://localhost:5000/setcal">'
# Get todays events
@gcal_api.route('/mail', methods=['GET'])
def gcal_mail():
return json.dumps(gcal.get_mail(), indent=JSON_DENT)
# === JSON Error Handling ===
# @gcal_api.errorhandler(400)
# def err_400(e):
# return '{"status": 400, "message":"Bad request"}', 400
@gcal_api.errorhandler(404)
def err_404(e):
return '{"status": 404, "message":"Page not found"}', 404
@gcal_api.errorhandler(500)
def err_500(e):
return '{"status": 500, "message":"Internal server error"}', 500
return gcal_api
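# A minimal usage sketch (assuming a Flask app object and whatever object
# wraps the Google Calendar client in this project; the JSON indent of 4 is
# illustrative):
#
#   app.register_blueprint(construct_bp(gcal, 4))
#   # routes are then served under the /gcal prefix, e.g. GET /gcal/today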
|
Techblogogy/magic-mirror-base
|
server/routes/gcal.py
|
Python
|
gpl-3.0
| 2,343
| 0.003841
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com thumbor@googlegroups.com
from thumbor.filters import BaseFilter, filter_method
from thumbor.utils import logger
ALLOWED_FORMATS = ["png", "jpeg", "jpg", "gif", "webp"]
class Filter(BaseFilter):
@filter_method(BaseFilter.String)
async def format(self, file_format):
if file_format.lower() not in ALLOWED_FORMATS:
logger.debug("Format not allowed: %s", file_format.lower())
self.context.request.format = None
else:
logger.debug("Format specified: %s", file_format.lower())
self.context.request.format = file_format.lower()
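# A minimal usage sketch (host and image path are illustrative): the filter is
# applied through thumbor's URL filter syntax, e.g.
#   http://localhost:8888/unsafe/filters:format(webp)/path/to/image.jpg
# Formats outside ALLOWED_FORMATS are ignored and the request format is reset
# to None.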
|
kkopachev/thumbor
|
thumbor/filters/format.py
|
Python
|
mit
| 823
| 0
|
import numpy as np
import json
import scipy as sci
def get_decimal_delta(data, index,decimals):
'''
This function calculates the differences between consecutive values of one column.
:param data: the data array
:param index: the index of the column of interest
:param decimals: number of decimal places to round the differences to.
If decimals is negative, it specifies the number of positions to the left of the decimal point.
:return: an array of differences between consecutive values in the column
'''
res = []
for t1, t2 in zip(data[:-1,int(index)], data[1:,int(index)]):
res.append(np.around(np.float64(t2) - np.float64(t1),decimals))
return np.array(res)
def get_delta(data, index):
'''
This function calculates the differences between consecutive values of one column.
:param data: the data array
:param index: the index of the column of interest
:return: an array of differences between consecutive values in the column
'''
realsol = []
i=1
while i < len(data[0:,index]):
intervall = data[i, index] - data[i - 1,index]
realsol.append(intervall)
i += 1
realsol = np.array(realsol)
return realsol
def get_average_delta(data, index):
'''
This function calculates the average difference between consecutive values of one column.
:param data: the data array
:param index: the index of the column of interest
:return: the average difference between consecutive values in the column
'''
deltas = get_decimal_delta(data, index, 7)
return sum(deltas) / len(deltas)
def numerical_approx(data, diff_Value1_Index, diff_Value2_Index = 0):
'''
This method differentiates one data column with respect to another.
Example: d Speed / d Time = Acceleration
:param data: a JSON-encoded 2-D array of the data
:param diff_Value1_Index: index of the column to take the derivative of
:param diff_Value2_Index: index of the column to differentiate by (usually the time index)
:return: an array of difference quotients, prefixed with a leading 0.0
'''
diff_Value = []
diff_Value.append(np.float_(0.000))
data = np.array(json.loads(data), dtype=np.float64)
for v1, t1 in zip(get_delta(data, int(diff_Value1_Index)), get_delta(data, int(diff_Value2_Index))):
diff_Value.append(v1 / t1)
return np.asarray(diff_Value)
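# A minimal sketch of numerical_approx in use (the column layout is
# hypothetical: column 0 holds time, column 1 holds speed; the input must be
# a JSON-encoded 2-D list, exactly as the function expects):
#
#   raw = json.dumps([[0.0, 0.0], [1.0, 2.0], [2.0, 6.0]])
#   acceleration = numerical_approx(raw, 1, 0)
#   # -> array([0., 2., 4.]), i.e. d speed / d time with a leading 0.0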
def trapez_for_each(data, index_x, index_y):
"""
This method integrates the given Values with the Trapeziodal Rule
:param index_x: index der X Achse
:param index_y: index der Y Achse
:return: integrated Values from x,y
"""
i = 1
sol = []
data =np.array(json.loads(data),dtype=np.float64)
#data =np.array(json.loads(data),dtype=np.float_)
while i < len(data[:,index_x]):
res = sci.trapz(data[0:i, index_y], data[0:i, index_x])
res = np.float_(res)
sol.append(res)
i += 1
i = 0
realsol = []
while i < len(sol):
if i == 0:
# no previous cumulative integral exists for the first point, so
# treat it as 0 (sol[i] - sol[i - 1] would wrap around to sol[-1])
realsol.append(np.float_(0))
intervall = sol[i]
else:
intervall = sol[i] - sol[i - 1]
realsol.append(intervall)
i += 1
realsol= np.array(realsol)
return realsol
|
IT-PM-OpenAdaptronik/Webapp
|
apps/calc/measurement/calculus.py
|
Python
|
mit
| 3,360
| 0.009226
|
import Adafruit_BBIO.GPIO as GPIO
import time
a=0
b=0
def derecha(channel):
global a
a+=1
print 'right count is {0}'.format(a)
def izquierda(channel):
global b
b+=1
print 'left count is {0}'.format(b)
GPIO.setup("P9_11", GPIO.IN)
GPIO.setup("P9_13", GPIO.IN)
GPIO.add_event_detect("P9_11", GPIO.BOTH)
GPIO.add_event_detect("P9_13", GPIO.BOTH)
GPIO.add_event_callback("P9_11",derecha)
GPIO.add_event_callback("P9_13",izquierda)
#if GPIO.event_detected("GPIO_31"):
# print "event detected"
while True:
print "cosas pasan"
time.sleep(1)
|
edwarod/quickbot_bbb
|
test.py
|
Python
|
bsd-3-clause
| 571
| 0.033275
|
#!/usr/bin/env python
from wsgiref.simple_server import make_server
import sys
import json
import traceback
import datetime
from multiprocessing import Process
from getopt import getopt, GetoptError
from jsonrpcbase import JSONRPCService, InvalidParamsError, KeywordError,\
JSONRPCError, ServerError, InvalidRequestError
from os import environ
from ConfigParser import ConfigParser
from biokbase import log
import biokbase.nexus
import requests as _requests
import urlparse as _urlparse
import random as _random
import os
DEPLOY = 'KB_DEPLOYMENT_CONFIG'
SERVICE = 'KB_SERVICE_NAME'
# Note that the error fields do not match the 2.0 JSONRPC spec
def get_config_file():
return environ.get(DEPLOY, None)
def get_service_name():
return environ.get(SERVICE, None)
def get_config():
if not get_config_file() or not get_service_name():
return None
retconfig = {}
config = ConfigParser()
config.read(get_config_file())
for nameval in config.items(get_service_name()):
retconfig[nameval[0]] = nameval[1]
return retconfig
config = get_config()
from GenomeFeatureComparatorImpl import GenomeFeatureComparator
impl_GenomeFeatureComparator = GenomeFeatureComparator(config)
class JSONObjectEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, set):
return list(obj)
if isinstance(obj, frozenset):
return list(obj)
if hasattr(obj, 'toJSONable'):
return obj.toJSONable()
return json.JSONEncoder.default(self, obj)
async_run_methods = {}
async_check_methods = {}
async_run_methods['GenomeFeatureComparator.compare_genome_features_async'] = ['GenomeFeatureComparator', 'compare_genome_features']
async_check_methods['GenomeFeatureComparator.compare_genome_features_check'] = ['GenomeFeatureComparator', 'compare_genome_features']
class AsyncJobServiceClient(object):
def __init__(self, timeout=30 * 60, token=None,
ignore_authrc=True, trust_all_ssl_certificates=False):
url = environ.get('KB_JOB_SERVICE_URL', None)
if url is None and config is not None:
url = config.get('job-service-url')
if url is None:
raise ValueError('Neither the \'job-service-url\' configuration parameter nor the '+
'\'KB_JOB_SERVICE_URL\' environment variable is defined')
scheme, _, _, _, _, _ = _urlparse.urlparse(url)
if scheme not in ['http', 'https']:
raise ValueError(url + " isn't a valid http url")
self.url = url
self.timeout = int(timeout)
self._headers = dict()
self.trust_all_ssl_certificates = trust_all_ssl_certificates
if token is None:
raise ValueError('Authentication is required for async methods')
self._headers['AUTHORIZATION'] = token
if self.timeout < 1:
raise ValueError('Timeout value must be at least 1 second')
def _call(self, method, params, json_rpc_call_context = None):
arg_hash = {'method': method,
'params': params,
'version': '1.1',
'id': str(_random.random())[2:]
}
if json_rpc_call_context:
arg_hash['context'] = json_rpc_call_context
body = json.dumps(arg_hash, cls=JSONObjectEncoder)
ret = _requests.post(self.url, data=body, headers=self._headers,
timeout=self.timeout,
verify=not self.trust_all_ssl_certificates)
if ret.status_code == _requests.codes.server_error:
if 'content-type' in ret.headers and ret.headers['content-type'] == 'application/json':
err = json.loads(ret.text)
if 'error' in err:
raise ServerError(**err['error'])
else:
raise ServerError('Unknown', 0, ret.text)
else:
raise ServerError('Unknown', 0, ret.text)
if ret.status_code != _requests.codes.OK:
ret.raise_for_status()
resp = json.loads(ret.text)
if 'result' not in resp:
raise ServerError('Unknown', 0, 'An unknown server error occurred')
return resp['result']
def run_job(self, run_job_params, json_rpc_call_context = None):
return self._call('KBaseJobService.run_job', [run_job_params], json_rpc_call_context)[0]
def check_job(self, job_id, json_rpc_call_context = None):
return self._call('KBaseJobService.check_job', [job_id], json_rpc_call_context)[0]
class JSONRPCServiceCustom(JSONRPCService):
def call(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
jsondata -- remote method call in jsonrpc format
"""
result = self.call_py(ctx, jsondata)
if result is not None:
return json.dumps(result, cls=JSONObjectEncoder)
return None
def _call_method(self, ctx, request):
"""Calls given method with given params and returns it value."""
method = self.method_data[request['method']]['method']
params = request['params']
result = None
try:
if isinstance(params, list):
# Does it have enough arguments?
if len(params) < self._man_args(method) - 1:
raise InvalidParamsError('not enough arguments')
# Does it have too many arguments?
if(not self._vargs(method) and len(params) >
self._max_args(method) - 1):
raise InvalidParamsError('too many arguments')
result = method(ctx, *params)
elif isinstance(params, dict):
# Do not accept keyword arguments if the jsonrpc version is
# not >=1.1.
if request['jsonrpc'] < 11:
raise KeywordError
result = method(ctx, **params)
else: # No params
result = method(ctx)
except JSONRPCError:
raise
except Exception as e:
# log.exception('method %s threw an exception' % request['method'])
# Exception was raised inside the method.
newerr = ServerError()
newerr.trace = traceback.format_exc()
newerr.data = e.message
raise newerr
return result
def call_py(self, ctx, jsondata):
"""
Calls jsonrpc service's method and returns its return value in python
object format or None if there is none.
This method is same as call() except the return value is a python
object instead of JSON string. This method is mainly only useful for
debugging purposes.
"""
rdata = jsondata
# we already deserialize the json string earlier in the server code, no
# need to do it again
# try:
# rdata = json.loads(jsondata)
# except ValueError:
# raise ParseError
# set some default values for error handling
request = self._get_default_vals()
if isinstance(rdata, dict) and rdata:
# It's a single request.
self._fill_request(request, rdata)
respond = self._handle_request(ctx, request)
# Don't respond to notifications
if respond is None:
return None
return respond
elif isinstance(rdata, list) and rdata:
# It's a batch.
requests = []
responds = []
for rdata_ in rdata:
# set some default values for error handling
request_ = self._get_default_vals()
self._fill_request(request_, rdata_)
requests.append(request_)
for request_ in requests:
respond = self._handle_request(ctx, request_)
# Don't respond to notifications
if respond is not None:
responds.append(respond)
if responds:
return responds
# Nothing to respond.
return None
else:
# empty dict, list or wrong type
raise InvalidRequestError
def _handle_request(self, ctx, request):
"""Handles given request and returns its response."""
if self.method_data[request['method']].has_key('types'): # @IgnorePep8
self._validate_params_types(request['method'], request['params'])
result = self._call_method(ctx, request)
# Do not respond to notifications.
if request['id'] is None:
return None
respond = {}
self._fill_ver(request['jsonrpc'], respond)
respond['result'] = result
respond['id'] = request['id']
return respond
class MethodContext(dict):
def __init__(self, logger):
self['client_ip'] = None
self['user_id'] = None
self['authenticated'] = None
self['token'] = None
self['module'] = None
self['method'] = None
self['call_id'] = None
self['rpc_context'] = None
self._debug_levels = set([7, 8, 9, 'DEBUG', 'DEBUG2', 'DEBUG3'])
self._logger = logger
def log_err(self, message):
self._log(log.ERR, message)
def log_info(self, message):
self._log(log.INFO, message)
def log_debug(self, message, level=1):
if level in self._debug_levels:
pass
else:
level = int(level)
if level < 1 or level > 3:
raise ValueError("Illegal log level: " + str(level))
level = level + 6
self._log(level, message)
def set_log_level(self, level):
self._logger.set_log_level(level)
def get_log_level(self):
return self._logger.get_log_level()
def clear_log_level(self):
self._logger.clear_user_log_level()
def _log(self, level, message):
self._logger.log_message(level, message, self['client_ip'],
self['user_id'], self['module'],
self['method'], self['call_id'])
def getIPAddress(environ):
xFF = environ.get('HTTP_X_FORWARDED_FOR')
realIP = environ.get('HTTP_X_REAL_IP')
trustXHeaders = config is None or \
config.get('dont_trust_x_ip_headers') != 'true'
if (trustXHeaders):
if (xFF):
return xFF.split(',')[0].strip()
if (realIP):
return realIP.strip()
return environ.get('REMOTE_ADDR')
class Application(object):
# Wrap the wsgi handler in a class definition so that we can
# do some initialization and avoid regenerating stuff over
# and over
def logcallback(self):
self.serverlog.set_log_file(self.userlog.get_log_file())
def log(self, level, context, message):
self.serverlog.log_message(level, message, context['client_ip'],
context['user_id'], context['module'],
context['method'], context['call_id'])
def __init__(self):
submod = get_service_name() or 'GenomeFeatureComparator'
self.userlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, changecallback=self.logcallback,
config=get_config_file())
self.serverlog = log.log(
submod, ip_address=True, authuser=True, module=True, method=True,
call_id=True, logfile=self.userlog.get_log_file())
self.serverlog.set_log_level(6)
self.rpc_service = JSONRPCServiceCustom()
self.method_authentication = dict()
self.rpc_service.add(impl_GenomeFeatureComparator.compare_genome_features,
name='GenomeFeatureComparator.compare_genome_features',
types=[object])
self.method_authentication['GenomeFeatureComparator.compare_genome_features'] = 'required'
self.auth_client = biokbase.nexus.Client(
config={'server': 'nexus.api.globusonline.org',
'verify_ssl': True,
'client': None,
'client_secret': None})
def __call__(self, environ, start_response):
# Context object, equivalent to the perl impl CallContext
ctx = MethodContext(self.userlog)
ctx['client_ip'] = getIPAddress(environ)
status = '500 Internal Server Error'
try:
body_size = int(environ.get('CONTENT_LENGTH', 0))
except (ValueError):
body_size = 0
if environ['REQUEST_METHOD'] == 'OPTIONS':
# we basically do nothing and just return headers
status = '200 OK'
rpc_result = ""
else:
request_body = environ['wsgi.input'].read(body_size)
try:
req = json.loads(request_body)
except ValueError as ve:
err = {'error': {'code': -32700,
'name': "Parse error",
'message': str(ve),
}
}
rpc_result = self.process_error(err, ctx, {'version': '1.1'})
else:
ctx['module'], ctx['method'] = req['method'].split('.')
ctx['call_id'] = req['id']
ctx['rpc_context'] = {'call_stack': [{'time':self.now_in_utc(), 'method': req['method']}]}
try:
token = environ.get('HTTP_AUTHORIZATION')
# parse out the method being requested and check if it
# has an authentication requirement
method_name = req['method']
if method_name in async_run_methods:
method_name = async_run_methods[method_name][0] + "." + async_run_methods[method_name][1]
if method_name in async_check_methods:
method_name = async_check_methods[method_name][0] + "." + async_check_methods[method_name][1]
auth_req = self.method_authentication.get(method_name,
"none")
if auth_req != "none":
if token is None and auth_req == 'required':
err = ServerError()
err.data = "Authentication required for " + \
"GenomeFeatureComparator but no authentication header was passed"
raise err
elif token is None and auth_req == 'optional':
pass
else:
try:
user, _, _ = \
self.auth_client.validate_token(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
except Exception, e:
if auth_req == 'required':
err = ServerError()
err.data = \
"Token validation failed: %s" % e
raise err
if (environ.get('HTTP_X_FORWARDED_FOR')):
self.log(log.INFO, ctx, 'X-Forwarded-For: ' +
environ.get('HTTP_X_FORWARDED_FOR'))
method_name = req['method']
if method_name in async_run_methods or method_name in async_check_methods:
if method_name in async_run_methods:
orig_method_pair = async_run_methods[method_name]
else:
orig_method_pair = async_check_methods[method_name]
orig_method_name = orig_method_pair[0] + '.' + orig_method_pair[1]
if 'required' != self.method_authentication.get(orig_method_name, 'none'):
err = ServerError()
err.data = 'Async method ' + orig_method_name + ' should require ' + \
'authentication, but it has authentication level: ' + \
self.method_authentication.get(orig_method_name, 'none')
raise err
job_service_client = AsyncJobServiceClient(token = ctx['token'])
if method_name in async_run_methods:
run_job_params = {
'method': orig_method_name,
'params': req['params']}
if 'rpc_context' in ctx:
run_job_params['rpc_context'] = ctx['rpc_context']
job_id = job_service_client.run_job(run_job_params)
respond = {'version': '1.1', 'result': [job_id], 'id': req['id']}
rpc_result = json.dumps(respond, cls=JSONObjectEncoder)
status = '200 OK'
else:
job_id = req['params'][0]
job_state = job_service_client.check_job(job_id)
finished = job_state['finished']
if finished != 0 and 'error' in job_state and job_state['error'] is not None:
err = {'error': job_state['error']}
rpc_result = self.process_error(err, ctx, req, None)
else:
respond = {'version': '1.1', 'result': [job_state], 'id': req['id']}
rpc_result = json.dumps(respond, cls=JSONObjectEncoder)
status = '200 OK'
else:
self.log(log.INFO, ctx, 'start method')
rpc_result = self.rpc_service.call(ctx, req)
self.log(log.INFO, ctx, 'end method')
status = '200 OK'
except JSONRPCError as jre:
err = {'error': {'code': jre.code,
'name': jre.message,
'message': jre.data
}
}
trace = jre.trace if hasattr(jre, 'trace') else None
rpc_result = self.process_error(err, ctx, req, trace)
except Exception, e:
err = {'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error ' +
'occurred',
}
}
rpc_result = self.process_error(err, ctx, req,
traceback.format_exc())
# print 'The request method was %s\n' % environ['REQUEST_METHOD']
# print 'The environment dictionary is:\n%s\n' % pprint.pformat(environ) @IgnorePep8
# print 'The request body was: %s' % request_body
# print 'The result from the method call is:\n%s\n' % \
# pprint.pformat(rpc_result)
if rpc_result:
response_body = rpc_result
else:
response_body = ''
response_headers = [
('Access-Control-Allow-Origin', '*'),
('Access-Control-Allow-Headers', environ.get(
'HTTP_ACCESS_CONTROL_REQUEST_HEADERS', 'authorization')),
('content-type', 'application/json'),
('content-length', str(len(response_body)))]
start_response(status, response_headers)
return [response_body]
def process_error(self, error, context, request, trace=None):
if trace:
self.log(log.ERR, context, trace.split('\n')[0:-1])
if 'id' in request:
error['id'] = request['id']
if 'version' in request:
error['version'] = request['version']
if 'error' not in error['error'] or error['error']['error'] is None:
error['error']['error'] = trace
elif 'jsonrpc' in request:
error['jsonrpc'] = request['jsonrpc']
error['error']['data'] = trace
else:
error['version'] = '1.0'
error['error']['error'] = trace
return json.dumps(error)
def now_in_utc(self):
# Taken from http://stackoverflow.com/questions/3401428/how-to-get-an-isoformat-datetime-string-including-the-default-timezone
dtnow = datetime.datetime.now()
dtutcnow = datetime.datetime.utcnow()
delta = dtnow - dtutcnow
hh,mm = divmod((delta.days * 24*60*60 + delta.seconds + 30) // 60, 60)
return "%s%+02d:%02d" % (dtnow.isoformat(), hh, mm)
application = Application()
# This is the uwsgi application dictionary. On startup uwsgi will look
# for this dict and pull its configuration from here.
# This simply lists where to "mount" the application in the URL path
#
# This uwsgi module "magically" appears when running the app within
# uwsgi and is not available otherwise, so wrap an exception handler
# around it
#
# To run this server in uwsgi with 4 workers listening on port 9999 use:
# uwsgi -M -p 4 --http :9999 --wsgi-file _this_file_
# To run using the single threaded python BaseHTTP service
# listening on port 9999 by default execute this file
#
try:
import uwsgi
# Before we do anything with the application, see if the
# configs specify patching all std routines to be asynch
# *ONLY* use this if you are going to wrap the service in
# a wsgi container that has enabled gevent, such as
# uwsgi with the --gevent option
if config is not None and config.get('gevent_monkeypatch_all', False):
print "Monkeypatching std libraries for async"
from gevent import monkey
monkey.patch_all()
uwsgi.applications = {
'': application
}
except ImportError:
# Not available outside of wsgi, ignore
pass
_proc = None
def start_server(host='localhost', port=0, newprocess=False):
'''
By default, will start the server on localhost on a system assigned port
in the main thread. Execution of the main thread will stay in the server
main loop until interrupted. To run the server in a separate process, and
thus allow the stop_server method to be called, set newprocess = True. This
will also allow returning of the port number.'''
global _proc
if _proc:
raise RuntimeError('server is already running')
httpd = make_server(host, port, application)
port = httpd.server_address[1]
print "Listening on port %s" % port
if newprocess:
_proc = Process(target=httpd.serve_forever)
_proc.daemon = True
_proc.start()
else:
httpd.serve_forever()
return port
def stop_server():
global _proc
_proc.terminate()
_proc = None
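# A minimal usage sketch (port=0 lets the OS assign a free port; newprocess=True
# is required if stop_server() is to be called afterwards):
#
#   port = start_server(newprocess=True)
#   # ... exercise the service at http://localhost:<port> ...
#   stop_server()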
def process_async_cli(input_file_path, output_file_path, token):
exit_code = 0
with open(input_file_path) as data_file:
req = json.load(data_file)
if 'version' not in req:
req['version'] = '1.1'
if 'id' not in req:
req['id'] = str(_random.random())[2:]
ctx = MethodContext(application.userlog)
user, _, _ = application.auth_client.validate_token(token)
ctx['user_id'] = user
ctx['authenticated'] = 1
ctx['token'] = token
ctx['rpc_context'] = req['context']
resp = None
try:
resp = application.rpc_service.call_py(ctx, req)
except JSONRPCError as jre:
trace = jre.trace if hasattr(jre, 'trace') else None
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': jre.code,
'name': jre.message,
'message': jre.data,
'error': trace}
}
except Exception, e:
trace = traceback.format_exc()
resp = {'id': req['id'],
'version': req['version'],
'error': {'code': 0,
'name': 'Unexpected Server Error',
'message': 'An unexpected server error occurred',
'error': trace}
}
if 'error' in resp:
exit_code = 500
with open(output_file_path, "w") as f:
f.write(json.dumps(resp, cls=JSONObjectEncoder))
return exit_code
if __name__ == "__main__":
if len(sys.argv) == 4 and os.path.isfile(sys.argv[1]):
sys.exit(process_async_cli(sys.argv[1], sys.argv[2], sys.argv[3]))
try:
opts, args = getopt(sys.argv[1:], "", ["port=", "host="])
except GetoptError as err:
# print help information and exit:
print str(err) # will print something like "option -a not recognized"
sys.exit(2)
port = 9999
host = 'localhost'
for o, a in opts:
if o == '--port':
port = int(a)
elif o == '--host':
host = a
print "Host set to %s" % host
else:
assert False, "unhandled option"
start_server(host=host, port=port)
# print "Listening on port %s" % port
# httpd = make_server( host, port, application)
#
# httpd.serve_forever()
|
kbase/narrative_method_store
|
test/data/test_repo_1/service/GenomeFeatureComparatorServer.py
|
Python
|
mit
| 25,877
| 0.002164
|
# coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Functions for generating a logistic dataset.
x ~ N(0, I_d)
y ~ Bernoulli(sigmoid(-(1/temp) w^T x))
"""
import jax
from jax import numpy as jnp
def logistic_dataset_init_param(dim, r, rng_key):
param0 = jax.random.normal(rng_key, (dim, 1))
param0_norm = jnp.linalg.norm(param0)
param = param0 / param0_norm * r
return param
def logistic_dataset_gen_data(num, w, dim, temp, rng_key):
"""Samples data from a standard Gaussian with binary noisy labels.
Args:
num: An integer denoting the number of data points.
w: An array of size dim x odim, the weight vector used to generate labels.
dim: An integer denoting the number of input dimensions.
temp: A float denoting the temperature parameter controlling label noise.
rng_key: JAX random number generator key.
Returns:
x: An array of size dim x num denoting data points.
y_pm: An array of size num x odim denoting +/-1 labels.
"""
rng_subkey = jax.random.split(rng_key, 3)
x = jax.random.normal(rng_subkey[0], (dim, num))
prob = jax.nn.sigmoid(-(1 / temp) * w.T.dot(x))
y = jax.random.bernoulli(rng_subkey[1], (prob))
y_pm = 2. * y - 1
return x, y_pm
def logistic_dataset_gen_train_test(config, rng_key):
"""Creates the train and test sets of a logistic dataset.
Args:
config: Dictionary of parameters.
config.dim: An integer denoting input dimensionality.
config.r: A float denoting L2 norm of the true parameters.
config.num_train: An integer denoting the number of training data.
config.num_test: An integer denoting the number of test data.
rng_key: JAX random number generator key.
Returns:
train_data: The tuple (input, label) of training data.
test_data: The tuple (input, label) of test data.
"""
dim = config.dim
temp = config.temperature
rng_subkey = jax.random.split(rng_key, 3)
param = logistic_dataset_init_param(dim, config.r, rng_subkey[0])
train_data = logistic_dataset_gen_data(config.num_train, param, dim, temp,
rng_subkey[1])
test_data = logistic_dataset_gen_data(config.num_test, param, dim, temp,
rng_subkey[2])
return train_data, test_data
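# A minimal usage sketch (assuming an attribute-style config object such as
# ml_collections.ConfigDict; the field values are illustrative):
#
#   import ml_collections
#   config = ml_collections.ConfigDict(dict(
#       dim=10, r=1.0, temperature=0.5, num_train=100, num_test=50))
#   key = jax.random.PRNGKey(0)
#   (x_tr, y_tr), (x_te, y_te) = logistic_dataset_gen_train_test(config, key)
#   # x_tr has shape (10, 100); y_tr has shape (1, 100) with values in {-1., 1.}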
def get_train_test_generator(dataset):
if dataset == 'logistic':
return logistic_dataset_gen_train_test
raise NotImplementedError('Dataset not found.')
|
google-research/google-research
|
robust_optim/data.py
|
Python
|
apache-2.0
| 2,993
| 0.007016
|
import pickle
import tempfile
import shutil
import os
import numbers
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_raises_regexp
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import ignore_warnings
from sklearn.utils.testing import assert_not_equal
from sklearn.utils.testing import assert_warns_message
from sklearn.base import BaseEstimator
from sklearn.metrics import (f1_score, r2_score, roc_auc_score, fbeta_score,
log_loss, precision_score, recall_score)
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.metrics.scorer import (check_scoring, _PredictScorer,
_passthrough_scorer)
from sklearn.metrics import make_scorer, get_scorer, SCORERS
from sklearn.svm import LinearSVC
from sklearn.pipeline import make_pipeline
from sklearn.cluster import KMeans
from sklearn.dummy import DummyRegressor
from sklearn.linear_model import Ridge, LogisticRegression
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
from sklearn.datasets import make_blobs
from sklearn.datasets import make_classification
from sklearn.datasets import make_multilabel_classification
from sklearn.datasets import load_diabetes
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.model_selection import GridSearchCV
from sklearn.multiclass import OneVsRestClassifier
from sklearn.externals import joblib
REGRESSION_SCORERS = ['r2', 'neg_mean_absolute_error',
'neg_mean_squared_error', 'neg_mean_squared_log_error',
'neg_median_absolute_error', 'mean_absolute_error',
'mean_squared_error', 'median_absolute_error']
CLF_SCORERS = ['accuracy', 'f1', 'f1_weighted', 'f1_macro', 'f1_micro',
'roc_auc', 'average_precision', 'precision',
'precision_weighted', 'precision_macro', 'precision_micro',
'recall', 'recall_weighted', 'recall_macro', 'recall_micro',
'neg_log_loss', 'log_loss',
'adjusted_rand_score' # not really, but works
]
MULTILABEL_ONLY_SCORERS = ['precision_samples', 'recall_samples', 'f1_samples']
def _make_estimators(X_train, y_train, y_ml_train):
# Make estimators that make sense to test various scoring methods
sensible_regr = DummyRegressor(strategy='median')
sensible_regr.fit(X_train, y_train)
sensible_clf = DecisionTreeClassifier(random_state=0)
sensible_clf.fit(X_train, y_train)
sensible_ml_clf = DecisionTreeClassifier(random_state=0)
sensible_ml_clf.fit(X_train, y_ml_train)
return dict(
[(name, sensible_regr) for name in REGRESSION_SCORERS] +
[(name, sensible_clf) for name in CLF_SCORERS] +
[(name, sensible_ml_clf) for name in MULTILABEL_ONLY_SCORERS]
)
X_mm, y_mm, y_ml_mm = None, None, None
ESTIMATORS = None
TEMP_FOLDER = None
def setup_module():
# Create some memory mapped data
global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
TEMP_FOLDER = tempfile.mkdtemp(prefix='sklearn_test_score_objects_')
X, y = make_classification(n_samples=30, n_features=5, random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
filename = os.path.join(TEMP_FOLDER, 'test_data.pkl')
joblib.dump((X, y, y_ml), filename)
X_mm, y_mm, y_ml_mm = joblib.load(filename, mmap_mode='r')
ESTIMATORS = _make_estimators(X_mm, y_mm, y_ml_mm)
def teardown_module():
global X_mm, y_mm, y_ml_mm, TEMP_FOLDER, ESTIMATORS
# GC closes the mmap file descriptors
X_mm, y_mm, y_ml_mm, ESTIMATORS = None, None, None, None
shutil.rmtree(TEMP_FOLDER)
class EstimatorWithoutFit(object):
"""Dummy estimator to test check_scoring"""
pass
class EstimatorWithFit(BaseEstimator):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
class EstimatorWithFitAndScore(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
return self
def score(self, X, y):
return 1.0
class EstimatorWithFitAndPredict(object):
"""Dummy estimator to test check_scoring"""
def fit(self, X, y):
self.y = y
return self
def predict(self, X):
return self.y
class DummyScorer(object):
"""Dummy scorer that always returns 1."""
def __call__(self, est, X, y):
return 1
def test_all_scorers_repr():
# Test that all scorers have a working repr
for name, scorer in SCORERS.items():
repr(scorer)
def test_check_scoring():
# Test all branches of check_scoring
estimator = EstimatorWithoutFit()
pattern = (r"estimator should be an estimator implementing 'fit' method,"
r" .* was passed")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
estimator = EstimatorWithFitAndScore()
estimator.fit([[1]], [1])
scorer = check_scoring(estimator)
assert_true(scorer is _passthrough_scorer)
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFitAndPredict()
estimator.fit([[1]], [1])
pattern = (r"If no scoring is specified, the estimator passed should have"
r" a 'score' method\. The estimator .* does not\.")
assert_raises_regexp(TypeError, pattern, check_scoring, estimator)
scorer = check_scoring(estimator, "accuracy")
assert_almost_equal(scorer(estimator, [[1]], [1]), 1.0)
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, "accuracy")
assert_true(isinstance(scorer, _PredictScorer))
estimator = EstimatorWithFit()
scorer = check_scoring(estimator, allow_none=True)
assert_true(scorer is None)
def test_check_scoring_gridsearchcv():
# test that check_scoring works on GridSearchCV and pipeline.
# slightly redundant non-regression test.
grid = GridSearchCV(LinearSVC(), param_grid={'C': [.1, 1]})
scorer = check_scoring(grid, "f1")
assert_true(isinstance(scorer, _PredictScorer))
pipe = make_pipeline(LinearSVC())
scorer = check_scoring(pipe, "f1")
assert_true(isinstance(scorer, _PredictScorer))
# check that cross_val_score definitely calls the scorer
# and doesn't make any assumptions about the estimator apart from having a
# fit.
scores = cross_val_score(EstimatorWithFit(), [[1], [2], [3]], [1, 0, 1],
scoring=DummyScorer())
assert_array_equal(scores, 1)
def test_make_scorer():
# Sanity check on the make_scorer factory function.
f = lambda *args: 0
assert_raises(ValueError, make_scorer, f, needs_threshold=True,
needs_proba=True)
def test_classification_scores():
# Test classification scorers.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LinearSVC(random_state=0)
clf.fit(X_train, y_train)
for prefix, metric in [('f1', f1_score), ('precision', precision_score),
('recall', recall_score)]:
score1 = get_scorer('%s_weighted' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='weighted')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_macro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='macro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s_micro' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=None,
average='micro')
assert_almost_equal(score1, score2)
score1 = get_scorer('%s' % prefix)(clf, X_test, y_test)
score2 = metric(y_test, clf.predict(X_test), pos_label=1)
assert_almost_equal(score1, score2)
# test fbeta score that takes an argument
scorer = make_scorer(fbeta_score, beta=2)
score1 = scorer(clf, X_test, y_test)
score2 = fbeta_score(y_test, clf.predict(X_test), beta=2)
assert_almost_equal(score1, score2)
# test that custom scorer can be pickled
unpickled_scorer = pickle.loads(pickle.dumps(scorer))
score3 = unpickled_scorer(clf, X_test, y_test)
assert_almost_equal(score1, score3)
# smoke test the repr:
repr(fbeta_score)
def test_regression_scorers():
# Test regression scorers.
diabetes = load_diabetes()
X, y = diabetes.data, diabetes.target
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = Ridge()
clf.fit(X_train, y_train)
score1 = get_scorer('r2')(clf, X_test, y_test)
score2 = r2_score(y_test, clf.predict(X_test))
assert_almost_equal(score1, score2)
def test_thresholded_scorers():
# Test scorers that take thresholds.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
score3 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
assert_almost_equal(score1, score3)
logscore = get_scorer('neg_log_loss')(clf, X_test, y_test)
logloss = log_loss(y_test, clf.predict_proba(X_test))
assert_almost_equal(-logscore, logloss)
# same for an estimator without decision_function
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test)[:, 1])
assert_almost_equal(score1, score2)
# test with a regressor (no decision_function)
reg = DecisionTreeRegressor()
reg.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(reg, X_test, y_test)
score2 = roc_auc_score(y_test, reg.predict(X_test))
assert_almost_equal(score1, score2)
# Test that an exception is raised on more than two classes
X, y = make_blobs(random_state=0, centers=3)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf.fit(X_train, y_train)
assert_raises(ValueError, get_scorer('roc_auc'), clf, X_test, y_test)
def test_thresholded_scorers_multilabel_indicator_data():
# Test that the scorer work with multilabel-indicator format
# for multilabel and multi-output multi-class classifier
X, y = make_multilabel_classification(allow_unlabeled=False,
random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
# Multi-output multi-class predict_proba
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
y_proba = clf.predict_proba(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p[:, -1] for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multi-output multi-class decision_function
# TODO Is there any yet?
clf = DecisionTreeClassifier()
clf.fit(X_train, y_train)
clf._predict_proba = clf.predict_proba
clf.predict_proba = None
clf.decision_function = lambda X: [p[:, 1] for p in clf._predict_proba(X)]
y_proba = clf.decision_function(X_test)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, np.vstack(p for p in y_proba).T)
assert_almost_equal(score1, score2)
# Multilabel predict_proba
clf = OneVsRestClassifier(DecisionTreeClassifier())
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.predict_proba(X_test))
assert_almost_equal(score1, score2)
# Multilabel decision function
clf = OneVsRestClassifier(LinearSVC(random_state=0))
clf.fit(X_train, y_train)
score1 = get_scorer('roc_auc')(clf, X_test, y_test)
score2 = roc_auc_score(y_test, clf.decision_function(X_test))
assert_almost_equal(score1, score2)
def test_unsupervised_scorers():
# Test clustering scorers against gold standard labeling.
# We don't have any real unsupervised Scorers yet.
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
km = KMeans(n_clusters=3)
km.fit(X_train)
score1 = get_scorer('adjusted_rand_score')(km, X_test, y_test)
score2 = adjusted_rand_score(y_test, km.predict(X_test))
assert_almost_equal(score1, score2)
@ignore_warnings
def test_raises_on_score_list():
# Test that when a list of scores is returned, we raise proper errors.
X, y = make_blobs(random_state=0)
f1_scorer_no_average = make_scorer(f1_score, average=None)
clf = DecisionTreeClassifier()
assert_raises(ValueError, cross_val_score, clf, X, y,
scoring=f1_scorer_no_average)
grid_search = GridSearchCV(clf, scoring=f1_scorer_no_average,
param_grid={'max_depth': [1, 2]})
assert_raises(ValueError, grid_search.fit, X, y)
@ignore_warnings
def test_scorer_sample_weight():
# Test that scorers support sample_weight or raise sensible errors
# Unlike the metrics invariance test, in the scorer case it's harder
# to ensure that, on the classifier output, weighted and unweighted
# scores really should be unequal.
X, y = make_classification(random_state=0)
_, y_ml = make_multilabel_classification(n_samples=X.shape[0],
random_state=0)
split = train_test_split(X, y, y_ml, random_state=0)
X_train, X_test, y_train, y_test, y_ml_train, y_ml_test = split
sample_weight = np.ones_like(y_test)
sample_weight[:10] = 0
# get sensible estimators for each metric
estimator = _make_estimators(X_train, y_train, y_ml_train)
for name, scorer in SCORERS.items():
if name in MULTILABEL_ONLY_SCORERS:
target = y_ml_test
else:
target = y_test
try:
weighted = scorer(estimator[name], X_test, target,
sample_weight=sample_weight)
ignored = scorer(estimator[name], X_test[10:], target[10:])
unweighted = scorer(estimator[name], X_test, target)
assert_not_equal(weighted, unweighted,
msg="scorer {0} behaves identically when "
"called with sample weights: {1} vs "
"{2}".format(name, weighted, unweighted))
assert_almost_equal(weighted, ignored,
err_msg="scorer {0} behaves differently when "
"ignoring samples and setting sample_weight to"
" 0: {1} vs {2}".format(name, weighted,
ignored))
except TypeError as e:
assert_true("sample_weight" in str(e),
"scorer {0} raises unhelpful exception when called "
"with sample weights: {1}".format(name, str(e)))
@ignore_warnings # UndefinedMetricWarning for P / R scores
def check_scorer_memmap(scorer_name):
scorer, estimator = SCORERS[scorer_name], ESTIMATORS[scorer_name]
if scorer_name in MULTILABEL_ONLY_SCORERS:
score = scorer(estimator, X_mm, y_ml_mm)
else:
score = scorer(estimator, X_mm, y_mm)
assert isinstance(score, numbers.Number), scorer_name
def test_scorer_memmap_input():
# Non-regression test for #6147: some score functions would
# return singleton memmap when computed on memmap data instead of scalar
# float values.
for name in SCORERS.keys():
yield check_scorer_memmap, name
def test_deprecated_names():
X, y = make_blobs(random_state=0, centers=2)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression(random_state=0)
clf.fit(X_train, y_train)
for name in ('mean_absolute_error', 'mean_squared_error',
'median_absolute_error', 'log_loss'):
warning_msg = "Scoring method %s was renamed to" % name
for scorer in (get_scorer(name), SCORERS[name]):
assert_warns_message(DeprecationWarning,
warning_msg,
scorer, clf, X, y)
assert_warns_message(DeprecationWarning,
warning_msg,
cross_val_score, clf, X, y, scoring=name)
def test_scoring_is_not_metric():
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
LogisticRegression(), f1_score)
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
LogisticRegression(), roc_auc_score)
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
Ridge(), r2_score)
assert_raises_regexp(ValueError, 'make_scorer', check_scoring,
KMeans(), adjusted_rand_score)
|
meduz/scikit-learn
|
sklearn/metrics/tests/test_score_objects.py
|
Python
|
bsd-3-clause
| 17,473
| 0.000057
|
# Purpose: dxf engine for R2007/AC1021
# Created: 12.03.2011
# Copyright (C) , Manfred Moitzi
# License: MIT License
from __future__ import unicode_literals
__author__ = "mozman <mozman@gmx.at>"
from .headervars import VARMAP
from ..ac1018 import AC1018Factory
class AC1021Factory(AC1018Factory):
HEADERVARS = dict(VARMAP)
|
lautr3k/RepRap-iTopie
|
odmt/ezdxf/ac1021/__init__.py
|
Python
|
gpl-3.0
| 330
| 0
|
from audio.io import *
|
Curly-Mo/audio
|
__init__.py
|
Python
|
mit
| 23
| 0
|
# -*- encoding: utf-8 -*-
import ast
import inspect
class NameLower(ast.NodeVisitor):
def __init__(self, lowered_names):
self.lowered_names = lowered_names
def visit_FunctionDef(self, node):
code = '__globals = globals()\n'
code += '\n'.join("{0} = __globals['{0}']".format(name) for name in self.lowered_names)
code_ast = ast.parse(code, mode='exec')
node.body[:0] = code_ast.body
self.func = node
def lower_names(*namelist):
def lower(func):
srclines = inspect.getsource(func).splitlines()
for n, line in enumerate(srclines):
if '@lower_names' in line:
break
src = '\n'.join(srclines[n + 1:])
if src.startswith((' ', '\t')):
src = 'if 1:\n' + src
top = ast.parse(src, mode='exec')
cl = NameLower(namelist)
cl.visit(top)
temp = {}
exec(compile(top, '', 'exec'), temp, temp)
func.__code__ = temp[func.__name__].__code__
return func
return lower
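# A minimal usage sketch (names are illustrative): the decorator rewrites the
# function body so that each listed global is read once into a local variable,
# avoiding a repeated global lookup on every access inside the function.
#
#   x = 42
#
#   @lower_names('x')
#   def spam():
#       return x + 1
#
#   spam()  # -> 43, with 'x' fetched from globals() once at function entry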
|
xu6148152/Binea_Python_Project
|
PythonCookbook/meta/newlower.py
|
Python
|
mit
| 1,116
| 0.000896
|
#!/usr/bin/env python
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import (assert_allclose, assert_, assert_raises,
assert_array_equal)
import pywt
# Check that float32, float64, complex64, complex128 are preserved.
# Other real types get converted to float64.
# complex256 gets converted to complex128
dtypes_in = [np.int8, np.float16, np.float32, np.float64, np.complex64,
np.complex128]
dtypes_out = [np.float64, np.float32, np.float32, np.float64, np.complex64,
np.complex128]
# test complex256 as well if it is available
try:
dtypes_in += [np.complex256, ]
dtypes_out += [np.complex128, ]
except AttributeError:
pass
def test_dwt_idwt_basic():
x = [3, 7, 1, 1, -2, 5, 4, 6]
cA, cD = pywt.dwt(x, 'db2')
cA_expect = [5.65685425, 7.39923721, 0.22414387, 3.33677403, 7.77817459]
cD_expect = [-2.44948974, -1.60368225, -4.44140056, -0.41361256,
1.22474487]
assert_allclose(cA, cA_expect)
assert_allclose(cD, cD_expect)
x_roundtrip = pywt.idwt(cA, cD, 'db2')
assert_allclose(x_roundtrip, x, rtol=1e-10)
# mismatched dtypes OK
x_roundtrip2 = pywt.idwt(cA.astype(np.float64), cD.astype(np.float32),
'db2')
assert_allclose(x_roundtrip2, x, rtol=1e-7, atol=1e-7)
assert_(x_roundtrip2.dtype == np.float64)
def test_idwt_mixed_complex_dtype():
x = np.arange(8).astype(float)
x = x + 1j*x[::-1]
cA, cD = pywt.dwt(x, 'db2')
x_roundtrip = pywt.idwt(cA, cD, 'db2')
assert_allclose(x_roundtrip, x, rtol=1e-10)
# mismatched dtypes OK
x_roundtrip2 = pywt.idwt(cA.astype(np.complex128), cD.astype(np.complex64),
'db2')
assert_allclose(x_roundtrip2, x, rtol=1e-7, atol=1e-7)
assert_(x_roundtrip2.dtype == np.complex128)
def test_dwt_idwt_dtypes():
wavelet = pywt.Wavelet('haar')
for dt_in, dt_out in zip(dtypes_in, dtypes_out):
x = np.ones(4, dtype=dt_in)
errmsg = "wrong dtype returned for {0} input".format(dt_in)
cA, cD = pywt.dwt(x, wavelet)
assert_(cA.dtype == cD.dtype == dt_out, "dwt: " + errmsg)
x_roundtrip = pywt.idwt(cA, cD, wavelet)
assert_(x_roundtrip.dtype == dt_out, "idwt: " + errmsg)
def test_dwt_idwt_basic_complex():
x = np.asarray([3, 7, 1, 1, -2, 5, 4, 6])
x = x + 0.5j*x
cA, cD = pywt.dwt(x, 'db2')
cA_expect = np.asarray([5.65685425, 7.39923721, 0.22414387, 3.33677403,
7.77817459])
cA_expect = cA_expect + 0.5j*cA_expect
cD_expect = np.asarray([-2.44948974, -1.60368225, -4.44140056, -0.41361256,
1.22474487])
cD_expect = cD_expect + 0.5j*cD_expect
assert_allclose(cA, cA_expect)
assert_allclose(cD, cD_expect)
x_roundtrip = pywt.idwt(cA, cD, 'db2')
assert_allclose(x_roundtrip, x, rtol=1e-10)
def test_dwt_idwt_partial_complex():
x = np.asarray([3, 7, 1, 1, -2, 5, 4, 6])
x = x + 0.5j*x
cA, cD = pywt.dwt(x, 'haar')
cA_rec_expect = np.array([5.0+2.5j, 5.0+2.5j, 1.0+0.5j, 1.0+0.5j,
1.5+0.75j, 1.5+0.75j, 5.0+2.5j, 5.0+2.5j])
cA_rec = pywt.idwt(cA, None, 'haar')
assert_allclose(cA_rec, cA_rec_expect)
cD_rec_expect = np.array([-2.0-1.0j, 2.0+1.0j, 0.0+0.0j, 0.0+0.0j,
-3.5-1.75j, 3.5+1.75j, -1.0-0.5j, 1.0+0.5j])
cD_rec = pywt.idwt(None, cD, 'haar')
assert_allclose(cD_rec, cD_rec_expect)
assert_allclose(cA_rec + cD_rec, x)
def test_dwt_wavelet_kwd():
x = np.array([3, 7, 1, 1, -2, 5, 4, 6])
w = pywt.Wavelet('sym3')
cA, cD = pywt.dwt(x, wavelet=w, mode='constant')
cA_expect = [4.38354585, 3.80302657, 7.31813271, -0.58565539, 4.09727044,
7.81994027]
cD_expect = [-1.33068221, -2.78795192, -3.16825651, -0.67715519,
-0.09722957, -0.07045258]
assert_allclose(cA, cA_expect)
assert_allclose(cD, cD_expect)
def test_dwt_coeff_len():
x = np.array([3, 7, 1, 1, -2, 5, 4, 6])
w = pywt.Wavelet('sym3')
ln_modes = [pywt.dwt_coeff_len(len(x), w.dec_len, mode) for mode in
pywt.Modes.modes]
expected_result = [6, ] * len(pywt.Modes.modes)
expected_result[pywt.Modes.modes.index('periodization')] = 4
assert_allclose(ln_modes, expected_result)
ln_modes = [pywt.dwt_coeff_len(len(x), w, mode) for mode in
pywt.Modes.modes]
assert_allclose(ln_modes, expected_result)
def test_idwt_none_input():
# None input equals arrays of zeros of the right length
res1 = pywt.idwt([1, 2, 0, 1], None, 'db2', 'symmetric')
res2 = pywt.idwt([1, 2, 0, 1], [0, 0, 0, 0], 'db2', 'symmetric')
assert_allclose(res1, res2, rtol=1e-15, atol=1e-15)
res1 = pywt.idwt(None, [1, 2, 0, 1], 'db2', 'symmetric')
res2 = pywt.idwt([0, 0, 0, 0], [1, 2, 0, 1], 'db2', 'symmetric')
assert_allclose(res1, res2, rtol=1e-15, atol=1e-15)
# Only one argument at a time can be None
assert_raises(ValueError, pywt.idwt, None, None, 'db2', 'symmetric')
def test_idwt_invalid_input():
# Too short, min length is 4 for 'db4':
assert_raises(ValueError, pywt.idwt, [1, 2, 4], [4, 1, 3], 'db4', 'symmetric')
def test_dwt_single_axis():
x = [[3, 7, 1, 1],
[-2, 5, 4, 6]]
cA, cD = pywt.dwt(x, 'db2', axis=-1)
cA0, cD0 = pywt.dwt(x[0], 'db2')
cA1, cD1 = pywt.dwt(x[1], 'db2')
assert_allclose(cA[0], cA0)
assert_allclose(cA[1], cA1)
assert_allclose(cD[0], cD0)
assert_allclose(cD[1], cD1)
def test_idwt_single_axis():
x = [[3, 7, 1, 1],
[-2, 5, 4, 6]]
x = np.asarray(x)
x = x + 1j*x # test with complex data
cA, cD = pywt.dwt(x, 'db2', axis=-1)
x0 = pywt.idwt(cA[0], cD[0], 'db2', axis=-1)
x1 = pywt.idwt(cA[1], cD[1], 'db2', axis=-1)
assert_allclose(x[0], x0)
assert_allclose(x[1], x1)
def test_dwt_invalid_input():
x = np.arange(1)
assert_raises(ValueError, pywt.dwt, x, 'db2', 'reflect')
assert_raises(ValueError, pywt.dwt, x, 'haar', 'antireflect')
def test_dwt_axis_arg():
x = [[3, 7, 1, 1],
[-2, 5, 4, 6]]
cA_, cD_ = pywt.dwt(x, 'db2', axis=-1)
cA, cD = pywt.dwt(x, 'db2', axis=1)
assert_allclose(cA_, cA)
assert_allclose(cD_, cD)
def test_dwt_axis_invalid_input():
x = np.ones((3,1))
assert_raises(ValueError, pywt.dwt, x, 'db2', 'reflect')
def test_idwt_axis_arg():
x = [[3, 7, 1, 1],
[-2, 5, 4, 6]]
cA, cD = pywt.dwt(x, 'db2', axis=1)
x_ = pywt.idwt(cA, cD, 'db2', axis=-1)
x = pywt.idwt(cA, cD, 'db2', axis=1)
assert_allclose(x_, x)
def test_dwt_idwt_axis_excess():
x = [[3, 7, 1, 1],
[-2, 5, 4, 6]]
# can't transform over axes that aren't there
assert_raises(ValueError,
pywt.dwt, x, 'db2', 'symmetric', axis=2)
assert_raises(ValueError,
pywt.idwt, [1, 2, 4], [4, 1, 3], 'db2', 'symmetric', axis=1)
def test_error_on_continuous_wavelet():
# A ValueError is raised if a Continuous wavelet is selected
data = np.ones((32, ))
for cwave in ['morl', pywt.DiscreteContinuousWavelet('morl')]:
assert_raises(ValueError, pywt.dwt, data, cwave)
cA, cD = pywt.dwt(data, 'db1')
assert_raises(ValueError, pywt.idwt, cA, cD, cwave)
def test_dwt_zero_size_axes():
# raise on empty input array
assert_raises(ValueError, pywt.dwt, [], 'db2')
# >1D case uses a different code path so check there as well
x = np.ones((1, 4))[0:0, :] # 2D with a size zero axis
assert_raises(ValueError, pywt.dwt, x, 'db2', axis=0)
def test_pad_1d():
x = [1, 2, 3]
assert_array_equal(pywt.pad(x, (4, 6), 'periodization'),
[1, 2, 3, 3, 1, 2, 3, 3, 1, 2, 3, 3, 1, 2])
assert_array_equal(pywt.pad(x, (4, 6), 'periodic'),
[3, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 3])
assert_array_equal(pywt.pad(x, (4, 6), 'constant'),
[1, 1, 1, 1, 1, 2, 3, 3, 3, 3, 3, 3, 3])
assert_array_equal(pywt.pad(x, (4, 6), 'zero'),
[0, 0, 0, 0, 1, 2, 3, 0, 0, 0, 0, 0, 0])
assert_array_equal(pywt.pad(x, (4, 6), 'smooth'),
[-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
assert_array_equal(pywt.pad(x, (4, 6), 'symmetric'),
[3, 3, 2, 1, 1, 2, 3, 3, 2, 1, 1, 2, 3])
assert_array_equal(pywt.pad(x, (4, 6), 'antisymmetric'),
[3, -3, -2, -1, 1, 2, 3, -3, -2, -1, 1, 2, 3])
assert_array_equal(pywt.pad(x, (4, 6), 'reflect'),
[1, 2, 3, 2, 1, 2, 3, 2, 1, 2, 3, 2, 1])
assert_array_equal(pywt.pad(x, (4, 6), 'antireflect'),
[-3, -2, -1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
# equivalence of various pad_width formats
assert_array_equal(pywt.pad(x, 4, 'periodic'),
pywt.pad(x, (4, 4), 'periodic'))
assert_array_equal(pywt.pad(x, (4, ), 'periodic'),
pywt.pad(x, (4, 4), 'periodic'))
assert_array_equal(pywt.pad(x, [(4, 4)], 'periodic'),
pywt.pad(x, (4, 4), 'periodic'))
def test_pad_errors():
# negative pad width
x = [1, 2, 3]
assert_raises(ValueError, pywt.pad, x, -2, 'periodic')
# wrong length pad width
assert_raises(ValueError, pywt.pad, x, (1, 1, 1), 'periodic')
# invalid mode name
assert_raises(ValueError, pywt.pad, x, 2, 'bad_mode')
def test_pad_nd():
for ndim in [2, 3]:
x = np.arange(4**ndim).reshape((4, ) * ndim)
if ndim == 2:
pad_widths = [(2, 1), (2, 3)]
else:
pad_widths = [(2, 1), ] * ndim
for mode in pywt.Modes.modes:
xp = pywt.pad(x, pad_widths, mode)
# expected result is the same as applying along axes separably
xp_expected = x.copy()
for ax in range(ndim):
xp_expected = np.apply_along_axis(pywt.pad,
ax,
xp_expected,
pad_widths=[pad_widths[ax]],
mode=mode)
assert_array_equal(xp, xp_expected)
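# --- Hedged usage sketch (illustrative addition, not part of the upstream
# test suite): a minimal dwt/idwt round trip with the 'haar' wavelet,
# showing the reconstruction property the tests above exercise.
def _demo_haar_roundtrip():
    x = np.array([1.0, 2.0, 3.0, 4.0])
    cA, cD = pywt.dwt(x, 'haar')
    assert_allclose(pywt.idwt(cA, cD, 'haar'), x)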
|
PyWavelets/pywt
|
pywt/tests/test_dwt_idwt.py
|
Python
|
mit
| 10,352
| 0.00058
|
from .fields import BitField, Field
from nettest.exceptions import NettestError
import struct
class PacketMeta(type):
def __new__(cls, name, bases, attrs):
fields = attrs.get('fields')
if fields is None:
raise NettestError(_("packet class must have 'fields' field"))
_fields = []
for fieldname in attrs['fields']:
field = attrs.get(fieldname)
if field is None:
for baseclass in bases:
                    field = getattr(baseclass, fieldname, None)
if field is not None:
break
else:
                    raise NettestError(_("field '%s' doesn't exist in class %s")%(fieldname, name))
if not cls.__check_field_type(cls, field):
raise NettestError(_("field '%s' in class %s should be in type (Field, Packet, list)")%(fieldname, name))
_fields.append((fieldname, field))
if isinstance(field, Field):
attrs[fieldname] = field.default_value
if '_fields' in attrs:
raise NettestError(_("the name '_fields' is reserved in class %s")%(name))
attrs['_fields']= _fields
return super(PacketMeta, cls).__new__(cls, name, bases, attrs)
@staticmethod
def __check_field_type(cls, field):
if not isinstance(field, (Field, Packet, list)):
return False
if isinstance(field, (list)):
for subfield in field:
if not cls.__check_field_type(cls, subfield):
return False
return True
class BitDumper(object):
def __init__(self):
self.data= []
self.data_len = []
self.data_len_sum = 0
def clear(self):
self.data = []
self.data_len = []
self.data_len_sum = 0
def push(self, data, length):
data = int(data)
        if data < 0 or data >= 2**length:
raise NettestError(_("bit value out of range"))
self.data.append(data)
self.data_len.append(length)
self.data_len_sum += length
def dump(self):
if self.data_len_sum % 8 != 0:
raise NettestError(_("incorrect bit field length"))
data = 0
left_len = self.data_len_sum
index = 0
for field_data in self.data:
data += field_data<<(left_len - self.data_len[index])
left_len -= self.data_len[index]
index += 1
        length = self.data_len_sum // 8
if length == 1:
return struct.pack('!B', int(data))
elif length == 2:
return struct.pack('!H', int(data))
elif length == 4:
return struct.pack('!I', int(data))
elif length == 8:
return struct.pack('!Q', int(data))
else:
raise NettestError(_("too long bit field"))
class BitLoader(object):
def __init__(self, packet):
self.fields = []
self.bit_len_sum = 0
self.packet = packet
def clear(self):
self.fields = []
self.bit_len_sum = 0
def push(self, fieldname, field):
self.fields.append((fieldname,field))
self.bit_len_sum += field.length
def load(self, data):
if self.bit_len_sum % 8 != 0:
raise NettestError(_("incorrect bit field length"))
byte_len = int(self.bit_len_sum / 8)
data = data[:byte_len]
loaded_len = 0
for field_name, field in self.fields:
field_data = field.from_netbytes(data, loaded_len)
loaded_len += field.length
setattr(self.packet, field_name, field_data)
return byte_len
class Packet(object, metaclass=PacketMeta):
'''define field order
'''
fields=[]
def __init__(self):
for field_name, field in self._fields:
if isinstance(field, Packet):
setattr(self, field_name, field.__class__())
def dump(self):
'''Serialize self to bytes
'''
data = b''
bit_dumper = BitDumper()
for field_name, field in self._fields:
field_value = getattr(self, field_name)
if field_value is None:
raise NettestError(_("%s is None and haven't default value")%(field_name))
if isinstance(field, BitField):
bit_dumper.push(field_value, field.length)
continue
else:
if bit_dumper.data_len_sum > 0:
data += bit_dumper.dump()
bit_dumper.clear()
if isinstance(field, Packet):
data += field_value.dump()
continue
data += field.to_netbytes(field_value)
if bit_dumper.data_len_sum > 0:
data += bit_dumper.dump()
return data
# def __dump_list_data(self, fields):
# data = b''
# for field in fields:
# if isinstance(field, Packet):
# data += field.dump()
# continue
# if isinstance(field, list):
# data += self.__dump_list_data()
# continue
# if isinstance(field, Field):
# data += field.to_netbytes(field_value)
# continue
def load(self, data):
'''Deserialize bytes to a self.
if success, return the total data length used
else return None
'''
loaded_len = 0
bit_loader = BitLoader(self)
for field_name, field in self._fields:
if isinstance(field, BitField):
bit_loader.push(field_name, field)
continue
else:
if bit_loader.bit_len_sum > 0:
loaded_len += bit_loader.load(data[loaded_len:])
bit_loader.clear()
if isinstance(field, Packet):
field_value = getattr(self, field_name)
length = field_value.load(data[loaded_len:])
if length is None:
return None
loaded_len += length
continue
field_data = field.from_netbytes(data[loaded_len:])
if field_data is None:
return None
loaded_len += field.length
setattr(self, field_name, field_data)
if bit_loader.bit_len_sum > 0:
loaded_len += bit_loader.load(data[loaded_len:])
return loaded_len
def to_printable(self):
string = ''
string += '-'*20+str(self.__class__.__name__)+'-'*20+'\n'
for field_name, field in self._fields:
field_value = getattr(self, field_name)
if field_value is None:
string += '%s\tNone\n'%(field_name)
elif isinstance(field, Packet):
string += '%s\t%s\n'%(field_name, field_value.to_printable())
else:
string += '%s\t%s\n'%(field_name, field.to_printable(field_value))
string += '-'*(40+len(self.__class__.__name__))+'\n'
return string
def __eq__(self, other):
for field_name in self.fields:
field_value1 = getattr(self, field_name)
field_value2 = getattr(other, field_name)
if field_value1 != field_value2:
return False
return True
@property
def length(self):
total_len = 0
bit_len = 0
for field_name, field in self._fields:
if isinstance(field, BitField):
bit_len += field.length
elif field.length > 0:
total_len += field.length
else:
field_value = getattr(self, field_name)
total_len += len(field_value)
total_len += int(bit_len/8)
return total_len
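# --- Hedged usage sketch (illustrative addition, not part of the original
# module): BitDumper packs sub-byte fields MSB-first, so two 4-bit values
# 0b1010 and 0b0101 combine into the single byte 0xA5.
def _demo_bitdumper():
    dumper = BitDumper()
    dumper.push(0b1010, 4)  # high nibble
    dumper.push(0b0101, 4)  # low nibble
    assert dumper.dump() == b'\xa5'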
|
public0821/nettest
|
nettest/packets/base.py
|
Python
|
apache-2.0
| 7,940
| 0.005038
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
utils
=====
Utility functions for matching.py.
"""
import datetime
import json
import os
#testme
def parse_timestamp(t):
"""Parse MediaWiki-style timestamps and return a datetime."""
if t == '0000-00-00T00:00:00Z':
return None
else:
return datetime.datetime.strptime(t, '%Y-%m-%dT%H:%M:%SZ')
def load_config(filepath):
"""Given the path to the config file, opens and returns the dict."""
configfile = os.path.join(filepath, 'config.json')
with open(configfile, 'rb') as configf:
config = json.loads(configf.read())
return config
#testme
def make_category_string(categories):
"""Given a list of categories, return the |-separated string."""
return '|'.join(categories)
def timelog(run_time, filepath):
"""Get the timestamp from the last run, then log the current time
(UTC).
"""
timelogfile = os.path.join(filepath, 'time.log') # fixme this currently only works because filepath is in the enclosing scope (main)
try:
with open(timelogfile, 'r+b') as timelog:
prevruntimestamp = timelog.read()
timelog.seek(0)
timelog.write(datetime.datetime.strftime(run_time,
'%Y-%m-%dT%H:%M:%SZ'))
timelog.truncate()
except IOError:
with open(timelogfile, 'wb') as timelog:
prevruntimestamp = ''
timelog.write(datetime.datetime.strftime(run_time,
'%Y-%m-%dT%H:%M:%SZ'))
return prevruntimestamp
#testme
def buildgreeting(greeting, username, ideas):
"""Create a customized greeting string to be posted to a talk page
to present the IdeaLab member with a list of interesting ideas.
Return the wikitext-formatted greeting string.
"""
idea_string = ''
for idea in ideas:
title = idea['profile_title']
idea_string = u'{}* [[{}]]\n'.format(idea_string, title)
full_greeting = greeting.format(username, idea_string)
return full_greeting
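# --- Hedged usage sketch (illustrative addition; the greeting template,
# username and idea dict below are made up for the demo).
if __name__ == '__main__':
    print(parse_timestamp('2014-01-02T03:04:05Z'))  # 2014-01-02 03:04:05
    ideas = [{'profile_title': 'Grants:IdeaLab/Example'}]
    print(buildgreeting(u'Hello {0}!\n{1}', u'Sam', ideas))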
|
fhocutt/grantsbot-matching
|
matching/utils.py
|
Python
|
lgpl-3.0
| 2,104
| 0.002376
|
"""
Filename: mc_tools.py
Authors: John Stachurski and Thomas J. Sargent
"""
import numpy as np
from discrete_rv import DiscreteRV
def mc_compute_stationary(P):
"""
Computes the stationary distribution of Markov matrix P.
Parameters
===========
P : a square 2D NumPy array
Returns: A flat array giving the stationary distribution
"""
n = len(P) # P is n x n
I = np.identity(n) # Identity matrix
B, b = np.ones((n, n)), np.ones((n, 1)) # Matrix and vector of ones
A = np.transpose(I - P + B)
solution = np.linalg.solve(A, b)
return solution.flatten() # Return a flat array
def mc_sample_path(P, init=0, sample_size=1000):
"""
Generates one sample path from a finite Markov chain with (n x n) Markov
matrix P on state space S = {0,...,n-1}.
Parameters
==========
P : A nonnegative 2D NumPy array with rows that sum to 1
init : Either an integer in S or a nonnegative array of length n
with elements that sum to 1
sample_size : int
If init is an integer, the integer is treated as the determinstic initial
condition. If init is a distribution on S, then X_0 is drawn from this
distribution.
Returns
========
A NumPy array containing the sample path
"""
# === set up array to store output === #
X = np.empty(sample_size, dtype=int)
if isinstance(init, int):
X[0] = init
else:
X[0] = DiscreteRV(init).draw()
# === turn each row into a distribution === #
# In particular, let P_dist[i] be the distribution corresponding to the
# i-th row P[i,:]
n = len(P)
P_dist = [DiscreteRV(P[i,:]) for i in range(n)]
# === generate the sample path === #
for t in range(sample_size - 1):
X[t+1] = P_dist[X[t]].draw()
return X
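# --- Hedged usage sketch (illustrative addition; assumes discrete_rv is
# importable as in the repo): the stationary distribution pi solves
# pi = pi * P, and for this 2-state chain it is (5/6, 1/6).
if __name__ == '__main__':
    P = np.array([[0.9, 0.1],
                  [0.5, 0.5]])
    print(mc_compute_stationary(P))  # -> [0.8333... 0.1666...]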
|
beeleb/Kandori-Mailath-Rob
|
mc_tools.py
|
Python
|
bsd-3-clause
| 1,915
| 0.0047
|
# -*- coding: utf-8 -*-
'''
This module implements :class:`IrregularlySampledSignal`, an array of analog
signals with samples taken at arbitrary time points.
:class:`IrregularlySampledSignal` derives from :class:`BaseNeo`, from
:module:`neo.core.baseneo`, and from :class:`quantites.Quantity`, which
inherits from :class:`numpy.array`.
Inheritance from :class:`numpy.array` is explained here:
http://docs.scipy.org/doc/numpy/user/basics.subclassing.html
In brief:
* Initialization of a new object from constructor happens in :meth:`__new__`.
This is where user-specified attributes are set.
* :meth:`__array_finalize__` is called for all new objects, including those
created by slicing. This is where attributes are copied over from
the old object.
'''
# needed for Python 3 compatibility
from __future__ import absolute_import, division, print_function
import numpy as np
import quantities as pq
from neo.core.baseneo import BaseNeo, MergeError, merge_annotations
def _new_IrregularlySampledSignal(cls, times, signal, units=None, time_units=None, dtype=None,
copy=True, name=None, file_origin=None, description=None,
annotations=None):
'''
A function to map IrregularlySampledSignal.__new__ to function that
does not do the unit checking. This is needed for pickle to work.
'''
return cls(times=times, signal=signal, units=units, time_units=time_units,
dtype=dtype, copy=copy, name=name, file_origin=file_origin,
description=description, **annotations)
class IrregularlySampledSignal(BaseNeo, pq.Quantity):
'''
An array of one or more analog signals with samples taken at arbitrary time points.
A representation of one or more continuous, analog signals acquired at time
:attr:`t_start` with a varying sampling interval. Each channel is sampled
at the same time points.
*Usage*::
>>> from neo.core import IrregularlySampledSignal
>>> from quantities import s, nA
>>>
>>> irsig0 = IrregularlySampledSignal([0.0, 1.23, 6.78], [1, 2, 3],
... units='mV', time_units='ms')
>>> irsig1 = IrregularlySampledSignal([0.01, 0.03, 0.12]*s,
... [[4, 5], [5, 4], [6, 3]]*nA)
*Required attributes/properties*:
:times: (quantity array 1D, numpy array 1D, or list)
The time of each data point. Must have the same size as :attr:`signal`.
:signal: (quantity array 2D, numpy array 2D, or list (data, channel))
The data itself.
:units: (quantity units)
Required if the signal is a list or NumPy array, not if it is
a :class:`Quantity`.
:time_units: (quantity units) Required if :attr:`times` is a list or
NumPy array, not if it is a :class:`Quantity`.
*Recommended attributes/properties*:.
:name: (str) A label for the dataset
:description: (str) Text description.
:file_origin: (str) Filesystem path or URL of the original data file.
*Optional attributes/properties*:
:dtype: (numpy dtype or str) Override the dtype of the signal array.
(times are always floats).
:copy: (bool) True by default.
Note: Any other additional arguments are assumed to be user-specific
metadata and stored in :attr:`annotations`.
*Properties available on this object*:
:sampling_intervals: (quantity array 1D) Interval between each adjacent
pair of samples.
(``times[1:] - times[:-1]``)
:duration: (quantity scalar) Signal duration, read-only.
(``times[-1] - times[0]``)
:t_start: (quantity scalar) Time when signal begins, read-only.
(``times[0]``)
:t_stop: (quantity scalar) Time when signal ends, read-only.
(``times[-1]``)
*Slicing*:
:class:`IrregularlySampledSignal` objects can be sliced. When this
occurs, a new :class:`IrregularlySampledSignal` (actually a view) is
returned, with the same metadata, except that :attr:`times` is also
sliced in the same way.
*Operations available on this object*:
== != + * /
'''
_single_parent_objects = ('Segment', 'ChannelIndex')
_quantity_attr = 'signal'
_necessary_attrs = (('times', pq.Quantity, 1),
('signal', pq.Quantity, 2))
def __new__(cls, times, signal, units=None, time_units=None, dtype=None,
copy=True, name=None, file_origin=None,
description=None,
**annotations):
'''
Construct a new :class:`IrregularlySampledSignal` instance.
This is called whenever a new :class:`IrregularlySampledSignal` is
created from the constructor, but not when slicing.
'''
if units is None:
if hasattr(signal, "units"):
units = signal.units
else:
raise ValueError("Units must be specified")
elif isinstance(signal, pq.Quantity):
# could improve this test, what if units is a string?
if units != signal.units:
signal = signal.rescale(units)
if time_units is None:
if hasattr(times, "units"):
time_units = times.units
else:
raise ValueError("Time units must be specified")
elif isinstance(times, pq.Quantity):
# could improve this test, what if units is a string?
if time_units != times.units:
times = times.rescale(time_units)
# should check time units have correct dimensions
obj = pq.Quantity.__new__(cls, signal, units=units,
dtype=dtype, copy=copy)
if obj.ndim == 1:
obj = obj.reshape(-1, 1)
if len(times) != obj.shape[0]:
raise ValueError("times array and signal array must "
"have same length")
obj.times = pq.Quantity(times, units=time_units,
dtype=float, copy=copy)
obj.segment = None
obj.channel_index = None
return obj
def __init__(self, times, signal, units=None, time_units=None, dtype=None,
copy=True, name=None, file_origin=None, description=None,
**annotations):
'''
Initializes a newly constructed :class:`IrregularlySampledSignal`
instance.
'''
BaseNeo.__init__(self, name=name, file_origin=file_origin,
description=description, **annotations)
def __reduce__(self):
'''
Map the __new__ function onto _new_IrregularlySampledSignal, so that pickle
works
'''
return _new_IrregularlySampledSignal, (self.__class__,
self.times,
np.array(self),
self.units,
self.times.units,
self.dtype,
True,
self.name,
self.file_origin,
self.description,
self.annotations)
def __array_finalize__(self, obj):
'''
This is called every time a new :class:`IrregularlySampledSignal` is
created.
It is the appropriate place to set default values for attributes
for :class:`IrregularlySampledSignal` constructed by slicing or
viewing.
User-specified values are only relevant for construction from
constructor, and these are set in __new__. Then they are just
copied over here.
'''
super(IrregularlySampledSignal, self).__array_finalize__(obj)
self.times = getattr(obj, 'times', None)
# The additional arguments
self.annotations = getattr(obj, 'annotations', None)
# Globally recommended attributes
self.name = getattr(obj, 'name', None)
self.file_origin = getattr(obj, 'file_origin', None)
self.description = getattr(obj, 'description', None)
def __repr__(self):
'''
Returns a string representing the :class:`IrregularlySampledSignal`.
'''
return '<%s(%s at times %s)>' % (self.__class__.__name__,
super(IrregularlySampledSignal,
self).__repr__(), self.times)
def __getslice__(self, i, j):
'''
Get a slice from :attr:`i` to :attr:`j`.
Doesn't get called in Python 3, :meth:`__getitem__` is called instead
'''
return self.__getitem__(slice(i, j))
def __getitem__(self, i):
'''
Get the item or slice :attr:`i`.
'''
obj = super(IrregularlySampledSignal, self).__getitem__(i)
if isinstance(i, int): # a single point in time across all channels
obj = pq.Quantity(obj.magnitude, units=obj.units)
elif isinstance(i, tuple):
j, k = i
if isinstance(j, int): # a single point in time across some channels
obj = pq.Quantity(obj.magnitude, units=obj.units)
else:
if isinstance(j, slice):
obj.times = self.times.__getitem__(j)
elif isinstance(j, np.ndarray):
raise NotImplementedError("Arrays not yet supported")
else:
raise TypeError("%s not supported" % type(j))
if isinstance(k, int):
obj = obj.reshape(-1, 1)
elif isinstance(i, slice):
obj.times = self.times.__getitem__(i)
else:
raise IndexError("index should be an integer, tuple or slice")
return obj
@property
def duration(self):
'''
Signal duration.
(:attr:`times`[-1] - :attr:`times`[0])
'''
return self.times[-1] - self.times[0]
@property
def t_start(self):
'''
Time when signal begins.
(:attr:`times`[0])
'''
return self.times[0]
@property
def t_stop(self):
'''
Time when signal ends.
(:attr:`times`[-1])
'''
return self.times[-1]
def __eq__(self, other):
'''
Equality test (==)
'''
return (super(IrregularlySampledSignal, self).__eq__(other).all() and
(self.times == other.times).all())
def __ne__(self, other):
'''
Non-equality test (!=)
'''
return not self.__eq__(other)
def _apply_operator(self, other, op, *args):
'''
Handle copying metadata to the new :class:`IrregularlySampledSignal`
after a mathematical operation.
'''
self._check_consistency(other)
f = getattr(super(IrregularlySampledSignal, self), op)
new_signal = f(other, *args)
new_signal._copy_data_complement(self)
return new_signal
def _check_consistency(self, other):
'''
Check if the attributes of another :class:`IrregularlySampledSignal`
are compatible with this one.
'''
# if not an array, then allow the calculation
if not hasattr(other, 'ndim'):
return
# if a scalar array, then allow the calculation
if not other.ndim:
return
# dimensionality should match
if self.ndim != other.ndim:
raise ValueError('Dimensionality does not match: %s vs %s' %
(self.ndim, other.ndim))
# if if the other array does not have a times property,
# then it should be okay to add it directly
if not hasattr(other, 'times'):
return
# if there is a times property, the times need to be the same
if not (self.times == other.times).all():
raise ValueError('Times do not match: %s vs %s' %
(self.times, other.times))
def _copy_data_complement(self, other):
'''
Copy the metadata from another :class:`IrregularlySampledSignal`.
'''
for attr in ("times", "name", "file_origin",
"description", "annotations"):
setattr(self, attr, getattr(other, attr, None))
def __add__(self, other, *args):
'''
Addition (+)
'''
return self._apply_operator(other, "__add__", *args)
def __sub__(self, other, *args):
'''
Subtraction (-)
'''
return self._apply_operator(other, "__sub__", *args)
def __mul__(self, other, *args):
'''
Multiplication (*)
'''
return self._apply_operator(other, "__mul__", *args)
def __truediv__(self, other, *args):
'''
Float division (/)
'''
return self._apply_operator(other, "__truediv__", *args)
def __div__(self, other, *args):
'''
        Classic division (/) under Python 2
'''
return self._apply_operator(other, "__div__", *args)
__radd__ = __add__
    __rmul__ = __mul__
def __rsub__(self, other, *args):
'''
Backwards subtraction (other-self)
'''
return self.__mul__(-1) + other
def _repr_pretty_(self, pp, cycle):
'''
Handle pretty-printing the :class:`IrregularlySampledSignal`.
'''
pp.text("{cls} with {channels} channels of length {length}; "
"units {units}; datatype {dtype} ".format(
cls=self.__class__.__name__,
channels=self.shape[1],
length=self.shape[0],
units=self.units.dimensionality.string,
dtype=self.dtype))
if self._has_repr_pretty_attrs_():
pp.breakable()
self._repr_pretty_attrs_(pp, cycle)
def _pp(line):
pp.breakable()
with pp.group(indent=1):
pp.text(line)
for line in ["sample times: {0}".format(self.times)]:
_pp(line)
@property
def sampling_intervals(self):
'''
Interval between each adjacent pair of samples.
(:attr:`times[1:]` - :attr:`times`[:-1])
'''
return self.times[1:] - self.times[:-1]
def mean(self, interpolation=None):
'''
Calculates the mean, optionally using interpolation between sampling
times.
If :attr:`interpolation` is None, we assume that values change
stepwise at sampling times.
'''
if interpolation is None:
return (self[:-1]*self.sampling_intervals.reshape(-1, 1)).sum()/self.duration
else:
raise NotImplementedError
def resample(self, at=None, interpolation=None):
'''
Resample the signal, returning either an :class:`AnalogSignal` object
or another :class:`IrregularlySampledSignal` object.
Arguments:
:at: either a :class:`Quantity` array containing the times at
which samples should be created (times must be within the
signal duration, there is no extrapolation), a sampling rate
with dimensions (1/Time) or a sampling interval
with dimensions (Time).
:interpolation: one of: None, 'linear'
'''
# further interpolation methods could be added
raise NotImplementedError
def rescale(self, units):
'''
Return a copy of the :class:`IrregularlySampledSignal` converted to the
specified units
'''
to_dims = pq.quantity.validate_dimensionality(units)
if self.dimensionality == to_dims:
to_u = self.units
signal = np.array(self)
else:
to_u = pq.Quantity(1.0, to_dims)
from_u = pq.Quantity(1.0, self.dimensionality)
try:
cf = pq.quantity.get_conversion_factor(from_u, to_u)
except AssertionError:
raise ValueError('Unable to convert between units of "%s" \
and "%s"' % (from_u._dimensionality,
to_u._dimensionality))
signal = cf * self.magnitude
new = self.__class__(times=self.times, signal=signal, units=to_u)
new._copy_data_complement(self)
new.annotations.update(self.annotations)
return new
def merge(self, other):
'''
Merge another :class:`IrregularlySampledSignal` with this one, and return the
merged signal.
The :class:`IrregularlySampledSignal` objects are concatenated horizontally
(column-wise, :func:`np.hstack`).
If the attributes of the two :class:`IrregularlySampledSignal` are not
compatible, a :class:`MergeError` is raised.
'''
if not np.array_equal(self.times, other.times):
raise MergeError("Cannot merge these two signals as the sample times differ.")
if self.segment != other.segment:
raise MergeError("Cannot merge these two signals as they belong to different segments.")
if hasattr(self, "lazy_shape"):
if hasattr(other, "lazy_shape"):
if self.lazy_shape[0] != other.lazy_shape[0]:
raise MergeError("Cannot merge signals of different length.")
merged_lazy_shape = (self.lazy_shape[0], self.lazy_shape[1] + other.lazy_shape[1])
else:
raise MergeError("Cannot merge a lazy object with a real object.")
if other.units != self.units:
other = other.rescale(self.units)
stack = np.hstack(map(np.array, (self, other)))
kwargs = {}
for name in ("name", "description", "file_origin"):
attr_self = getattr(self, name)
attr_other = getattr(other, name)
if attr_self == attr_other:
kwargs[name] = attr_self
else:
kwargs[name] = "merge(%s, %s)" % (attr_self, attr_other)
merged_annotations = merge_annotations(self.annotations,
other.annotations)
kwargs.update(merged_annotations)
signal = IrregularlySampledSignal(self.times, stack, units=self.units,
dtype=self.dtype, copy=False,
**kwargs)
signal.segment = self.segment
if hasattr(self, "lazy_shape"):
signal.lazy_shape = merged_lazy_shape
return signal
    def time_slice(self, t_start, t_stop):
'''
Creates a new :class:`IrregularlySampledSignal` corresponding to the time slice of
the original :class:`IrregularlySampledSignal` between times
`t_start` and `t_stop`. Either parameter can also be None
to use infinite endpoints for the time interval.
'''
_t_start = t_start
_t_stop = t_stop
if t_start is None:
_t_start = -np.inf
if t_stop is None:
_t_stop = np.inf
indices = (self.times >= _t_start) & (self.times <= _t_stop)
        id_start = None
        id_stop = None
        for count, flag in enumerate(indices):
            if id_start is None:
                if flag:
                    id_start = count
            else:
                if not flag:
                    id_stop = count
                    break
new_st = self[id_start:id_stop]
return new_st
def as_array(self, units=None):
"""
Return the signal as a plain NumPy array.
If `units` is specified, first rescale to those units.
"""
if units:
return self.rescale(units).magnitude
else:
return self.magnitude
def as_quantity(self):
"""
Return the signal as a quantities array.
"""
return self.view(pq.Quantity)
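# --- Hedged usage sketch (illustrative addition; the values are made up).
# With samples [2, 4, 6] mV at times [0, 1, 3] s, the stepwise
# (interval-weighted) mean is (2*1 + 4*2) mV*s / 3 s = 10/3 mV.
if __name__ == '__main__':
    sig = IrregularlySampledSignal([0.0, 1.0, 3.0] * pq.s,
                                   [2.0, 4.0, 6.0] * pq.mV)
    print(sig.sampling_intervals)  # [1. 2.] s
    print(sig.duration)            # 3.0 s
    print(sig.mean())              # ~3.333 mV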
|
CINPLA/expipe-dev
|
python-neo/neo/core/irregularlysampledsignal.py
|
Python
|
gpl-3.0
| 20,385
| 0.001864
|
from __future__ import unicode_literals
from datetime import datetime, date
import os
import time
from django.utils.dateformat import format
from django.utils import dateformat, translation, unittest
from django.utils.timezone import utc
from django.utils.tzinfo import FixedOffset, LocalTimezone
class DateFormatTests(unittest.TestCase):
def setUp(self):
self.old_TZ = os.environ.get('TZ')
os.environ['TZ'] = 'Europe/Copenhagen'
translation.activate('en-us')
try:
# Check if a timezone has been set
time.tzset()
self.tz_tests = True
except AttributeError:
# No timezone available. Don't run the tests that require a TZ
self.tz_tests = False
def tearDown(self):
if self.old_TZ is None:
del os.environ['TZ']
else:
os.environ['TZ'] = self.old_TZ
# Cleanup - force re-evaluation of TZ environment variable.
if self.tz_tests:
time.tzset()
def test_date(self):
d = date(2009, 5, 16)
self.assertEqual(date.fromtimestamp(int(format(d, 'U'))), d)
def test_naive_datetime(self):
dt = datetime(2009, 5, 16, 5, 30, 30)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U'))), dt)
def test_datetime_with_local_tzinfo(self):
ltz = LocalTimezone(datetime.now())
dt = datetime(2009, 5, 16, 5, 30, 30, tzinfo=ltz)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), ltz), dt)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U'))), dt.replace(tzinfo=None))
def test_datetime_with_tzinfo(self):
tz = FixedOffset(-510)
ltz = LocalTimezone(datetime.now())
dt = datetime(2009, 5, 16, 5, 30, 30, tzinfo=tz)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), tz), dt)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), ltz), dt)
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U'))), dt.astimezone(ltz).replace(tzinfo=None))
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), tz).utctimetuple(), dt.utctimetuple())
self.assertEqual(datetime.fromtimestamp(int(format(dt, 'U')), ltz).utctimetuple(), dt.utctimetuple())
def test_epoch(self):
udt = datetime(1970, 1, 1, tzinfo=utc)
self.assertEqual(format(udt, 'U'), '0')
def test_empty_format(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
self.assertEqual(dateformat.format(my_birthday, ''), '')
def test_am_pm(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
self.assertEqual(dateformat.format(my_birthday, 'a'), 'p.m.')
def test_microsecond(self):
# Regression test for #18951
dt = datetime(2009, 5, 16, microsecond=123)
self.assertEqual(dateformat.format(dt, 'u'), '000123')
def test_date_formats(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
timestamp = datetime(2008, 5, 19, 11, 45, 23, 123456)
self.assertEqual(dateformat.format(my_birthday, 'A'), 'PM')
self.assertEqual(dateformat.format(timestamp, 'c'), '2008-05-19T11:45:23.123456')
self.assertEqual(dateformat.format(my_birthday, 'd'), '08')
self.assertEqual(dateformat.format(my_birthday, 'j'), '8')
self.assertEqual(dateformat.format(my_birthday, 'l'), 'Sunday')
self.assertEqual(dateformat.format(my_birthday, 'L'), 'False')
self.assertEqual(dateformat.format(my_birthday, 'm'), '07')
self.assertEqual(dateformat.format(my_birthday, 'M'), 'Jul')
self.assertEqual(dateformat.format(my_birthday, 'b'), 'jul')
self.assertEqual(dateformat.format(my_birthday, 'n'), '7')
self.assertEqual(dateformat.format(my_birthday, 'N'), 'July')
def test_time_formats(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
self.assertEqual(dateformat.format(my_birthday, 'P'), '10 p.m.')
self.assertEqual(dateformat.format(my_birthday, 's'), '00')
self.assertEqual(dateformat.format(my_birthday, 'S'), 'th')
self.assertEqual(dateformat.format(my_birthday, 't'), '31')
self.assertEqual(dateformat.format(my_birthday, 'w'), '0')
self.assertEqual(dateformat.format(my_birthday, 'W'), '27')
self.assertEqual(dateformat.format(my_birthday, 'y'), '79')
self.assertEqual(dateformat.format(my_birthday, 'Y'), '1979')
self.assertEqual(dateformat.format(my_birthday, 'z'), '189')
def test_dateformat(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
self.assertEqual(dateformat.format(my_birthday, r'Y z \C\E\T'), '1979 189 CET')
self.assertEqual(dateformat.format(my_birthday, r'jS \o\f F'), '8th of July')
def test_futuredates(self):
the_future = datetime(2100, 10, 25, 0, 00)
self.assertEqual(dateformat.format(the_future, r'Y'), '2100')
def test_timezones(self):
my_birthday = datetime(1979, 7, 8, 22, 00)
summertime = datetime(2005, 10, 30, 1, 00)
wintertime = datetime(2005, 10, 30, 4, 00)
timestamp = datetime(2008, 5, 19, 11, 45, 23, 123456)
if self.tz_tests:
self.assertEqual(dateformat.format(my_birthday, 'O'), '+0100')
self.assertEqual(dateformat.format(my_birthday, 'r'), 'Sun, 8 Jul 1979 22:00:00 +0100')
self.assertEqual(dateformat.format(my_birthday, 'T'), 'CET')
self.assertEqual(dateformat.format(my_birthday, 'U'), '300315600')
self.assertEqual(dateformat.format(timestamp, 'u'), '123456')
self.assertEqual(dateformat.format(my_birthday, 'Z'), '3600')
self.assertEqual(dateformat.format(summertime, 'I'), '1')
self.assertEqual(dateformat.format(summertime, 'O'), '+0200')
self.assertEqual(dateformat.format(wintertime, 'I'), '0')
self.assertEqual(dateformat.format(wintertime, 'O'), '+0100')
# Ticket #16924 -- We don't need timezone support to test this
# 3h30m to the west of UTC
tz = FixedOffset(-3*60 - 30)
dt = datetime(2009, 5, 16, 5, 30, 30, tzinfo=tz)
self.assertEqual(dateformat.format(dt, 'O'), '-0330')
|
openhatch/new-mini-tasks
|
vendor/packages/Django/tests/regressiontests/utils/dateformat.py
|
Python
|
apache-2.0
| 6,241
| 0.001282
|
# Copyright 2016 Splunk, Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License'): you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
'''
This module contains simple interfaces for Splunk config file management,
you can update/get/delete stanzas and encrypt/decrypt some fields of stanza
automatically.
'''
import json
import logging
import traceback
from . import splunk_rest_client as rest_client
from .credentials import CredentialManager
from .credentials import CredentialNotExistException
from .packages.splunklib import binding
from .utils import retry
__all__ = ['ConfStanzaNotExistException',
'ConfFile',
'ConfManagerException',
'ConfManager']
class ConfStanzaNotExistException(Exception):
pass
class ConfFile(object):
'''Configuration file.
:param name: Configuration file name.
:type name: ``string``
:param conf: Configuration file object.
:type conf: ``splunklib.client.ConfigurationFile``
:param session_key: Splunk access token.
:type session_key: ``string``
:param app: App name of namespace.
:type app: ``string``
:param owner: (optional) Owner of namespace, default is `nobody`.
:type owner: ``string``
:param realm: (optional) Realm of credential, default is None.
:type realm: ``string``
:param scheme: (optional) The access scheme, default is None.
:type scheme: ``string``
:param host: (optional) The host name, default is None.
:type host: ``string``
:param port: (optional) The port number, default is None.
:type port: ``integer``
:param context: Other configurations for Splunk rest client.
:type context: ``dict``
'''
ENCRYPTED_TOKEN = '******'
reserved_keys = ('userName', 'appName')
def __init__(self, name, conf, session_key, app, owner='nobody',
                 scheme=None, host=None, port=None, realm=None, **context):
self._name = name
self._conf = conf
self._session_key = session_key
self._app = app
self._owner = owner
self._scheme = scheme
self._host = host
self._port = port
self._context = context
self._cred_manager = None
        # 'realm' is set to the provided argument; by default it falls back
        # to the app name.
if realm is None:
self._realm = self._app
else:
self._realm = realm
@property
@retry(exceptions=[binding.HTTPError])
def _cred_mgr(self):
if self._cred_manager is None:
self._cred_manager = CredentialManager(
self._session_key, self._app, owner=self._owner,
realm=self._realm, scheme=self._scheme, host=self._host,
port=self._port, **self._context)
return self._cred_manager
def _filter_stanza(self, stanza):
for k in self.reserved_keys:
if k in stanza:
del stanza[k]
return stanza
def _encrypt_stanza(self, stanza_name, stanza, encrypt_keys):
if not encrypt_keys:
return stanza
        encrypt_stanza_keys = [k for k in encrypt_keys if k in stanza]
encrypt_fields = {key: stanza[key] for key in encrypt_stanza_keys}
if not encrypt_fields:
return stanza
self._cred_mgr.set_password(stanza_name, json.dumps(encrypt_fields))
for key in encrypt_stanza_keys:
stanza[key] = self.ENCRYPTED_TOKEN
return stanza
def _decrypt_stanza(self, stanza_name, encrypted_stanza):
encrypted_keys = [key for key in encrypted_stanza if
encrypted_stanza[key] == self.ENCRYPTED_TOKEN]
if encrypted_keys:
encrypted_fields = json.loads(
self._cred_mgr.get_password(stanza_name))
for key in encrypted_keys:
encrypted_stanza[key] = encrypted_fields[key]
return encrypted_stanza
def _delete_stanza_creds(self, stanza_name):
self._cred_mgr.delete_password(stanza_name)
@retry(exceptions=[binding.HTTPError])
def stanza_exist(self, stanza_name):
'''Check whether stanza exists.
:param stanza_name: Stanza name.
:type stanza_name: ``string``
:returns: True if stanza exists else False.
:rtype: ``bool``
Usage::
>>> from solnlib import conf_manager
>>> cfm = conf_manager.ConfManager(session_key,
'Splunk_TA_test')
>>> conf = cfm.get_conf('test')
>>> conf.stanza_exist('test_stanza')
'''
try:
self._conf.list(name=stanza_name)[0]
except binding.HTTPError as e:
if e.status != 404:
raise
return False
return True
@retry(exceptions=[binding.HTTPError])
def get(self, stanza_name, only_current_app=False):
'''Get stanza from configuration file.
:param stanza_name: Stanza name.
:type stanza_name: ``string``
:returns: Stanza, like: {
'disabled': '0',
'eai:appName': 'solnlib_demo',
'eai:userName': 'nobody',
'k1': '1',
'k2': '2'}
:rtype: ``dict``
:raises ConfStanzaNotExistException: If stanza does not exist.
Usage::
>>> from solnlib import conf_manager
>>> cfm = conf_manager.ConfManager(session_key,
'Splunk_TA_test')
>>> conf = cfm.get_conf('test')
>>> conf.get('test_stanza')
'''
try:
if only_current_app:
stanza_mgrs = self._conf.list(
search='eai:acl.app={} name={}'.format(
self._app, stanza_name.replace('=', r'\=')))
else:
stanza_mgrs = self._conf.list(name=stanza_name)
except binding.HTTPError as e:
if e.status != 404:
raise
raise ConfStanzaNotExistException(
'Stanza: %s does not exist in %s.conf' %
(stanza_name, self._name))
if len(stanza_mgrs) == 0:
raise ConfStanzaNotExistException(
'Stanza: %s does not exist in %s.conf' %
(stanza_name, self._name))
stanza = self._decrypt_stanza(stanza_mgrs[0].name, stanza_mgrs[0].content)
stanza['eai:access'] = stanza_mgrs[0].access
stanza['eai:appName'] = stanza_mgrs[0].access.app
return stanza
@retry(exceptions=[binding.HTTPError])
def get_all(self, only_current_app=False):
'''Get all stanzas from configuration file.
:returns: All stanzas, like: {'test': {
'disabled': '0',
'eai:appName': 'solnlib_demo',
'eai:userName': 'nobody',
'k1': '1',
'k2': '2'}}
:rtype: ``dict``
Usage::
>>> from solnlib import conf_manager
>>> cfm = conf_manager.ConfManager(session_key,
'Splunk_TA_test')
>>> conf = cfm.get_conf('test')
>>> conf.get_all()
'''
if only_current_app:
stanza_mgrs = self._conf.list(search='eai:acl.app={}'.format(self._app))
else:
stanza_mgrs = self._conf.list()
res = {}
for stanza_mgr in stanza_mgrs:
name = stanza_mgr.name
key_values = self._decrypt_stanza(name, stanza_mgr.content)
key_values['eai:access'] = stanza_mgr.access
key_values['eai:appName'] = stanza_mgr.access.app
res[name] = key_values
return res
@retry(exceptions=[binding.HTTPError])
def update(self, stanza_name, stanza, encrypt_keys=None):
'''Update stanza.
        It will try to encrypt the credential automatically first if
        encrypt_keys is not None, otherwise the stanza is kept untouched.
:param stanza_name: Stanza name.
:type stanza_name: ``string``
:param stanza: Stanza to update, like: {
'k1': 1,
'k2': 2}.
:type stanza: ``dict``
:param encrypt_keys: Fields name to encrypt.
:type encrypt_keys: ``list``
Usage::
>>> from solnlib import conf_manager
>>> cfm = conf_manager.ConfManager(session_key,
'Splunk_TA_test')
>>> conf = cfm.get_conf('test')
>>> conf.update('test_stanza', {'k1': 1, 'k2': 2}, ['k1'])
'''
stanza = self._filter_stanza(stanza)
encrypted_stanza = self._encrypt_stanza(stanza_name,
stanza,
encrypt_keys)
try:
stanza_mgr = self._conf.list(name=stanza_name)[0]
except binding.HTTPError as e:
if e.status != 404:
raise
stanza_mgr = self._conf.create(stanza_name)
stanza_mgr.submit(encrypted_stanza)
@retry(exceptions=[binding.HTTPError])
def delete(self, stanza_name):
'''Delete stanza.
:param stanza_name: Stanza name to delete.
:type stanza_name: ``string``
:raises ConfStanzaNotExistException: If stanza does not exist.
Usage::
>>> from solnlib import conf_manager
>>> cfm = conf_manager.ConfManager(session_key,
'Splunk_TA_test')
>>> conf = cfm.get_conf('test')
>>> conf.delete('test_stanza')
'''
try:
self._cred_mgr.delete_password(stanza_name)
except CredentialNotExistException:
pass
try:
self._conf.delete(stanza_name)
except KeyError as e:
logging.error('Delete stanza: %s error: %s.',
stanza_name, traceback.format_exc())
raise ConfStanzaNotExistException(
'Stanza: %s does not exist in %s.conf' %
(stanza_name, self._name))
@retry(exceptions=[binding.HTTPError])
def reload(self):
'''Reload configuration file.
Usage::
>>> from solnlib import conf_manager
>>> cfm = conf_manager.ConfManager(session_key,
'Splunk_TA_test')
>>> conf = cfm.get_conf('test')
>>> conf.reload()
'''
self._conf.get('_reload')
class ConfManagerException(Exception):
pass
class ConfManager(object):
'''Configuration file manager.
:param session_key: Splunk access token.
:type session_key: ``string``
:param app: App name of namespace.
:type app: ``string``
:param owner: (optional) Owner of namespace, default is `nobody`.
:type owner: ``string``
:param realm: (optional) Realm of credential, default is None.
:type realm: ``string``
:param scheme: (optional) The access scheme, default is None.
:type scheme: ``string``
:param host: (optional) The host name, default is None.
:type host: ``string``
:param port: (optional) The port number, default is None.
:type port: ``integer``
:param context: Other configurations for Splunk rest client.
:type context: ``dict``
Usage::
>>> from solnlib import conf_manager
>>> cfm = conf_manager.ConfManager(session_key,
'Splunk_TA_test')
EXAMPLE:
If stanza in passwords.conf is formatted as below:
[credential:__REST_CREDENTIAL__#Splunk_TA_test#configs/conf-CONF_FILENAME:STANZA_NAME``splunk_cred_sep``1:]
>>> from solnlib import conf_manager
>>> cfm = conf_manager.ConfManager(session_key,
'Splunk_TA_test', realm='__REST_CREDENTIAL__#Splunk_TA_test#configs/conf-CONF_FILENAME')
'''
def __init__(self, session_key, app, owner='nobody',
scheme=None, host=None, port=None, realm=None, **context):
self._session_key = session_key
self._app = app
self._owner = owner
self._scheme = scheme
self._host = host
self._port = port
self._context = context
self._rest_client = rest_client.SplunkRestClient(
self._session_key,
self._app,
owner=self._owner,
scheme=self._scheme,
host=self._host,
port=self._port,
**self._context)
self._confs = None
self._realm = realm
@retry(exceptions=[binding.HTTPError])
def get_conf(self, name, refresh=False):
'''Get conf file.
:param name: Conf file name.
:type name: ``string``
:param refresh: (optional) Flag to refresh conf file list, default is False.
:type refresh: ``bool``
:returns: Conf file object.
:rtype: ``solnlib.conf_manager.ConfFile``
:raises ConfManagerException: If `conf_file` does not exist.
'''
if self._confs is None or refresh:
            # Work around a bug where `-` cannot be passed as the app name.
curr_app = self._rest_client.namespace.app
self._rest_client.namespace.app = "dummy"
self._confs = self._rest_client.confs
self._rest_client.namespace.app = curr_app
try:
conf = self._confs[name]
except KeyError:
raise ConfManagerException(
'Config file: %s does not exist.' % name)
return ConfFile(name, conf,
self._session_key, self._app, self._owner,
self._scheme, self._host, self._port, self._realm, **self._context)
@retry(exceptions=[binding.HTTPError])
def create_conf(self, name):
'''Create conf file.
:param name: Conf file name.
:type name: ``string``
:returns: Conf file object.
:rtype: ``solnlib.conf_manager.ConfFile``
'''
if self._confs is None:
self._confs = self._rest_client.confs
conf = self._confs.create(name)
return ConfFile(name, conf,
self._session_key, self._app, self._owner,
self._scheme, self._host, self._port, self._realm, **self._context)
|
PaloAltoNetworks-BD/SplunkforPaloAltoNetworks
|
Splunk_TA_paloalto/bin/splunk_ta_paloalto/aob_py3/solnlib/conf_manager.py
|
Python
|
isc
| 14,825
| 0.000742
|
# -*- coding: utf-8 -*-
import os
import tempfile
import shutil
from django.conf import settings
from django.core.files.storage import default_storage as storage
from django.test.client import RequestFactory
from mock import patch
from olympia import amo, core
from olympia.addons import forms
from olympia.addons.models import Addon, Category
from olympia.amo.tests import TestCase, addon_factory, req_factory_factory
from olympia.amo.tests.test_helpers import get_image_path
from olympia.amo.utils import rm_local_tmp_dir
from olympia.tags.models import AddonTag, Tag
from olympia.users.models import UserProfile
class TestAddonFormSupport(TestCase):
def test_bogus_support_url(self):
form = forms.AddonFormSupport(
{'support_url': 'javascript://something.com'}, request=None)
assert not form.is_valid()
assert form.errors['support_url'][0][1] == u'Enter a valid URL.'
def test_ftp_support_url(self):
form = forms.AddonFormSupport(
{'support_url': 'ftp://foo.com'}, request=None)
assert not form.is_valid()
assert form.errors['support_url'][0][1] == u'Enter a valid URL.'
def test_http_support_url(self):
form = forms.AddonFormSupport(
{'support_url': 'http://foo.com'}, request=None)
assert form.is_valid()
class FormsTest(TestCase):
fixtures = ('base/addon_3615', 'base/addon_3615_categories',
'addons/denied')
def setUp(self):
super(FormsTest, self).setUp()
self.existing_name = 'Delicious Bookmarks'
self.non_existing_name = 'Does Not Exist'
self.error_msg = 'This name is already in use. Please choose another.'
self.request = req_factory_factory('/')
def test_locales(self):
form = forms.AddonFormDetails(request=self.request)
assert form.fields['default_locale'].choices[0][0] == 'af'
def test_slug_deny(self):
delicious = Addon.objects.get()
form = forms.AddonFormBasic({'slug': 'submit'}, request=self.request,
instance=delicious)
assert not form.is_valid()
assert form.errors['slug'] == (
[u'The slug cannot be "submit". Please choose another.'])
def test_name_trademark_mozilla(self):
delicious = Addon.objects.get()
form = forms.AddonFormBasic(
{'name': 'Delicious Mozilla', 'summary': 'foo', 'slug': 'bar'},
request=self.request,
instance=delicious)
assert not form.is_valid()
assert dict(form.errors['name'])['en-us'].startswith(
u'Add-on names cannot contain the Mozilla or Firefox trademarks.')
def test_name_trademark_firefox(self):
delicious = Addon.objects.get()
form = forms.AddonFormBasic(
{'name': 'Delicious Firefox', 'summary': 'foo', 'slug': 'bar'},
request=self.request,
instance=delicious)
assert not form.is_valid()
assert dict(form.errors['name'])['en-us'].startswith(
u'Add-on names cannot contain the Mozilla or Firefox trademarks.')
def test_name_trademark_allowed_for_prefix(self):
delicious = Addon.objects.get()
form = forms.AddonFormBasic(
{'name': 'Delicious for Mozilla', 'summary': 'foo', 'slug': 'bar'},
request=self.request,
instance=delicious)
assert form.is_valid()
def test_name_no_trademark(self):
delicious = Addon.objects.get()
form = forms.AddonFormBasic(
{'name': 'Delicious Dumdidum', 'summary': 'foo', 'slug': 'bar'},
request=self.request,
instance=delicious)
assert form.is_valid()
def test_bogus_homepage(self):
form = forms.AddonFormDetails(
{'homepage': 'javascript://something.com'}, request=self.request)
assert not form.is_valid()
assert form.errors['homepage'][0][1] == u'Enter a valid URL.'
def test_ftp_homepage(self):
form = forms.AddonFormDetails(
{'homepage': 'ftp://foo.com'}, request=self.request)
assert not form.is_valid()
assert form.errors['homepage'][0][1] == u'Enter a valid URL.'
def test_homepage_is_not_required(self):
delicious = Addon.objects.get()
form = forms.AddonFormDetails(
{'default_locale': 'en-US'},
request=self.request, instance=delicious)
assert form.is_valid()
def test_slug_isdigit(self):
delicious = Addon.objects.get()
form = forms.AddonFormBasic({'slug': '123'}, request=self.request,
instance=delicious)
assert not form.is_valid()
assert form.errors['slug'] == (
[u'The slug cannot be "123". Please choose another.'])
class TestTagsForm(TestCase):
fixtures = ['base/addon_3615', 'base/users']
def setUp(self):
super(TestTagsForm, self).setUp()
self.addon = Addon.objects.get(pk=3615)
category = Category.objects.get(pk=22)
category.db_name = 'test'
category.save()
self.data = {
'summary': str(self.addon.summary),
'name': str(self.addon.name),
'slug': self.addon.slug,
}
self.user = self.addon.authors.all()[0]
core.set_user(self.user)
self.request = req_factory_factory('/')
def add_tags(self, tags):
data = self.data.copy()
data.update({'tags': tags})
form = forms.AddonFormBasic(data=data, request=self.request,
instance=self.addon)
assert form.is_valid()
form.save(self.addon)
return form
def get_tag_text(self):
return [t.tag_text for t in self.addon.tags.all()]
def test_tags(self):
self.add_tags('foo, bar')
assert self.get_tag_text() == ['bar', 'foo']
def test_tags_xss(self):
self.add_tags('<script>alert("foo")</script>, bar')
assert self.get_tag_text() == ['bar', 'scriptalertfooscript']
def test_tags_case_spaces(self):
self.add_tags('foo, bar')
self.add_tags('foo, bar , Bar, BAR, b a r ')
assert self.get_tag_text() == ['b a r', 'bar', 'foo']
def test_tags_spaces(self):
self.add_tags('foo, bar beer')
assert self.get_tag_text() == ['bar beer', 'foo']
def test_tags_unicode(self):
self.add_tags(u'Österreich')
assert self.get_tag_text() == [u'Österreich'.lower()]
def add_restricted(self, *args):
if not args:
args = ['i_am_a_restricted_tag']
for arg in args:
tag = Tag.objects.create(tag_text=arg, restricted=True)
AddonTag.objects.create(tag=tag, addon=self.addon)
def test_tags_restricted(self):
self.add_restricted()
self.add_tags('foo, bar')
form = forms.AddonFormBasic(data=self.data, request=self.request,
instance=self.addon)
assert form.fields['tags'].initial == 'bar, foo'
assert self.get_tag_text() == ['bar', 'foo', 'i_am_a_restricted_tag']
self.add_tags('')
assert self.get_tag_text() == ['i_am_a_restricted_tag']
def test_tags_error(self):
self.add_restricted('i_am_a_restricted_tag', 'sdk')
data = self.data.copy()
data.update({'tags': 'i_am_a_restricted_tag'})
form = forms.AddonFormBasic(data=data, request=self.request,
instance=self.addon)
assert form.errors['tags'][0] == (
'"i_am_a_restricted_tag" is a reserved tag and cannot be used.')
data.update({'tags': 'i_am_a_restricted_tag, sdk'})
form = forms.AddonFormBasic(data=data, request=self.request,
instance=self.addon)
assert form.errors['tags'][0] == (
'"i_am_a_restricted_tag", "sdk" are reserved tags and'
' cannot be used.')
@patch('olympia.access.acl.action_allowed')
def test_tags_admin_restricted(self, action_allowed):
action_allowed.return_value = True
self.add_restricted('i_am_a_restricted_tag')
self.add_tags('foo, bar')
assert self.get_tag_text() == ['bar', 'foo']
self.add_tags('foo, bar, i_am_a_restricted_tag')
assert self.get_tag_text() == ['bar', 'foo', 'i_am_a_restricted_tag']
form = forms.AddonFormBasic(data=self.data, request=self.request,
instance=self.addon)
assert form.fields['tags'].initial == 'bar, foo, i_am_a_restricted_tag'
@patch('olympia.access.acl.action_allowed')
def test_tags_admin_restricted_count(self, action_allowed):
action_allowed.return_value = True
self.add_restricted()
self.add_tags('i_am_a_restricted_tag, %s' % (', '.join('tag-test-%s' %
i for i in range(0, 20))))
def test_tags_restricted_count(self):
self.add_restricted()
self.add_tags(', '.join('tag-test-%s' % i for i in range(0, 20)))
def test_tags_slugified_count(self):
self.add_tags(', '.join('tag-test' for i in range(0, 21)))
assert self.get_tag_text() == ['tag-test']
def test_tags_limit(self):
self.add_tags(' %s' % ('t' * 128))
def test_tags_long(self):
tag = ' -%s' % ('t' * 128)
data = self.data.copy()
data.update({"tags": tag})
form = forms.AddonFormBasic(data=data, request=self.request,
instance=self.addon)
assert not form.is_valid()
assert form.errors['tags'] == [
'All tags must be 128 characters or less after invalid characters'
' are removed.']
class TestIconForm(TestCase):
fixtures = ['base/addon_3615']
# TODO: AddonFormMedia save() method could do with cleaning up
# so this isn't necessary
def setUp(self):
super(TestIconForm, self).setUp()
self.temp_dir = tempfile.mkdtemp(dir=settings.TMP_PATH)
self.addon = Addon.objects.get(pk=3615)
class DummyRequest:
FILES = None
self.request = DummyRequest()
self.icon_path = os.path.join(settings.TMP_PATH, 'icon')
if not os.path.exists(self.icon_path):
os.makedirs(self.icon_path)
def tearDown(self):
rm_local_tmp_dir(self.temp_dir)
super(TestIconForm, self).tearDown()
def get_icon_paths(self):
path = os.path.join(self.addon.get_icon_dir(), str(self.addon.id))
return ['%s-%s.png' % (path, size) for size in amo.ADDON_ICON_SIZES]
@patch('olympia.addons.models.Addon.get_icon_dir')
def testIconUpload(self, get_icon_dir):
# TODO(gkoberger): clarify this please.
# We no longer use AddonFormMedia to upload icons, so
# skipping until I can ask andym what the point of this
# test is. Additionally, it's called "TestIconRemoval",
# but it doesn't seem to remove icons.
return
get_icon_dir.return_value = self.temp_dir
for path in self.get_icon_paths():
assert not os.path.exists(path)
img = get_image_path('non-animated.png')
data = {'icon_upload': img, 'icon_type': 'text/png'}
self.request.FILES = {'icon_upload': open(img)}
form = forms.AddonFormMedia(data=data, request=self.request,
instance=self.addon)
assert form.is_valid()
form.save(self.addon)
for path in self.get_icon_paths():
assert os.path.exists(path)
@patch('olympia.amo.models.ModelBase.update')
def test_icon_modified(self, update_mock):
name = 'transparent.png'
form = forms.AddonFormMedia({'icon_upload_hash': name},
request=self.request,
instance=self.addon)
dest = os.path.join(self.icon_path, name)
with storage.open(dest, 'w') as f:
shutil.copyfileobj(open(get_image_path(name)), f)
assert form.is_valid()
form.save(addon=self.addon)
assert update_mock.called
class TestCategoryForm(TestCase):
def test_no_possible_categories(self):
Category.objects.create(type=amo.ADDON_SEARCH,
application=amo.FIREFOX.id)
addon = addon_factory(type=amo.ADDON_SEARCH)
request = req_factory_factory('/')
form = forms.CategoryFormSet(addon=addon, request=request)
apps = [f.app for f in form.forms]
assert apps == [amo.FIREFOX]
class TestThemeForm(TestCase):
# Don't save image, we use a fake one.
@patch('olympia.addons.forms.save_theme')
def test_long_author_or_display_username(self, mock_save_theme):
# Bug 1181751.
user = UserProfile.objects.create(email='foo@bar.com',
username='a' * 255,
display_name='b' * 255)
request = RequestFactory()
request.user = user
cat = Category.objects.create(type=amo.ADDON_PERSONA)
form = forms.ThemeForm({
'name': 'my theme',
'slug': 'my-theme',
'category': cat.pk,
'header': 'some_file.png',
'agreed': True,
'header_hash': 'hash',
'license': 1}, request=request)
assert form.is_valid()
# Make sure there's no database issue, like too long data for the
# author or display_username fields.
form.save()
|
lavish205/olympia
|
src/olympia/addons/tests/test_forms.py
|
Python
|
bsd-3-clause
| 13,654
| 0
|
from collections import namedtuple
import select
StreamEvent = namedtuple( 'StreamEvent', [ 'fd', 'stream', 'data', 'direction', 'num_bytes', 'eof' ] )
class StreamWatcher(object):
def __init__( self ):
if _best_backend is None:
raise Exception( "No poll/queue backend could be found for your OS." )
self.backend = _best_backend( )
self.fd_map = {}
self.stream_map = {}
def watch( self, fd, data=None, read=True, write=False ):
# allow python file-like objects that have a backing fd
if hasattr(fd, 'fileno') and callable(fd.fileno):
stream = fd
fd = stream.fileno()
self.stream_map[fd] = stream
else:
self.stream_map[fd] = None
# associate user data with the fd
self.fd_map[fd] = data
# prepare any event filter additions
if read:
self.backend.watch_read( fd )
if write:
self.backend.watch_write( fd )
def wait( self, timeout=None, max_events=4 ):
return self.backend.wait(
timeout=timeout,
max_events=max_events,
fd_data_map=self.fd_map,
fd_stream_map=self.stream_map )
_best_backend = None
try:
from select import kqueue, kevent
except ImportError:
pass
else:
class KQueueBackend(object):
def __init__( self ):
self.kq = kqueue( )
def watch_read( self, fd ):
event = kevent( fd, filter=select.KQ_FILTER_READ, flags=select.KQ_EV_ADD )
self._add_events( [event] )
def watch_write( self, fd ):
event = kevent( fd, filter=select.KQ_FILTER_WRITE, flags=select.KQ_EV_ADD )
self._add_events( [event] )
def _add_events( self, new_events ):
e = self.kq.control( new_events, 0, 0 )
assert len(e) == 0, "Not expecting to receive any events while adding filters."
def wait( self, timeout=None, max_events=4, fd_data_map={}, fd_stream_map={} ):
r_events = self.kq.control( None, max_events, timeout )
e = []
for event in r_events:
fd = event.ident
if fd in fd_data_map:
stream = fd_stream_map.get( fd, None )
data = fd_data_map.get( fd, None )
direction = 'read' if event.filter == select.KQ_FILTER_READ else 'write'
num_bytes = event.data
eof = ( event.flags & select.KQ_EV_EOF != 0 )
e.append( StreamEvent( fd, stream, data, direction, num_bytes, eof ) )
return e
if _best_backend is None:
_best_backend = KQueueBackend
try:
from select import epoll
from fcntl import ioctl
import array
import termios
except ImportError:
pass
else:
class EPollBackend(object):
def __init__( self ):
self.ep = epoll( )
def watch_read( self, fd ):
self.ep.register( fd, select.EPOLLIN )
def watch_write( self, fd ):
self.ep.register( fd, select.EPOLLOUT )
def wait( self, timeout=None, max_events=None, fd_data_map={}, fd_stream_map={} ):
if max_events is None:
max_events = -1
if timeout is None:
timeout = -1
r_events = self.ep.poll( timeout, max_events )
e = []
for fd, event in r_events:
if fd in fd_data_map:
buf = array.array( 'i', [0] )
ioctl( fd, termios.FIONREAD, buf, 1 )
stream = fd_stream_map.get( fd, None )
data = fd_data_map.get( fd, None )
num_bytes = buf[0]
eof = ( event & (select.EPOLLHUP | select.EPOLLERR) != 0 )
if event & select.EPOLLIN != 0:
e.append( StreamEvent( fd, stream, data, 'read', num_bytes, eof ) )
if event & select.EPOLLOUT != 0:
e.append( StreamEvent( fd, stream, data, 'write', num_bytes, eof ) )
return e
if _best_backend is None:
_best_backend = EPollBackend
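# --- Usage sketch (illustrative addition, not part of the original module).
# Exercises the watch()/wait() API on a plain OS pipe; the names below
# (read_fd, write_fd, watcher) are invented for this example only.
if __name__ == '__main__':
    import os
    read_fd, write_fd = os.pipe()
    watcher = StreamWatcher()
    watcher.watch(read_fd, data='pipe-reader', read=True)
    os.write(write_fd, b'hello')
    # wait() returns StreamEvent tuples; the timeout is in seconds
    for event in watcher.wait(timeout=1.0):
        print('%s %s %d bytes eof=%s' % (
            event.data, event.direction, event.num_bytes, event.eof))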
|
theojulienne/pyio
|
pyio/io/StreamWatcher.py
|
Python
|
mit
| 3,517
| 0.063122
|
from django.conf.urls import include, url
from django.urls import path
from login import views
from django.contrib.auth.views import PasswordResetCompleteView, PasswordResetConfirmView, PasswordResetDoneView
urlpatterns = [
path('password_reset/', views.PasswordResetView.as_view(
html_email_template_name="registration/password_reset_email.html",
email_template_name="registration/password_reset_email.txt",
template_name='registration/custom_password_reset_form.html'), name='password_reset'),
path('password_reset/done/', PasswordResetDoneView.as_view(template_name = 'registration/custom_password_reset_done.html'), name='password_reset_done'),
path('reset/<uidb64>/<token>/', PasswordResetConfirmView.as_view(template_name = 'registration/custom_password_reset_confirm.html'), name='password_reset_confirm'),
path('reset/done/', PasswordResetCompleteView.as_view(template_name='registration/custom_password_reset_complete.html'), name='password_reset_complete'),
url("^", include("django.contrib.auth.urls")),
url(r"^profile/(?P<user_id>[\d]+)$", views.ProfileView.as_view(), name="input"),
url(
r"^profile/password_change$",
views.OEPPasswordChangeView.as_view(),
name="input",
),
url(
r"^profile/(?P<user_id>[\d]+)/edit$", views.EditUserView.as_view(), name="input"
),
url(r"^groups/$", views.GroupManagement.as_view(), name="input"),
url(
r"^groups/new/$",
views.GroupCreate.as_view(),
name="input",
),
url(
r"^groups/(?P<group_id>[\w\d_\s]+)/edit$",
views.GroupCreate.as_view(),
name="input",
),
url(
r"^groups/(?P<group_id>[\w\d_\s]+)/$",
views.GroupView.as_view(),
),
url(
r"^groups/(?P<group_id>[\w\d_\s]+)/members$",
views.GroupEdit.as_view(),
name="input",
),
url(r"^groups/new/$", views.GroupCreate.as_view(), name="input"),
url(r"^register$", views.CreateUserView.as_view()),
url(r"^detach$", views.DetachView.as_view()),
url(r"^activate/(?P<token>[\w\d\-\s]+)$", views.activate),
url(r"^activate/$", views.ActivationNoteView.as_view(), name="activate"),
]
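# --- Usage sketch (illustrative addition, not part of the original module).
# The named routes above can be reversed in views and templates; the paths
# shown assume this urlconf is included at the site root.
#
#     from django.urls import reverse
#     reverse("password_reset")       # -> "/password_reset/"
#     reverse("password_reset_done")  # -> "/password_reset/done/"
#
# Note that many patterns above share the name "input", which makes
# reverse("input", ...) ambiguous; distinct names would be safer.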
|
openego/oeplatform
|
login/urls.py
|
Python
|
agpl-3.0
| 2,215
| 0.004966
|
# Copyright 2020, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Libraries for interacting with a computation."""
|
tensorflow/federated
|
tensorflow_federated/python/core/impl/computation/__init__.py
|
Python
|
apache-2.0
| 651
| 0
|
# coding=utf-8
from abc import ABCMeta, abstractmethod
from typing import Optional
from weakref import ref
from logging import getLogger
from ultros.core.networks.base.connectors import base as base_connector
from ultros.core.networks.base.networks import base as base_network
__author__ = "Gareth Coles"
class BaseServer(metaclass=ABCMeta):
def __init__(self, name: str, network: "base_network.BaseNetwork"):
self.name = name
self._network = ref(network)
self.logger = getLogger(self.name) # TODO: Logging
@property
def network(self) -> "base_network.BaseNetwork":
return self._network()
@abstractmethod
async def connector_connected(self, connector: "base_connector.BaseConnector"):
pass
@abstractmethod
async def connector_disconnected(self, connector: "base_connector.BaseConnector",
exc: Optional[Exception]):
pass
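# --- Illustrative subclass sketch (hypothetical addition, not part of the
# original module). A concrete server only has to implement the two
# connector callbacks; "EchoServer" and its behaviour are invented here.
class EchoServer(BaseServer):
    async def connector_connected(self, connector: "base_connector.BaseConnector"):
        # Called when a connector attaches to this server
        self.logger.info("Connector attached: %r", connector)

    async def connector_disconnected(self, connector: "base_connector.BaseConnector",
                                     exc: Optional[Exception]):
        # exc is None on a clean disconnect, otherwise the causing exception
        if exc is not None:
            self.logger.warning("Connector lost: %r", exc)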
|
UltrosBot/Ultros3K
|
src/ultros/core/networks/base/servers/base.py
|
Python
|
artistic-2.0
| 942
| 0.002123
|
from emburse.resource import (
EmburseObject,
Account,
Allowance,
Card,
Category,
Company,
Department,
Label,
Location,
Member,
SharedLink,
Statement,
Transaction
)
class Client(EmburseObject):
"""
Emburse API Client
API enables for the creation of expense cards at scale for custom business solutions as well as for
third-party app integrations. Cards can be created with set spending limits and assigned with just an email.
Some use cases include vendor payments, employee expense control, and fleet card management.
API Version:
v1
API Docs:
https://www.emburse.com/api/v1/docs#getting-started
Authors:
Marc Ford <marc.ford@gmail.com>
"""
@property
def Account(self):
"""
Emburse Account Object,
configured with the auth token from the client
:return: A configured emburse.resource.Account
:rtype: Account
"""
return Account(auth_token=self.auth_token)
@property
def Allowance(self):
"""
Emburse Allowance Object,
configured with the auth token from the client
:return: A configured emburse.resource.Allowance
:rtype: Allowance
"""
return Allowance(auth_token=self.auth_token)
@property
def Card(self):
"""
Emburse Card Object,
configured with the auth token from the client
:return: A configured emburse.resource.Card
:rtype: Card
"""
return Card(auth_token=self.auth_token)
@property
def Category(self):
"""
Emburse Category Object,
configured with the auth token from the client
:return: A configured emburse.resource.Category
:rtype: Category
"""
return Category(auth_token=self.auth_token)
@property
def Company(self):
"""
Emburse Company Object,
configured with the auth token from the client
:return: A configured emburse.resource.Company
:rtype: Company
"""
return Company(auth_token=self.auth_token)
@property
def Department(self):
"""
Emburse Department Object,
configured with the auth token from the client
:return: A configured emburse.resource.Department
:rtype: Department
"""
return Department(auth_token=self.auth_token)
@property
def Label(self):
"""
Emburse Label Object,
configured with the auth token from the client
:return: A configured emburse.resource.Label
:rtype: Label
"""
return Label(auth_token=self.auth_token)
@property
def Location(self):
"""
Emburse Location Object,
configured with the auth token from the client
:return: A configured emburse.resource.Location
:rtype: Location
"""
return Location(auth_token=self.auth_token)
@property
def Member(self):
"""
Emburse Member Object,
configured with the auth token from the client
:return: A configured emburse.resource.Member
:rtype: Member
"""
return Member(auth_token=self.auth_token)
@property
def SharedLink(self):
"""
Emburse SharedLink Object,
configured with the auth token from the client
:return: A configured emburse.resource.SharedLink
:rtype: SharedLink
"""
return SharedLink(auth_token=self.auth_token)
@property
def Statement(self):
"""
Emburse Statement Object,
configured with the auth token from the client
:return: A configured emburse.resource.Statement
:rtype: Statement
"""
return Statement(auth_token=self.auth_token)
@property
def Transaction(self):
"""
Emburse Transaction Object,
configured with the auth token from the client
:return: A configured emburse.resource.Transaction
:rtype: Transaction
"""
return Transaction(auth_token=self.auth_token)
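# --- Usage sketch (illustrative addition, not part of the original module).
# Assumes EmburseObject accepts and stores an ``auth_token`` keyword, which
# is what the properties above already rely on; the token is a placeholder.
if __name__ == '__main__':
    client = Client(auth_token='EMBURSE_API_TOKEN')
    # Each property builds a fresh resource configured with the same token:
    card = client.Card        # emburse.resource.Card
    member = client.Member    # emburse.resource.Member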
|
MarcFord/Emburse-python
|
emburse/client.py
|
Python
|
gpl-3.0
| 4,177
| 0.002394
|
# -*- coding: utf-8 -*-
""" Authentication, Authorization, Accouting
@requires: U{B{I{gluon}} <http://web2py.com>}
@copyright: (c) 2010-2012 Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["AuthS3",
"S3Permission",
"S3Audit",
"S3RoleManager",
"FaceBookAccount",
"GooglePlusAccount",
]
import datetime
import re
import time
import urllib
from urllib import urlencode
import urllib2
from gluon import *
from gluon.storage import Storage, Messages
from gluon.dal import Field, Row, Query, Set, Table, Expression
from gluon.sqlhtml import CheckboxesWidget, StringWidget
from gluon.tools import Auth, callback, addrow
from gluon.utils import web2py_uuid
from gluon.validators import IS_SLUG
from gluon.contrib import simplejson as json
from gluon.contrib.simplejson.ordered_dict import OrderedDict
from gluon.contrib.login_methods.oauth20_account import OAuthAccount
from s3method import S3Method
from s3validators import IS_ACL
from s3widgets import S3ACLWidget, CheckboxesWidgetS3
from s3utils import s3_mark_required
from s3fields import s3_uid, s3_timestamp, s3_deletion_status
DEFAULT = lambda: None
table_field = re.compile("[\w_]+\.[\w_]+")
DEBUG = False
if DEBUG:
import sys
print >> sys.stderr, "S3AAA: DEBUG MODE"
def _debug(m):
print >> sys.stderr, m
else:
_debug = lambda m: None
# =============================================================================
class AuthS3(Auth):
"""
S3 extensions of the gluon.tools.Auth class
- override:
define_tables()
login()
register()
profile()
verify_email()
requires_membership()
- add:
s3_has_role()
s3_has_permission()
s3_logged_in()
s3_accessible_query()
s3_impersonate()
s3_register() callback
s3_link_to_person()
s3_verify_email_onaccept()
s3_group_members()
s3_user_to_person()
s3_person_to_user()
person_id()
- language
- utc_offset
- organisation
- @ToDo: Facility
"""
# Configuration of UIDs for system roles
S3_SYSTEM_ROLES = Storage(ADMIN = "ADMIN",
AUTHENTICATED = "AUTHENTICATED",
ANONYMOUS = "ANONYMOUS",
EDITOR = "EDITOR",
MAP_ADMIN = "MAP_ADMIN")
def __init__(self):
""" Initialise parent class & make any necessary modifications """
Auth.__init__(self, current.db)
deployment_settings = current.deployment_settings
system_name = deployment_settings.get_system_name()
self.settings.lock_keys = False
self.settings.username_field = False
self.settings.lock_keys = True
self.messages.lock_keys = False
self.messages.registration_pending_approval = "Account registered, however registration is still pending approval - please wait until confirmation received."
self.messages.email_approver_failed = "Failed to send mail to Approver - see if you can notify them manually!"
self.messages.email_verification_failed = "Unable to send verification email - either your email is invalid or our email server is down"
        self.messages.email_sent = "Verification Email sent - please check your email to validate. If you do not receive this email please check your junk email or spam filters"
self.messages.email_verified = "Email verified - you can now login"
self.messages.welcome_email_subject = "Welcome to %(system_name)s" % \
dict(system_name=system_name)
self.messages.welcome_email = \
"Welcome to %(system_name)s - click on the link %(url)s to complete your profile" % \
dict(system_name = system_name,
url = deployment_settings.get_base_public_url() + URL("default", "user", args=["profile"]))
self.messages.duplicate_email = "This email address is already in use"
self.messages.registration_disabled = "Registration Disabled!"
self.messages.registration_verifying = "You haven't yet Verified your account - please check your email"
self.messages.label_organisation_id = "Organization"
self.messages.label_site_id = "Facility"
self.messages.label_utc_offset = "UTC Offset"
self.messages.label_image = "Profile Image"
self.messages.help_utc_offset = "The time difference between UTC and your timezone, specify as +HHMM for eastern or -HHMM for western timezones."
self.messages.help_mobile_phone = "Entering a phone number is optional, but doing so allows you to subscribe to receive SMS messages."
self.messages.help_organisation = "Entering an Organization is optional, but doing so directs you to the appropriate approver & means you automatically get the appropriate permissions."
self.messages.help_image = "You can either use %(gravatar)s or else upload a picture here. The picture will be resized to 50x50."
#self.messages.logged_in = "Signed In"
#self.messages.submit_button = "Signed In"
#self.messages.logged_out = "Signed Out"
self.messages.lock_keys = True
# S3Permission
self.permission = S3Permission(self)
# Set to True to override any authorization
self.override = False
# Site types (for OrgAuth)
T = current.T
if deployment_settings.get_ui_camp():
shelter = T("Camp")
else:
shelter = T("Shelter")
self.org_site_types = Storage(
cr_shelter = shelter,
#org_facility = T("Facility"),
org_facility = T("Site"),
org_office = T("Office"),
hms_hospital = T("Hospital"),
#project_site = T("Project Site"),
#fire_station = T("Fire Station"),
)
# -------------------------------------------------------------------------
def define_tables(self, migrate=True, fake_migrate=False):
"""
to be called unless tables are defined manually
usages::
# defines all needed tables and table files
# UUID + "_auth_user.table", ...
auth.define_tables()
# defines all needed tables and table files
# "myprefix_auth_user.table", ...
auth.define_tables(migrate="myprefix_")
# defines all needed tables without migration/table files
auth.define_tables(migrate=False)
"""
db = current.db
request = current.request
session = current.session
settings = self.settings
messages = self.messages
# User table
if not settings.table_user:
passfield = settings.password_field
if settings.username_field:
# with username (not used by default in Sahana)
settings.table_user = db.define_table(
settings.table_user_name,
Field("first_name", length=128, default="",
label=messages.label_first_name),
Field("last_name", length=128, default="",
label=messages.label_last_name),
Field("username", length=128, default="",
unique=True),
Field(passfield, "password", length=512,
readable=False, label=messages.label_password),
Field("email", length=512, default="",
label=messages.label_email),
Field("language", length=16),
Field("utc_offset", length=16,
readable=False, writable=False),
Field("organisation_id", "integer",
writable=False,
label=messages.label_organisation_id),
Field("site_id", "integer",
writable=False,
label=messages.label_site_id),
Field("registration_key", length=512,
writable=False, readable=False, default="",
label=messages.label_registration_key),
Field("reset_password_key", length=512,
writable=False, readable=False, default="",
label=messages.label_registration_key),
Field("deleted", "boolean", writable=False,
readable=False, default=False),
Field("timestmp", "datetime", writable=False,
readable=False, default=""),
migrate = migrate,
fake_migrate=fake_migrate,
*(s3_uid()+s3_timestamp()))
else:
# with email-address (Sahana default)
settings.table_user = db.define_table(
settings.table_user_name,
Field("first_name", length=128, default="",
label=messages.label_first_name),
Field("last_name", length=128, default="",
label=messages.label_last_name),
Field("email", length=512, default="",
label=messages.label_email,
unique=True),
Field(passfield, "password", length=512,
readable=False, label=messages.label_password),
Field("language", length=16),
Field("utc_offset", length=16,
readable=False,
writable=False,
label=messages.label_utc_offset),
Field("organisation_id", "integer",
writable=False,
label=messages.label_organisation_id),
Field("site_id", "integer",
writable=False,
label=messages.label_site_id),
Field("registration_key", length=512,
writable=False, readable=False, default="",
label=messages.label_registration_key),
Field("reset_password_key", length=512,
writable=False, readable=False, default="",
label=messages.label_registration_key),
Field("deleted", "boolean", writable=False,
readable=False, default=False),
Field("timestmp", "datetime", writable=False,
readable=False, default=""),
migrate = migrate,
fake_migrate=fake_migrate,
*(s3_uid()+s3_timestamp()))
table = settings.table_user
table.first_name.notnull = True
table.first_name.requires = \
IS_NOT_EMPTY(error_message=messages.is_empty)
if current.deployment_settings.get_L10n_mandatory_lastname():
table.last_name.notnull = True
table.last_name.requires = \
IS_NOT_EMPTY(error_message=messages.is_empty)
table.utc_offset.comment = A(SPAN("[Help]"),
_class="tooltip",
_title="%s|%s" % (messages.label_utc_offset,
messages.help_utc_offset))
try:
from s3validators import IS_UTC_OFFSET
table.utc_offset.requires = IS_EMPTY_OR(IS_UTC_OFFSET())
except:
pass
table[passfield].requires = [CRYPT(key=settings.hmac_key,
min_length=self.settings.password_min_length,
digest_alg="sha512")]
if settings.username_field:
table.username.requires = IS_NOT_IN_DB(db,
"%s.username" % settings.table_user._tablename)
table.email.requires = \
[IS_EMAIL(error_message=messages.invalid_email),
IS_LOWER(),
IS_NOT_IN_DB(db,
"%s.email" % settings.table_user._tablename,
error_message=messages.duplicate_email)]
table.registration_key.default = ""
# Group table (roles)
if not settings.table_group:
settings.table_group = db.define_table(
settings.table_group_name,
# Group unique ID, must be notnull+unique:
Field("uuid",
length=64,
notnull=True,
unique=True,
readable=False,
writable=False),
# Group does not appear in the Role Manager:
# (can neither assign, nor modify, nor delete)
Field("hidden", "boolean",
readable=False,
writable=False,
default=False),
# Group cannot be modified in the Role Manager:
# (can assign, but neither modify nor delete)
Field("system", "boolean",
readable=False,
writable=False,
default=False),
# Group cannot be deleted in the Role Manager:
# (can assign and modify, but not delete)
Field("protected", "boolean",
readable=False,
writable=False,
default=False),
# Role name:
Field("role",
length=512,
default="",
unique=True,
label=messages.label_role),
Field("description", "text",
label=messages.label_description),
migrate = migrate,
fake_migrate=fake_migrate,
*(s3_timestamp()+s3_deletion_status()))
table = settings.table_group
table.role.requires = IS_NOT_IN_DB(db, "%s.role"
% settings.table_group._tablename)
# Group membership table (user<->role)
if not settings.table_membership:
settings.table_membership = db.define_table(
settings.table_membership_name,
Field("user_id", settings.table_user,
label=messages.label_user_id),
Field("group_id", settings.table_group,
label=messages.label_group_id),
migrate = migrate,
fake_migrate=fake_migrate,
*(s3_uid()+s3_timestamp()+s3_deletion_status()))
table = settings.table_membership
table.user_id.requires = IS_IN_DB(db, "%s.id" %
settings.table_user._tablename,
"%(id)s: %(first_name)s %(last_name)s")
table.group_id.requires = IS_IN_DB(db, "%s.id" %
settings.table_group._tablename,
"%(id)s: %(role)s")
security_policy = current.deployment_settings.get_security_policy()
# Define Eden permission table
self.permission.define_table(migrate=migrate,
fake_migrate=fake_migrate)
if security_policy not in (1, 2, 3, 4, 5, 6) and \
not settings.table_permission:
# Permissions table (group<->permission)
# NB This Web2Py table is deprecated / replaced in Eden by S3Permission
settings.table_permission = db.define_table(
settings.table_permission_name,
Field("group_id", settings.table_group,
label=messages.label_group_id),
Field("name", default="default", length=512,
label=messages.label_name),
Field("table_name", length=512,
label=messages.label_table_name),
Field("record_id", "integer",
label=messages.label_record_id),
migrate = migrate,
fake_migrate=fake_migrate)
table = settings.table_permission
table.group_id.requires = IS_IN_DB(db, "%s.id" %
settings.table_group._tablename,
"%(id)s: %(role)s")
table.name.requires = IS_NOT_EMPTY()
table.table_name.requires = IS_IN_SET(db.tables)
table.record_id.requires = IS_INT_IN_RANGE(0, 10 ** 9)
# Event table (auth log)
# Records Logins & ?
# @ToDo: Deprecate? At least make it configurable?
if not settings.table_event:
settings.table_event = db.define_table(
settings.table_event_name,
Field("time_stamp", "datetime",
default=request.now,
label=messages.label_time_stamp),
Field("client_ip",
default=request.client,
label=messages.label_client_ip),
Field("user_id", settings.table_user, default=None,
requires = IS_IN_DB(db, "%s.id" %
settings.table_user._tablename,
"%(id)s: %(first_name)s %(last_name)s"),
label=messages.label_user_id),
Field("origin", default="auth", length=512,
label=messages.label_origin,
requires = IS_NOT_EMPTY()),
Field("description", "text", default="",
label=messages.label_description,
requires = IS_NOT_EMPTY()),
migrate = migrate,
fake_migrate=fake_migrate,
*(s3_uid()+s3_timestamp()+s3_deletion_status()))
# -------------------------------------------------------------------------
def login_bare(self, username, password):
"""
Logs user in
- extended to understand session.s3.roles
"""
request = current.request
session = current.session
db = current.db
table_user = self.settings.table_user
table_membership = self.settings.table_membership
if self.settings.login_userfield:
userfield = self.settings.login_userfield
elif "username" in table_user.fields:
userfield = "username"
else:
userfield = "email"
passfield = self.settings.password_field
user = db(table_user[userfield] == username).select().first()
password = table_user[passfield].validate(password)[0]
if user:
user_id = user.id
if not user.registration_key and user[passfield] == password:
user = Storage(table_user._filter_fields(user, id=True))
session.auth = Storage(user=user,
last_visit=request.now,
expiration=self.settings.expiration)
self.user = user
self.set_roles()
return user
return False
# -------------------------------------------------------------------------
def set_roles(self):
"""
Update session roles and pe_id for the current user
"""
if self.user:
db = current.db
session = current.session
table_user = self.settings.table_user
table_membership = self.settings.table_membership
user_id = self.user.id
# Add the Roles to session.s3
roles = []
query = (table_membership.deleted != True) & \
(table_membership.user_id == user_id)
rows = db(query).select(table_membership.group_id)
session.s3.roles = [s.group_id for s in rows]
# Set pe_id for current user
ltable = current.s3db.pr_person_user
if ltable is not None:
query = (ltable.user_id == user_id)
row = db(query).select(ltable.pe_id, limitby=(0, 1)).first()
if row:
session.auth.user["pe_id"] = row.pe_id
return
# -------------------------------------------------------------------------
def set_cookie(self):
"""
Set a Cookie to the client browser so that we know this user has
registered & so we should present them with a login form instead
of a register form
"""
response = current.response
response.cookies["registered"] = "yes"
response.cookies["registered"]["expires"] = 365 * 24 * 3600 # 1 year
response.cookies["registered"]["path"] = "/"
# -------------------------------------------------------------------------
def login(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT):
"""
Overrides Web2Py's login() to use custom flash styles & utcnow
@returns: a login form
"""
db = current.db
table_user = self.settings.table_user
if self.settings.login_userfield:
username = self.settings.login_userfield
elif "username" in table_user.fields:
username = "username"
else:
username = "email"
old_requires = table_user[username].requires
table_user[username].requires = [IS_NOT_EMPTY(), IS_LOWER()]
request = current.request
response = current.response
session = current.session
passfield = self.settings.password_field
try:
table_user[passfield].requires[-1].min_length = 0
except:
pass
if next is DEFAULT:
next = request.vars._next or self.settings.login_next
if onvalidation is DEFAULT:
onvalidation = self.settings.login_onvalidation
if onaccept is DEFAULT:
onaccept = self.settings.login_onaccept
if log is DEFAULT:
log = self.messages.login_log
user = None # default
# Do we use our own login form, or from a central source?
if self.settings.login_form == self:
form = SQLFORM(
table_user,
fields=[username, passfield],
hidden=dict(_next=request.vars._next),
showid=self.settings.showid,
submit_button=self.messages.submit_button,
delete_label=self.messages.delete_label,
formstyle=self.settings.formstyle,
separator=self.settings.label_separator
)
if self.settings.remember_me_form:
# Add a new input checkbox "remember me for longer"
addrow(form,XML(" "),
DIV(XML(" "),
INPUT(_type='checkbox',
_class='checkbox',
_id="auth_user_remember",
_name="remember",
),
XML(" "),
LABEL(
self.messages.label_remember_me,
_for="auth_user_remember",
)),"",
self.settings.formstyle,
'auth_user_remember__row')
captcha = self.settings.login_captcha or \
(self.settings.login_captcha!=False and self.settings.captcha)
if captcha:
addrow(form, captcha.label, captcha, captcha.comment,
self.settings.formstyle,'captcha__row')
accepted_form = False
if form.accepts(request.vars, session,
formname="login", dbio=False,
onvalidation=onvalidation):
accepted_form = True
if username == "email":
# Check for Domains which can use Google's SMTP server for passwords
# @ToDo: an equivalent email_domains for other email providers
gmail_domains = current.deployment_settings.get_auth_gmail_domains()
if gmail_domains:
from gluon.contrib.login_methods.email_auth import email_auth
domain = form.vars[username].split("@")[1]
if domain in gmail_domains:
self.settings.login_methods.append(
email_auth("smtp.gmail.com:587", "@%s" % domain))
# Check for username in db
query = (table_user[username] == form.vars[username])
user = db(query).select().first()
if user:
# user in db, check if registration pending or disabled
temp_user = user
if temp_user.registration_key == "pending":
response.warning = self.messages.registration_pending
return form
elif temp_user.registration_key in ("disabled", "blocked"):
response.error = self.messages.login_disabled
return form
                    elif temp_user.registration_key is not None and \
temp_user.registration_key.strip():
response.warning = \
self.messages.registration_verifying
return form
# Try alternate logins 1st as these have the
# current version of the password
user = None
for login_method in self.settings.login_methods:
if login_method != self and \
login_method(request.vars[username],
request.vars[passfield]):
if not self in self.settings.login_methods:
# do not store password in db
form.vars[passfield] = None
user = self.get_or_create_user(form.vars)
break
if not user:
# Alternates have failed, maybe because service inaccessible
if self.settings.login_methods[0] == self:
# Try logging in locally using cached credentials
if temp_user[passfield] == form.vars.get(passfield, ""):
# Success
user = temp_user
else:
# User not in db
if not self.settings.alternate_requires_registration:
# We're allowed to auto-register users from external systems
for login_method in self.settings.login_methods:
if login_method != self and \
login_method(request.vars[username],
request.vars[passfield]):
if not self in self.settings.login_methods:
# Do not store password in db
form.vars[passfield] = None
user = self.get_or_create_user(form.vars)
break
if not user:
self.log_event(self.settings.login_failed_log,
request.post_vars)
# Invalid login
session.error = self.messages.invalid_login
redirect(self.url(args=request.args,
vars=request.get_vars))
else:
# Use a central authentication server
cas = self.settings.login_form
cas_user = cas.get_user()
if cas_user:
cas_user[passfield] = None
user = self.get_or_create_user(table_user._filter_fields(cas_user))
form = Storage()
form.vars = user
self.s3_register(form)
elif hasattr(cas, "login_form"):
return cas.login_form()
else:
# we need to pass through login again before going on
next = "%s?_next=%s" % (URL(r=request), next)
redirect(cas.login_url(next))
# Process authenticated users
if user:
user = Storage(table_user._filter_fields(user, id=True))
# If the user hasn't set a personal UTC offset,
# then read the UTC offset from the form:
if not user.utc_offset:
user.utc_offset = session.s3.utc_offset
session.auth = Storage(
user=user,
last_visit=request.now,
expiration = request.vars.get("remember", False) and \
self.settings.long_expiration or self.settings.expiration,
remember = request.vars.has_key("remember"),
hmac_key = web2py_uuid()
)
self.user = user
self.set_roles()
# Read their language from the Profile
language = user.language
current.T.force(language)
session.s3.language = language
session.confirmation = self.messages.logged_in
# Set a Cookie to present user with login box by default
self.set_cookie()
# Update the timestamp of the User so we know when they last logged-in
db(table_user.id == self.user.id).update(timestmp = request.utcnow)
if log and self.user:
self.log_event(log % self.user)
# How to continue
if self.settings.login_form == self:
if accepted_form:
if onaccept:
onaccept(form)
if isinstance(next, (list, tuple)):
# fix issue with 2.6
next = next[0]
if next and not next[0] == "/" and next[:4] != "http":
next = self.url(next.replace("[id]", str(form.vars.id)))
redirect(next)
table_user[username].requires = old_requires
return form
else:
redirect(next)
# -------------------------------------------------------------------------
def register(self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT):
"""
Overrides Web2Py's register() to add new functionality:
- Checks whether registration is permitted
- Custom Flash styles
- Allow form to be embedded in other pages
- Optional addition of Mobile Phone field to the Register form
- Optional addition of Organisation field to the Register form
- Lookup Domains/Organisations to check for Whitelists
&/or custom Approver
@returns: a registration form
"""
db = current.db
settings = self.settings
messages = self.messages
request = current.request
response = current.response
session = current.session
deployment_settings = current.deployment_settings
# S3: Don't allow registration if disabled
self_registration = deployment_settings.get_security_self_registration()
if not self_registration:
session.error = messages.registration_disabled
redirect(URL(args=["login"]))
if self.is_logged_in() and request.function != "index":
redirect(settings.logged_url)
if next == DEFAULT:
next = request.vars._next or settings.register_next
if onvalidation == DEFAULT:
onvalidation = settings.register_onvalidation
if onaccept == DEFAULT:
onaccept = settings.register_onaccept
if log == DEFAULT:
log = messages.register_log
user = settings.table_user
passfield = settings.password_field
# S3: Organisation field in form?
if deployment_settings.get_auth_registration_requests_organisation():
# Widget set in controllers/default.py
#user.organisation_id.widget =
user.organisation_id.writable = True
if deployment_settings.get_auth_registration_organisation_mandatory():
user.organisation_id.comment = SPAN("*", _class="req")
else:
user.organisation_id.comment = DIV(_class="tooltip",
_title="%s|%s" % (messages.label_organisation_id,
messages.help_organisation))
else:
user.organisation_id.readable = False
user.organisation_id.writable = False
user.organisation_id.default = deployment_settings.get_auth_registration_organisation_id_default()
# @ToDo: Option to request Facility during Registration
user.site_id.readable = False
labels, required = s3_mark_required(user)
#formstyle = current.manager.s3.crud.formstyle
form = SQLFORM(user, hidden=dict(_next=request.vars._next),
labels = labels,
separator = "",
showid=settings.showid,
submit_button=messages.submit_button,
delete_label=messages.delete_label,
#formstyle = formstyle
)
for i, row in enumerate(form[0].components):
item = row[1][0]
if isinstance(item, INPUT) and item["_name"] == passfield:
field_id = "%s_password_two" % user._tablename
#row = formstyle(...)
form[0].insert(i + 1, TR(
TD(LABEL("%s:" % messages.verify_password,
_for="password_two",
_id=field_id + SQLFORM.ID_LABEL_SUFFIX),
_class="w2p_fl"),
INPUT(_name="password_two",
_id=field_id,
_type="password",
requires=IS_EXPR("value==%s" % \
repr(request.vars.get(passfield, None)),
error_message=messages.mismatched_password)),
SPAN("*", _class="req"),
"", _id=field_id + SQLFORM.ID_ROW_SUFFIX))
#form[0].insert(i + 1, row)
# add an opt in clause to receive emails depending on the deployment settings
if deployment_settings.get_auth_opt_in_to_email():
field_id = "%s_opt_in" % user._tablename
comment = DIV(DIV(_class="tooltip",
_title="%s|%s" % ("Mailing list",
"By selecting this you agree that we may contact you.")))
checked = deployment_settings.get_auth_opt_in_default() and "selected"
form[0].insert(-1,
TR(TD(LABEL("%s:" % "Receive updates",
_for="opt_in",
_id=field_id + SQLFORM.ID_LABEL_SUFFIX),
_class="w2p_fl"),
INPUT(_name="opt_in", _id=field_id, _type="checkbox", _checked=checked),
TD(comment,
_class="w2p_fc"),
_id=field_id + SQLFORM.ID_ROW_SUFFIX))
# S3: Insert Mobile phone field into form
if deployment_settings.get_auth_registration_requests_mobile_phone():
field_id = "%s_mobile" % user._tablename
if deployment_settings.get_auth_registration_mobile_phone_mandatory():
comment = SPAN("*", _class="req")
else:
comment = DIV(_class="tooltip",
_title="%s|%s" % (deployment_settings.get_ui_label_mobile_phone(),
messages.help_mobile_phone))
form[0].insert(-1,
TR(TD(LABEL("%s:" % deployment_settings.get_ui_label_mobile_phone(),
_for="mobile",
_id=field_id + SQLFORM.ID_LABEL_SUFFIX),
_class="w2p_fl"),
INPUT(_name="mobile", _id=field_id),
TD(comment,
_class="w2p_fc"),
_id=field_id + SQLFORM.ID_ROW_SUFFIX))
# S3: Insert Photo widget into form
if deployment_settings.get_auth_registration_requests_image():
label = self.messages.label_image
comment = DIV(_class="stickytip",
_title="%s|%s" % (label,
self.messages.help_image % \
dict(gravatar = A("Gravatar",
_target="top",
_href="http://gravatar.com"))))
field_id = "%s_image" % user._tablename
widget = SQLFORM.widgets["upload"].widget(current.s3db.pr_image.image, None)
form[0].insert(-1,
TR(TD(LABEL("%s:" % label,
_for="image",
_id=field_id + SQLFORM.ID_LABEL_SUFFIX),
_class="w2p_fl"),
widget,
TD(comment,
_class="w2p_fc"),
_id=field_id + SQLFORM.ID_ROW_SUFFIX))
        if settings.captcha is not None:
form[0].insert(-1, TR("", settings.captcha, ""))
import uuid
user.registration_key.default = key = str(uuid.uuid4())
if form.accepts(request.vars, session, formname="register",
onvalidation=onvalidation):
if settings.create_user_groups:
# Not used in S3
description = \
"group uniquely assigned to %(first_name)s %(last_name)s"\
% form.vars
group_id = self.add_group("user_%s" % form.vars.id,
description)
self.add_membership(group_id, form.vars.id)
approved = False
users = db(settings.table_user.id > 0).count()
if users == 1:
# 1st user to register shouldn't need verification/approval
approved = True
elif settings.registration_requires_verification:
# Ensure that we add to the correct Organization
approver, organisation_id = self.s3_approver(form.vars)
if organisation_id:
# @ToDo: Is it correct to override the organisation entered by the user?
# Ideally (if the deployment_settings.auth.registration_requests_organisation = True
# the org could be selected based on the email and the user could then override
form.vars.organisation = organisation_id
# Send the Verification email
if not settings.mailer or \
not settings.mailer.send(to=form.vars.email,
subject=messages.verify_email_subject,
message=messages.verify_email % dict(key=key)):
db.rollback()
response.error = messages.email_verification_failed
return form
# @ToDo: Deployment Setting?
#session.confirmation = messages.email_sent
next = URL(c="default", f="message",
args = ["verify_email_sent"],
vars = {"email": form.vars.email})
elif settings.registration_requires_approval:
# Identify the Approver &
# ensure that we add to the correct Organization
approver, organisation_id = self.s3_approver(form.vars)
if organisation_id:
form.vars.organisation_id = organisation_id
if approver:
# Send the Authorisation email
form.vars.approver = approver
if not settings.mailer or \
not settings.verify_email_onaccept(form.vars):
# We don't wish to prevent registration if the approver mail fails to send
#db.rollback()
session.error = messages.email_approver_failed
#return form
user[form.vars.id] = dict(registration_key="pending")
session.warning = messages.registration_pending_approval
else:
# The domain is Whitelisted
approved = True
else:
# No verification or approval needed
approved = True
approver, organisation_id = self.s3_approver(form.vars)
if organisation_id:
form.vars.organisation = organisation_id
form.vars.registration_key = ""
form.vars.approver = approver
settings.verify_email_onaccept(form.vars)
# Set a Cookie to present user with login box by default
self.set_cookie()
if approved:
user[form.vars.id] = dict(registration_key="")
session.confirmation = messages.registration_successful
table_user = settings.table_user
if "username" in table_user.fields:
username = "username"
else:
username = "email"
query = (table_user[username] == form.vars[username])
user = db(query).select(limitby=(0, 1)).first()
user = Storage(table_user._filter_fields(user, id=True))
if users == 1:
# Add the first user to admin group
admin_group_id = 1
self.add_membership(admin_group_id, user.id)
# If the user hasn't set a personal UTC offset,
# then read the UTC offset from the form:
if not user.utc_offset:
user.utc_offset = session.s3.utc_offset
session.auth = Storage(user=user, last_visit=request.now,
expiration=settings.expiration)
self.user = user
session.flash = messages.logged_in
if log:
self.log_event(log % form.vars)
if onaccept:
onaccept(form)
if not next:
next = self.url(args = request.args)
elif isinstance(next, (list, tuple)):
# fix issue with 2.6
next = next[0]
elif next and not next[0] == "/" and next[:4] != "http":
next = self.url(next.replace("[id]", str(form.vars.id)))
redirect(next)
return form
# -------------------------------------------------------------------------
def profile(
self,
next=DEFAULT,
onvalidation=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT,
):
"""
returns a form that lets the user change his/her profile
.. method:: Auth.profile([next=DEFAULT [, onvalidation=DEFAULT
[, onaccept=DEFAULT [, log=DEFAULT]]]])
Patched for S3 to use s3_mark_required
"""
table_user = self.settings.table_user
if not self.is_logged_in():
redirect(self.settings.login_url)
passfield = self.settings.password_field
self.settings.table_user[passfield].writable = False
request = current.request
session = current.session
if next == DEFAULT:
next = request.get_vars._next \
or request.post_vars._next \
or self.settings.profile_next
if onvalidation == DEFAULT:
onvalidation = self.settings.profile_onvalidation
if onaccept == DEFAULT:
onaccept = self.settings.profile_onaccept
if log == DEFAULT:
log = self.messages.profile_log
labels, required = s3_mark_required(table_user)
form = SQLFORM(
table_user,
self.user.id,
fields = self.settings.profile_fields,
labels = labels,
hidden = dict(_next=next),
showid = self.settings.showid,
submit_button = self.messages.profile_save_button,
delete_label = self.messages.delete_label,
upload = self.settings.download_url,
formstyle = self.settings.formstyle,
separator=""
)
if form.accepts(request, session,
formname='profile',
onvalidation=onvalidation,hideerror=self.settings.hideerror):
self.user.update(table_user._filter_fields(form.vars))
session.flash = self.messages.profile_updated
if log:
self.log_event(log % self.user)
callback(onaccept,form)
if not next:
next = self.url(args=request.args)
elif isinstance(next, (list, tuple)): ### fix issue with 2.6
next = next[0]
elif next and not next[0] == '/' and next[:4] != 'http':
next = self.url(next.replace('[id]', str(form.vars.id)))
redirect(next)
return form
# -------------------------------------------------------------------------
def s3_lookup_org_role(self, organisation_id):
"""
Lookup the Organisation Access Role from the ID of the Organisation
"""
if not organisation_id:
return None
db = current.db
s3db = current.s3db
table = s3db.org_organisation
query = (table.id == organisation_id)
org = db(query).select(table.owned_by_organisation).first()
if org:
return org.owned_by_organisation
return None
# -------------------------------------------------------------------------
def s3_impersonate(self, user_id):
"""
S3 framework function
Designed to be used within tasks, which are run in a separate request
& hence don't have access to current.auth
@param user_id: auth.user.id
"""
session = current.session
db = current.db
if not user_id:
# Anonymous
return None
table_user = self.settings.table_user
user = db(table_user.id == user_id).select(limitby=(0, 1)).first()
if not user:
# Invalid user ID
return False
roles = []
table_membership = self.settings.table_membership
memberships = db(table_membership.user_id == user.id).select(
table_membership.group_id)
roles = [m.group_id for m in memberships]
if session.s3.system_roles.ANONYMOUS:
roles.append(session.s3.system_roles.ANONYMOUS)
session.s3.roles = roles
# Set the language from the Profile
language = user.language
current.T.force(language)
current.session.s3.language = language
user = Storage(table_user._filter_fields(user, id=True))
# Use this user
self.user = user
return user
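    # --- Usage sketch (illustrative addition, not part of the original code).
    # Within an async task (which has no request context), adopt a stored
    # user id before doing any access-controlled work:
    #
    #     auth = current.auth
    #     if auth.s3_impersonate(user_id) is False:
    #         return  # unknown user id
    #     # proceed with this user's roles in session.s3.roles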
# -------------------------------------------------------------------------
def s3_register(self, form):
"""
S3 framework function
Designed to be used as an onaccept callback for register()
Whenever someone registers, it:
- adds them to the 'Authenticated' role
- adds their name to the Person Registry
- creates their profile picture
- creates an HRM record
- adds them to the Org_x Access role
"""
db = current.db
manager = current.manager
s3db = current.s3db
vars = form.vars
user_id = vars.id
if not user_id:
return None
# Add to 'Authenticated' role
authenticated = self.id_group("Authenticated")
self.add_membership(authenticated, user_id)
# Link to organisation, lookup org role
organisation_id = self.s3_link_to_organisation(vars)
if organisation_id:
owned_by_organisation = self.s3_lookup_org_role(organisation_id)
else:
owned_by_organisation = None
# Add to Person Registry and Email/Mobile to pr_contact
person_id = self.s3_link_to_person(vars, # user
owned_by_organisation)
if "image" in vars:
if hasattr(vars.image, "file"):
source_file = vars.image.file
original_filename = vars.image.filename
ptable = s3db.pr_person
query = (ptable.id == person_id)
pe_id = db(query).select(ptable.pe_id,
limitby=(0, 1)).first()
if pe_id:
pe_id = pe_id.pe_id
itable = s3db.pr_image
field = itable.image
newfilename = field.store(source_file, original_filename, field.uploadfolder)
url = URL(c="default", f="download", args=newfilename)
fields = dict(pe_id=pe_id,
profile=True,
image=newfilename,
url = url,
title=current.T("Profile Picture"))
if isinstance(field.uploadfield, str):
fields[field.uploadfield] = source_file.read()
itable.insert(**fields)
htable = s3db.table("hrm_human_resource")
if htable and organisation_id:
# Create an HRM entry, if one doesn't already exist
query = (htable.person_id == person_id) & \
(htable.organisation_id == organisation_id)
row = db(query).select(htable.id, limitby=(0, 1)).first()
if not row:
if current.deployment_settings.get_hrm_show_staff():
type = 1 # Staff
else:
type = 2 # Volunteer
id = htable.insert(person_id=person_id,
organisation_id=organisation_id,
type=type,
owned_by_user=user_id,
owned_by_organisation=owned_by_organisation)
record = Storage(id=id)
manager.model.update_super(htable, record)
if owned_by_organisation:
# Add user to the Org Access Role
table = self.settings.table_membership
query = (table.deleted != True) & \
(table.user_id == user_id) & \
(table.group_id == owned_by_organisation)
if not db(query).select(table.id,
limitby=(0, 1)).first():
table.insert(user_id=user_id,
group_id=owned_by_organisation)
# Return person_id for init scripts
return person_id
# -------------------------------------------------------------------------
def s3_link_to_organisation(self, user):
"""
Link a user account to an organisation
@param user: the user account record (= form.vars in s3_register)
"""
db = current.db
s3db = current.s3db
manager = current.manager
organisation_id = user.organisation_id
if not organisation_id:
otable = s3db.org_organisation
name = user.get("organisation_name", None)
acronym = user.get("organisation_acronym", None)
if name:
# Create new organisation
organisation_id = otable.insert(name=name,
acronym=acronym)
# Update the super-entities
record = Storage(id=organisation_id)
manager.model.update_super(otable, record)
# Set record ownership
self.s3_set_record_owner(otable, organisation_id)
user.organisation_id = organisation_id
                # Update user record
                utable = self.settings.table_user
                query = (utable.id == user.id)
                db(query).update(organisation_id=organisation_id)
if not organisation_id:
return None
# Create link (if it doesn't exist)
user_id = user.id
ltable = s3db.org_organisation_user
if ltable:
query = (ltable.user_id == user_id) & \
(ltable.organisation_id == organisation_id)
row = db(query).select(ltable.id, limitby=(0, 1)).first()
if not row:
ltable.insert(user_id=user_id,
organisation_id=organisation_id)
return organisation_id
# -------------------------------------------------------------------------
def s3_link_to_person(self,
user=None,
owned_by_organisation=None):
"""
Links user accounts to person registry entries
@param user: the user record
@param owned_by_organisation: the role of the owner organisation
Policy for linking to pre-existing person records:
If a person record with exactly the same first name and
last name exists, which has a contact information record
with exactly the same email address as used in the user
account, and is not linked to another user account, then
this person record will be linked to this user account.
Otherwise, a new person record is created, and a new email
contact record with the email address from the user record
is registered for that person.
"""
db = current.db
s3db = current.s3db
utable = self.settings.table_user
ptable = s3db.pr_person
ctable = s3db.pr_contact
atable = s3db.pr_address
etable = s3db.pr_pentity
ttable = s3db.sit_trackable
gtable = s3db.gis_config
ltable = s3db.pr_person_user
left = [ltable.on(ltable.user_id == utable.id),
ptable.on(ptable.pe_id == ltable.pe_id)]
if user is not None:
if not isinstance(user, (list, tuple)):
user = [user]
user_ids = [u.id for u in user]
query = (utable.id.belongs(user_ids))
else:
query = (utable.id != None)
users = db(query).select(utable.id,
utable.first_name,
utable.last_name,
utable.email,
ltable.pe_id,
ptable.id,
left=left, distinct=True)
utn = utable._tablename
person_ids = [] # Collect the person IDs
for u in users:
person = u.pr_person
if person.id is not None:
person_ids.append(person.id)
continue
user = u[utn]
owner = Storage(owned_by_user=user.id,
owned_by_organisation=owned_by_organisation)
if "email" in user:
# Try to find a matching person record
first_name = user.first_name
last_name = user.last_name
email = user.email.lower()
query = (ptable.first_name == first_name) & \
(ptable.last_name == last_name) & \
(ctable.pe_id == ptable.pe_id) & \
(ctable.contact_method == "EMAIL") & \
(ctable.value.lower() == email)
person = db(query).select(ptable.id,
ptable.pe_id,
limitby=(0, 1)).first()
if person and \
not db(ltable.pe_id == person.pe_id).count():
# Match found, and it isn't linked to another user account
# Insert a link
ltable.insert(user_id=user.id, pe_id=person.pe_id)
# Assign ownership of the Person record
person.update_record(**owner)
# Assign ownership of the Contact record(s)
query = (ctable.pe_id == person.pe_id)
db(query).update(**owner)
# Assign ownership of the Address record(s)
query = (atable.pe_id == person.pe_id)
db(query).update(**owner)
# Assign ownership of the Config record(s)
query = (gtable.pe_id == person.pe_id)
db(query).update(**owner)
# HR records
self.s3_register_staff(user.id, person.id)
# Set pe_id if this is the current user
if self.user and self.user.id == user.id:
self.user.pe_id = person.pe_id
person_ids.append(person.id)
continue
# Create a PE
pe_id = etable.insert(instance_type="pr_person",
deleted=False)
# Create a TE
track_id = ttable.insert(instance_type="pr_person",
deleted=False)
if pe_id:
# Create a new person record
if current.request.vars.get("opt_in", None):
opt_in = current.deployment_settings.get_auth_opt_in_team_list()
else:
opt_in = ""
new_id = ptable.insert(pe_id = pe_id,
track_id = track_id,
first_name = first_name,
last_name = last_name,
opt_in = opt_in,
modified_by = user.id,
**owner)
if new_id:
# Insert a link
ltable.insert(user_id=user.id, pe_id=pe_id)
# Register the new person UUID in the PE and TE
person_uuid = ptable[new_id].uuid
db(etable.id == pe_id).update(uuid=person_uuid)
db(ttable.id == track_id).update(uuid=person_uuid)
# Add the email to pr_contact
ctable.insert(pe_id = pe_id,
contact_method = "EMAIL",
priority = 1,
value = email,
**owner)
# Add the mobile to pr_contact
mobile = current.request.vars.get("mobile", None)
if mobile:
ctable.insert(
pe_id = pe_id,
contact_method = "SMS",
priority = 2,
value = mobile,
**owner)
person_ids.append(new_id)
# Add the user to each team if they have chosen to opt-in
g_table = s3db["pr_group"]
gm_table = s3db["pr_group_membership"]
for team in opt_in:
query = (g_table.name == team)
team_rec = db(query).select(g_table.id, limitby=(0, 1)).first()
# if the team doesn't exist then add it
                        if team_rec is None:
team_id = g_table.insert(name = team, group_type = 5)
else:
team_id = team_rec.id
gm_table.insert(group_id = team_id,
person_id = new_id)
# Set pe_id if this is the current user
if self.user and self.user.id == user.id:
self.user.pe_id = pe_id
if len(person_ids) == 1:
return person_ids[0]
else:
return person_ids
# -------------------------------------------------------------------------
def s3_approver(self, user):
"""
Returns the Approver for a new Registration &
the organisation_id field
@param: user - the user record (form.vars when done direct)
"""
db = current.db
s3db = current.s3db
deployment_settings = current.deployment_settings
# Default Approver
approver = deployment_settings.get_mail_approver()
organisation_id = None
# Check for Domain: Whitelist or specific Approver
table = s3db.auth_organisation
address, domain = user.email.split("@", 1)
query = (table.domain == domain)
record = db(query).select(table.organisation_id,
table.approver,
limitby=(0, 1)).first()
if record:
organisation_id = record.organisation_id
approver = record.approver
elif deployment_settings.get_auth_registration_requests_organisation():
# Check for an Organization-specific Approver
organisation_id = user.get("organisation_id",
None)
if organisation_id:
query = (table.organisation_id == organisation_id)
record = db(query).select(table.approver,
limitby=(0, 1)).first()
if record and record.approver:
approver = record.approver
return approver, organisation_id
# -------------------------------------------------------------------------
def verify_email(self,
next=DEFAULT,
onaccept=DEFAULT,
log=DEFAULT):
"""
            Action to verify the user's registration email

            .. method:: Auth.verify_email([next=DEFAULT [, onaccept=DEFAULT
                [, log=DEFAULT]]])
"""
db = current.db
settings = self.settings
messages = self.messages
deployment_settings = current.deployment_settings
key = current.request.args[-1]
table_user = settings.table_user
user = db(table_user.registration_key == key).select().first()
if not user:
redirect(settings.verify_email_next)
# S3: Lookup the Approver
approver, organisation_id = self.s3_approver(user)
if settings.registration_requires_approval and approver:
user.update_record(registration_key = "pending")
current.session.flash = messages.registration_pending_approval
else:
user.update_record(registration_key = "")
current.session.flash = messages.email_verified
if log == DEFAULT:
log = messages.verify_email_log
if next == DEFAULT:
next = settings.verify_email_next
if onaccept == DEFAULT:
onaccept = settings.verify_email_onaccept
if log:
self.log_event(log % user)
if approver:
user.approver = approver
callback(onaccept, user)
redirect(next)
# -------------------------------------------------------------------------
def s3_verify_email_onaccept(self, form):
""""
Sends a message to the approver to notify them if a user needs approval
If deployment_settings.auth.always_notify_approver = True,
send them notification regardless
"""
if form.registration_key == "": # User Approved
if not current.deployment_settings.get_auth_always_notify_approver():
return
subject = current.T("%(system_name)s - New User Registered") % \
{"system_name": current.deployment_settings.get_system_name()}
message = self.messages.new_user % dict(first_name = form.first_name,
last_name = form.last_name,
email = form.email)
else:
subject = current.T("%(system_name)s - New User Registration Approval Pending") % \
{"system_name": current.deployment_settings.get_system_name()}
message = self.messages.approve_user % \
dict(first_name=form.first_name,
last_name=form.last_name,
email=form.email)
result = self.settings.mailer.send(to=form.approver,
subject=subject,
message=message)
return result
# -------------------------------------------------------------------------
def s3_register_staff(self, user_id, person_id):
"""
            Take ownership of the HR records of the person record,
            and add the user to the staff role(s) of the respective
            organisation(s).
To be called by s3_link_to_person in case a newly registered
user record gets linked to a prior existing person record.
@param user_id: the user record ID
@param person_id: the person record ID
"""
db = current.db
s3db = current.s3db
manager = current.manager
htable = s3db.table("hrm_human_resource")
if htable is None:
# HR module disabled: skip
return
rtable = self.settings.table_group
mtable = self.settings.table_membership
utable = self.settings.table_user
# User owns their own HRM records
query = (htable.person_id == person_id)
db(query).update(owned_by_user=user_id)
query &= ((htable.status == 1) &
(htable.deleted != True))
rows = db(query).select(htable.owned_by_organisation)
org_roles = []
for row in rows:
org_role = row.owned_by_organisation
if org_role and org_role not in org_roles:
query = (mtable.deleted != True) & \
(mtable.user_id == user_id) & \
(mtable.group_id == org_role)
if not db(query).select(limitby=(0, 1)).first():
org_roles.append(dict(user_id=user_id,
group_id=org_role))
if org_roles:
mtable.bulk_insert(org_roles)
# -------------------------------------------------------------------------
def s3_logged_in(self):
"""
Check whether the user is currently logged-in
- tries Basic if not
"""
if self.override:
return True
session = current.session
if not self.is_logged_in():
basic = self.basic()
try:
return basic[2]
except TypeError:
# old web2py
return basic
except:
return False
return True
# -------------------------------------------------------------------------
# Role Management
# -------------------------------------------------------------------------
def get_system_roles(self):
"""
            Get the IDs of the system roles by their UIDs, and store them
            in the current session. To be run once per session, as these
IDs should never change.
Caution: do NOT cache the result, otherwise a newly installed
system would be completely open during the caching period!
"""
session = current.session
try:
if session.s3.system_roles:
return session.s3.system_roles
except:
pass
db = current.db
rtable = self.settings.table_group
if rtable is not None:
system_roles = self.S3_SYSTEM_ROLES
query = (rtable.deleted != True) & \
rtable.uuid.belongs(system_roles.values())
rows = db(query).select(rtable.id, rtable.uuid)
sr = Storage([(role.uuid, role.id) for role in rows])
else:
sr = Storage([(uid, None) for uid in self.S3_SYSTEM_ROLES])
session.s3.system_roles = sr
return sr
# -------------------------------------------------------------------------
def s3_create_role(self, role, description=None, *acls, **args):
"""
Back-end method to create roles with ACLs
@param role: display name for the role
@param description: description of the role (optional)
@param acls: list of initial ACLs to assign to this role
@param args: keyword arguments (see below)
            @keyword uid: a unique identifier (UUID) for the role
@keyword hidden: hide this role completely from the RoleManager
@keyword system: role can be assigned, but neither modified nor
deleted in the RoleManager
@keyword protected: role can be assigned and edited, but not
deleted in the RoleManager
"""
table = self.settings.table_group
hidden = args.get("hidden", False)
system = args.get("system", False)
protected = args.get("protected", False)
uid = args.get("uid", None)
if uid:
query = (table.uuid == uid)
record = current.db(query).select(limitby=(0, 1)).first()
else:
record = None
import uuid
uid = uuid.uuid4()
if record:
role_id = record.id
record.update_record(deleted=False,
role=role,
description=description,
hidden=hidden,
system=system,
protected=protected)
else:
role_id = table.insert(uuid=uid,
role=role,
description=description,
hidden=hidden,
system=system,
protected=protected)
if role_id:
for acl in acls:
self.s3_update_acl(role_id, **acl)
return role_id
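    # A minimal usage sketch (illustrative only; the role name, UID and ACL
    # values are made up, "auth" is assumed to be the AuthS3 instance and
    # "acl" its S3Permission):
    #
    #   acl = auth.permission
    #   role_id = auth.s3_create_role("Project Editor",
    #                                 "Can edit project records",
    #                                 dict(c="project",
    #                                      uacl=acl.READ, oacl=acl.ALL),
    #                                 uid="project_editor")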
# -------------------------------------------------------------------------
def s3_delete_role(self, role_id):
"""
Remove a role from the system.
@param role_id: the ID or UID of the role
@note: protected roles cannot be deleted with this function,
need to reset the protected-flag first to override
"""
db = current.db
table = self.settings.table_group
if isinstance(role_id, str) and not role_id.isdigit():
gquery = (table.uuid == role_id)
else:
role_id = int(role_id)
gquery = (table.id == role_id)
role = db(gquery).select(limitby=(0, 1)).first()
if role and not role.protected:
# Remove all memberships for this role
mtable = self.settings.table_membership
mquery = (mtable.group_id == role.id)
db(mquery).update(deleted=True)
# Remove all ACLs for this role
ptable = self.permission.table
pquery = (ptable.group_id == role.id)
db(pquery).update(deleted=True)
# Remove the role
db(gquery).update(role=None, deleted=True)
# -------------------------------------------------------------------------
def resolve_role_ids(self, roles):
"""
Resolve role UIDs
@param roles: list of role IDs or UIDs (or mixed)
"""
db = current.db
if not isinstance(roles, (list, tuple)):
roles = [roles]
role_ids = []
role_uids = []
for role_id in roles:
if isinstance(role_id, str) and not role_id.isdigit():
role_uids.append(role_id)
else:
_id = int(role_id)
if _id not in role_ids:
role_ids.append(_id)
if role_uids:
rtable = self.settings.table_group
query = (rtable.deleted != True) & \
(rtable.uuid.belongs(role_uids))
rows = db(query).select(rtable.id)
role_ids += [r.id for r in rows if r.id not in role_ids]
return role_ids
# -------------------------------------------------------------------------
def s3_assign_role(self, user_id, role_id):
"""
Assigns a role to a user
@param user_id: the record ID of the user account
@param role_id: the record ID(s)/UID(s) of the role
@note: strings or lists of strings are assumed to be
role UIDs
"""
db = current.db
rtable = self.settings.table_group
mtable = self.settings.table_membership
        query = None
        if isinstance(role_id, (list, tuple)):
            if isinstance(role_id[0], str):
                query = (rtable.deleted != True) & \
                        (rtable.uuid.belongs(role_id))
            else:
                roles = role_id
        elif isinstance(role_id, str):
            query = (rtable.deleted != True) & \
                    (rtable.uuid == role_id)
        else:
            roles = [role_id]
if query is not None:
roles = db(query).select(rtable.id)
roles = [r.id for r in roles]
query = (mtable.deleted != True) & \
(mtable.user_id == user_id) & \
(mtable.group_id.belongs(roles))
assigned = db(query).select(mtable.group_id)
assigned_roles = [r.group_id for r in assigned]
for role in roles:
if role not in assigned_roles:
mtable.insert(user_id=user_id, group_id=role)
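    # Usage sketch (illustrative only; the role UIDs are made up):
    #
    #   auth.s3_assign_role(user_id, "STAFF")             # single UID
    #   auth.s3_assign_role(user_id, ["STAFF", "EDITOR"]) # list of UIDs
    #   auth.s3_assign_role(user_id, 3)                   # record ID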
# -------------------------------------------------------------------------
def s3_retract_role(self, user_id, role_id):
"""
Removes a role assignment from a user account
@param user_id: the record ID of the user account
@param role_id: the record ID(s)/UID(s) of the role
@note: strings or lists of strings are assumed to be
role UIDs
"""
if not role_id:
return
db = current.db
rtable = self.settings.table_group
mtable = self.settings.table_membership
        query = None
        if isinstance(role_id, (list, tuple)):
            if isinstance(role_id[0], str):
                query = (rtable.deleted != True) & \
                        (rtable.uuid.belongs(role_id))
            else:
                roles = role_id
        elif isinstance(role_id, str):
            query = (rtable.deleted != True) & \
                    (rtable.uuid == role_id)
        else:
            roles = [role_id]
if query is not None:
roles = db(query).select(rtable.id)
roles = [r.id for r in roles]
query = (mtable.deleted != True) & \
(mtable.user_id == user_id) & \
(mtable.group_id.belongs(roles))
db(query).update(deleted=True)
# -------------------------------------------------------------------------
def s3_has_role(self, role):
"""
Check whether the currently logged-in user has a role
@param role: the record ID or UID of the role
"""
if self.override:
return True
db = current.db
session = current.session
if not session.s3:
return False
# Trigger HTTP basic auth
self.s3_logged_in()
roles = session.s3.roles
if not roles:
return False
system_roles = session.s3.system_roles
if system_roles and system_roles.ADMIN in roles:
# Administrators have all roles
return True
if isinstance(role, str):
if role.isdigit():
role = int(role)
else:
rtable = self.settings.table_group
query = (rtable.deleted != True) & \
(rtable.uuid == role)
row = db(query).select(rtable.id, limitby=(0, 1)).first()
if row:
role = row.id
else:
return False
return role in session.s3.roles
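    # Usage sketch (illustrative only; the role UID is made up):
    #
    #   if auth.s3_has_role("ADMIN"):   # lookup by UID
    #       ...
    #   if auth.s3_has_role(3):         # lookup by record ID
    #       ...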
# -------------------------------------------------------------------------
# ACL management
# -------------------------------------------------------------------------
def s3_update_acls(self, role, *acls):
"""
Wrapper for s3_update_acl to allow batch updating
"""
for acl in acls:
self.s3_update_acl(role, **acl)
# -------------------------------------------------------------------------
def s3_update_acl(self, role,
c=None, f=None, t=None, oacl=None, uacl=None,
organisation=None):
"""
Back-end method to update an ACL
"""
ALL = "all"
all_organisations = organisation == ALL
if all_organisations:
organisation = None
table = self.permission.table
if not table:
# ACLs not relevant to this security policy
return None
if c is None and f is None and t is None:
return None
if t is not None:
c = f = None
if uacl is None:
uacl = self.permission.NONE
if oacl is None:
oacl = uacl
if role:
query = ((table.group_id == role) & \
(table.controller == c) & \
(table.function == f) & \
(table.tablename == t))
record = current.db(query).select(table.id, limitby=(0, 1)).first()
acl = dict(deleted=False,
group_id=role,
controller=c,
function=f,
tablename=t,
oacl=oacl,
uacl=uacl,
all_organisations=all_organisations,
organisation=organisation)
if record:
success = record.update_record(**acl)
else:
success = table.insert(**acl)
return success
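    # Usage sketch (illustrative only; the controller/table names are made
    # up, keyword keys follow the s3_update_acl signature above):
    #
    #   acl = auth.permission
    #   auth.s3_update_acls(role_id,
    #                       dict(c="org", uacl=acl.READ, oacl=acl.ALL),
    #                       dict(t="org_office", uacl=acl.READ|acl.CREATE))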
# -------------------------------------------------------------------------
# Utilities
# -------------------------------------------------------------------------
def s3_group_members(self, group_id):
"""
Get a list of members of a group
@param group_id: the group record ID
@returns: a list of the user_ids for members of a group
"""
membership = self.settings.table_membership
query = (membership.deleted != True) & \
(membership.group_id == group_id)
members = current.db(query).select(membership.user_id)
return [member.user_id for member in members]
# -------------------------------------------------------------------------
def s3_user_pe_id(self, user_id):
"""
Get the person pe_id for a user ID
@param user_id: the user ID
"""
db = current.db
s3db = current.s3db
ltable = s3db.pr_person_user
query = (ltable.user_id == user_id)
row = db(query).select(ltable.pe_id, limitby=(0, 1)).first()
if row:
return row.pe_id
return None
# -------------------------------------------------------------------------
def s3_logged_in_person(self):
"""
Get the person record ID for the current logged-in user
"""
db = current.db
s3db = current.s3db
ptable = s3db.pr_person
if self.s3_logged_in():
try:
query = (ptable.pe_id == self.user.pe_id)
except AttributeError:
# Prepop
pass
else:
record = db(query).select(ptable.id,
limitby=(0, 1)).first()
if record:
return record.id
return None
# -------------------------------------------------------------------------
def s3_logged_in_human_resource(self):
"""
            Get the HR record ID for the current logged-in user
"""
db = current.db
s3db = current.s3db
ptable = s3db.pr_person
htable = s3db.hrm_human_resource
if self.s3_logged_in():
try:
query = (htable.person_id == ptable.id) & \
(ptable.pe_id == self.user.pe_id)
except AttributeError:
# Prepop
pass
else:
record = db(query).select(htable.id,
orderby =~htable.modified_on,
limitby=(0, 1)).first()
if record:
return record.id
return None
# -------------------------------------------------------------------------
def s3_has_permission(self, method, table, record_id = 0):
"""
S3 framework function to define whether a user can access a record
in manner "method". Designed to be called from the RESTlike
controller.
            @param method: the access method: "create", "read", "update"
                           or "delete"
            @param table: the table or tablename
            @param record_id: the record ID (0 for any record)
"""
if self.override:
return True
db = current.db
session = current.session
if not hasattr(table, "_tablename"):
s3db = current.s3db
table = s3db[table]
if session.s3.security_policy == 1:
# Simple policy
# Anonymous users can Read.
if method == "read":
authorised = True
else:
# Authentication required for Create/Update/Delete.
authorised = self.s3_logged_in()
elif session.s3.security_policy == 2:
# Editor policy
# Anonymous users can Read.
if method == "read":
authorised = True
elif method == "create":
# Authentication required for Create.
authorised = self.s3_logged_in()
elif record_id == 0 and method == "update":
# Authenticated users can update at least some records
authorised = self.s3_logged_in()
else:
# Editor role required for Update/Delete.
authorised = self.s3_has_role("Editor")
if not authorised and self.user and "owned_by_user" in table:
# Creator of Record is allowed to Edit
query = (table.id == record_id)
record = db(query).select(table.owned_by_user,
limitby=(0, 1)).first()
if record and self.user.id == record.owned_by_user:
authorised = True
elif session.s3.security_policy == 3:
# Controller ACLs
self.permission.use_cacls = True
self.permission.use_facls = False
self.permission.use_tacls = False
authorised = self.permission.has_permission(table,
record=record_id,
method=method)
elif session.s3.security_policy == 4:
# Controller+Function ACLs
self.permission.use_cacls = True
self.permission.use_facls = True
self.permission.use_tacls = False
authorised = self.permission.has_permission(table,
record=record_id,
method=method)
elif session.s3.security_policy >= 5:
# Controller+Function+Table ACLs
self.permission.use_cacls = True
self.permission.use_facls = True
self.permission.use_tacls = True
authorised = self.permission.has_permission(table,
record=record_id,
method=method)
else:
# Full policy
if self.s3_logged_in():
# Administrators are always authorised
if self.s3_has_role(1):
authorised = True
else:
# Require records in auth_permission to specify access
# (default Web2Py-style)
authorised = self.has_permission(method, table, record_id)
else:
# No access for anonymous
authorised = False
return authorised
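    # Usage sketch (illustrative only; the tablename and record ID are made
    # up, "auth" is assumed to be the AuthS3 instance):
    #
    #   if auth.s3_has_permission("update", "pr_person", record_id=5):
    #       # ...perform the update...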
# -------------------------------------------------------------------------
def s3_accessible_query(self, method, table):
"""
Returns a query with all accessible records for the currently
logged-in user
@note: This method does not work on GAE because it uses JOIN and IN
"""
if self.override:
return table.id > 0
db = current.db
session = current.session
T = current.T
policy = session.s3.security_policy
if policy == 1:
# "simple" security policy: show all records
return table.id > 0
elif policy == 2:
# "editor" security policy: show all records
return table.id > 0
elif policy in (3, 4, 5, 6):
# ACLs: use S3Permission method
query = self.permission.accessible_query(table, method)
return query
# "Full" security policy
if self.s3_has_role(1):
# Administrators can see all data
return table.id > 0
# If there is access to the entire table then show all records
try:
user_id = self.user.id
except:
user_id = 0
if self.has_permission(method, table, 0, user_id):
return table.id > 0
# Filter Records to show only those to which the user has access
session.warning = T("Only showing accessible records!")
membership = self.settings.table_membership
permission = self.settings.table_permission
return table.id.belongs(db(membership.user_id == user_id)\
(membership.group_id == permission.group_id)\
(permission.name == method)\
(permission.table_name == table)\
._select(permission.record_id))
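    # Usage sketch (illustrative only; assumes the usual request environment
    # with db/s3db/auth initialised):
    #
    #   table = s3db.pr_person
    #   query = auth.s3_accessible_query("read", table)
    #   rows = db(query & (table.deleted != True)).select(table.id)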
# -------------------------------------------------------------------------
def s3_has_membership(self, group_id=None, user_id=None, role=None):
"""
Checks if user is member of group_id or role
            Extends Web2Py's has_membership() to add new functionality:
- Custom Flash style
- Uses s3_has_role()
"""
if self.override:
return True
group_id = group_id or self.id_group(role)
try:
group_id = int(group_id)
except:
group_id = self.id_group(group_id) # interpret group_id as a role
if self.s3_has_role(group_id):
r = True
else:
r = False
log = self.messages.has_membership_log
if log:
if not user_id and self.user:
user_id = self.user.id
self.log_event(log % dict(user_id=user_id,
group_id=group_id, check=r))
return r
# Override original method
has_membership = s3_has_membership
# -------------------------------------------------------------------------
def s3_requires_membership(self, role):
"""
            Decorator that prevents access to the action if the user is
            not logged in or not a member of the group. If a role name is
            provided instead of a group ID, the group ID is looked up.
Extends Web2Py's requires_membership() to add new functionality:
- Custom Flash style
- Uses s3_has_role()
- Administrators (id=1) are deemed to have all roles
"""
def decorator(action):
def f(*a, **b):
if self.override:
return action(*a, **b)
if not self.s3_logged_in():
request = current.request
next = URL(args=request.args, vars=request.get_vars)
import urllib
redirect("%s?_next=%s" % (self.settings.login_url,
urllib.quote(next)))
if not self.s3_has_role(role) and not self.s3_has_role(1):
current.session.error = self.messages.access_denied
next = self.settings.on_failed_authorization
redirect(next)
return action(*a, **b)
f.__doc__ = action.__doc__
return f
return decorator
# Override original method
requires_membership = s3_requires_membership
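    # Usage sketch (illustrative only; the role name is made up):
    #
    #   @auth.s3_requires_membership("Editor")
    #   def update():
    #       ...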
# -------------------------------------------------------------------------
def s3_make_session_owner(self, table, record_id):
"""
Makes the current session owner of a record
@param table: the table or table name
@param record_id: the record ID
"""
if hasattr(table, "_tablename"):
table = table._tablename
if not self.user:
session = current.session
if "owned_records" not in session:
session.owned_records = Storage()
records = session.owned_records.get(table, [])
record_id = str(record_id)
if record_id not in records:
records.append(record_id)
session.owned_records[table] = records
# -------------------------------------------------------------------------
def s3_session_owns(self, table, record_id):
"""
Checks whether the current session owns a record
@param table: the table or table name
@param record_id: the record ID
"""
if hasattr(table, "_tablename"):
table = table._tablename
if not self.user:
try:
records = current.session.owned_records.get(table, [])
except:
records = []
if str(record_id) in records:
return True
return False
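    # Usage sketch (illustrative only; the tablename is made up) - session
    # ownership lets an anonymous session edit the records it created:
    #
    #   auth.s3_make_session_owner("pr_person", record_id)
    #   ...
    #   if auth.s3_session_owns("pr_person", record_id):
    #       # the current (anonymous) session created this record
    #       ...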
# -------------------------------------------------------------------------
def s3_set_record_owner(self, table, record):
"""
Set the owner organisation for a record
@param table: the table or table name
@param record: the record (as row) or record ID
"""
db = current.db
s3db = current.s3db
manager = current.manager
site_types = self.org_site_types
OWNED_BY_ORG = "owned_by_organisation"
ORG_ID = "organisation_id"
ORG_PREFIX = "Org_%s"
ORG_TABLENAME = "org_organisation"
NAME = "name"
org_table = s3db[ORG_TABLENAME]
grp_table = self.settings.table_group
# Get the table
if isinstance(table, str):
table = s3db[table]
tablename = table._tablename
_id = table._id.name
# Which fields are available?
fields = [table._id.name,
NAME,
ORG_ID,
OWNED_BY_ORG]
fields = [table[f] for f in fields if f in table.fields]
# Get the record
if not isinstance(record, Row):
record_id = record
record = db(table._id == record_id).select(limitby=(0, 1),
*fields).first()
else:
if table._id.name in record:
record_id = record[table._id.name]
else:
record_id = None
            missing = [f for f in fields if f.name not in record]
if missing:
if record_id:
query = table._id == record_id
record = db(query).select(limitby=(0, 1),
*fields).first()
else:
record = None
if not record:
# Raise an exception here?
return
# Get the organisation ID
org_role = None
if tablename == ORG_TABLENAME:
organisation_id = record[_id]
if OWNED_BY_ORG in record:
org_role = record[OWNED_BY_ORG]
if not org_role:
# Create a new org_role
uuid = ORG_PREFIX % organisation_id
if NAME in table:
name = record[NAME]
else:
name = uuid
role = Storage(uuid=uuid,
deleted=False,
hidden=False,
system=True,
protected=True,
role="%s (Organisation)" % name,
description="All Staff of Organization %s" % name)
query = (grp_table.uuid == role.uuid) | \
(grp_table.role == role.role)
record = db(query).select(grp_table.id,
limitby=(0, 1)).first()
if not record:
org_role = grp_table.insert(**role)
else:
record.update_record(**role)
org_role = record.id
elif ORG_ID in table:
organisation_id = record[ORG_ID]
# Get the org_role from the organisation
if organisation_id:
query = org_table.id == organisation_id
organisation = db(query).select(org_table[OWNED_BY_ORG],
limitby=(0, 1)).first()
if organisation:
org_role = organisation[OWNED_BY_ORG]
# Update the record as necessary
data = Storage()
if org_role and OWNED_BY_ORG in table:
data[OWNED_BY_ORG] = org_role
if data and hasattr(record, "update_record"):
record.update_record(**data)
elif data and record_id:
db(table._id == record_id).update(**data)
return
# -------------------------------------------------------------------------
def s3_send_welcome_email(self, user):
"""
Send a welcome mail to newly-registered users
- especially suitable for users from Facebook/Google who don't
verify their emails
"""
if "name" in user:
user["first_name"] = user["name"]
if "family_name" in user:
# Facebook
user["last_name"] = user["family_name"]
subject = self.messages.welcome_email_subject
message = self.messages.welcome_email
self.settings.mailer.send(user["email"], subject=subject, message=message)
# =============================================================================
class S3Permission(object):
"""
S3 Class to handle permissions
@author: Dominic König <dominic@aidiq.com>
"""
TABLENAME = "s3_permission"
CREATE = 0x0001
READ = 0x0002
UPDATE = 0x0004
DELETE = 0x0008
ALL = CREATE | READ | UPDATE | DELETE
NONE = 0x0000 # must be 0!
PERMISSION_OPTS = OrderedDict([
#(NONE, "NONE"),
#(READ, "READ"),
#(CREATE|UPDATE|DELETE, "WRITE"),
[CREATE, "CREATE"],
[READ, "READ"],
[UPDATE, "UPDATE"],
[DELETE, "DELETE"]])
# Method string <-> required permission
METHODS = Storage({
"create": CREATE,
"import": CREATE,
"read": READ,
"report": READ,
"search": READ,
"update": UPDATE,
"delete": DELETE})
# Policy helpers
most_permissive = lambda self, acl: \
reduce(lambda x, y: (x[0]|y[0], x[1]|y[1]),
acl, (self.NONE, self.NONE))
most_restrictive = lambda self, acl: \
reduce(lambda x, y: (x[0]&y[0], x[1]&y[1]),
acl, (self.ALL, self.ALL))
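    # Worked example of the bitmask arithmetic (values per the constants
    # above): READ | UPDATE == 0x0006, and (ALL & ~DELETE) & UPDATE == UPDATE.
    # most_permissive/most_restrictive fold a list of (oacl, uacl) tuples
    # with |/& respectively, e.g.:
    #
    #   most_permissive([(READ, NONE), (NONE, CREATE)]) == (READ, CREATE)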
# -------------------------------------------------------------------------
def __init__(self, auth, tablename=None):
"""
Constructor, invoked by AuthS3.__init__
@param tablename: the name for the permissions table
"""
# Instantiated once per request, but before Auth tables
# are defined and authentication is checked, thus no use
# to check permissions in the constructor
# Auth
self.auth = auth
# Deployment settings
settings = current.deployment_settings
self.policy = settings.get_security_policy()
# Which level of granularity do we want?
self.use_cacls = self.policy in (3, 4, 5, 6) # Controller ACLs
self.use_facls = self.policy in (4, 5, 6) # Function ACLs
self.use_tacls = self.policy in (5, 6) # Table ACLs
self.org_roles = self.policy == 6 # OrgAuth
self.modules = settings.modules
        # If a large number of roles in the system turns into a bottleneck
        # in policy 6, then we could reduce the number of roles in
        # subsequent queries; however, this would add another query (or
        # even two more queries) to the request, so the hypothetical
        # performance gain should first be confirmed by tests:
#if self.policy == 6:
#gtable = auth.settings.table_group
#org_roles = current.db(gtable.uid.like("Org_%")).select(gtable.id)
#self.org_roles = [r.id for r in org_roles]
#else:
#self.org_roles = []
# Permissions table
self.tablename = tablename or self.TABLENAME
self.table = current.db.get(self.tablename, None)
# Error messages
T = current.T
self.INSUFFICIENT_PRIVILEGES = T("Insufficient Privileges")
self.AUTHENTICATION_REQUIRED = T("Authentication Required")
# Request information
request = current.request
self.controller = request.controller
self.function = request.function
# Request format
self.format = request.extension
if "format" in request.get_vars:
ext = request.get_vars.format
if isinstance(ext, list):
ext = ext[-1]
self.format = ext.lower() or self.format
else:
ext = [a for a in request.args if "." in a]
if ext:
self.format = ext[-1].rsplit(".", 1)[1].lower()
if request.function == "ticket" and \
request.controller == "admin":
# Error tickets need an override
self.format = "html"
# Page permission cache
self.page_acls = Storage()
self.table_acls = Storage()
# Pages which never require permission:
# Make sure that any data access via these pages uses
# accessible_query explicitly!
self.unrestricted_pages = ("default/index",
"default/user",
"default/contact",
"default/about")
# Default landing pages
_next = URL(args=request.args, vars=request.vars)
self.homepage = URL(c="default", f="index")
self.loginpage = URL(c="default", f="user", args="login",
vars=dict(_next=_next))
# -------------------------------------------------------------------------
def define_table(self, migrate=True, fake_migrate=False):
"""
Define permissions table, invoked by AuthS3.define_tables()
"""
db = current.db
table_group = self.auth.settings.table_group
if table_group is None:
table_group = "integer" # fallback (doesn't work with requires)
if not self.table:
self.table = db.define_table(self.tablename,
Field("group_id", table_group),
Field("controller", length=64),
Field("function", length=512),
Field("tablename", length=512),
Field("oacl", "integer", default=self.ALL),
Field("uacl", "integer", default=self.READ),
# Only apply to records owned by this
# organisation role (policy 6 only):
Field("all_organisations", "boolean",
default=False),
Field("organisation",
table_group,
requires = IS_NULL_OR(IS_IN_DB(
db, table_group.id))),
migrate=migrate,
fake_migrate=fake_migrate,
*(s3_uid()+s3_timestamp()+s3_deletion_status()))
# -------------------------------------------------------------------------
def __call__(self,
c=None,
f=None,
table=None,
record=None):
"""
Get the ACL for the current user for a path
@param c: the controller name (falls back request.controller)
@param f: the function name (falls back to request.function)
@param table: the table
@param record: the record ID (or the Row if already loaded)
@note: if passing a Row, it must contain all available ownership
fields (id, owned_by_user, owned_by_group), otherwise the
record will be re-loaded by this function
"""
_debug("auth.permission(c=%s, f=%s, table=%s, record=%s)" %
(c, f, table, record))
t = self.table # Permissions table
auth = self.auth
sr = auth.get_system_roles()
if record == 0:
record = None
# Get user roles, check logged_in to trigger HTTPBasicAuth
if not auth.s3_logged_in():
roles = [sr.ANONYMOUS]
else:
roles = [sr.AUTHENTICATED]
if current.session.s3 is not None:
roles = current.session.s3.roles or roles
if not self.use_cacls:
# Fall back to simple authorization
_debug("Simple authorization")
if auth.s3_logged_in():
_debug("acl=%04x" % self.ALL)
return self.ALL
else:
_debug("acl=%04x" % self.READ)
return self.READ
if sr.ADMIN in roles:
_debug("Administrator, acl=%04x" % self.ALL)
return self.ALL
# Fall back to current request
c = c or self.controller
f = f or self.function
# Do we need to check the owner role (i.e. table+record given)?
is_owner = False
require_org = None
if table is not None and record is not None:
owner_role, owner_user, owner_org = \
self.get_owners(table, record)
is_owner = self.is_owner(table, None,
owner_role=owner_role,
owner_user=owner_user,
owner_org=owner_org)
if self.policy == 6:
require_org = owner_org
# Get the applicable ACLs
page_acl = self.page_acl(c=c, f=f,
require_org=require_org)
if table is None or not self.use_tacls:
acl = page_acl
else:
if sr.EDITOR in roles:
table_acl = (self.ALL, self.ALL)
else:
table_acl = self.table_acl(table=table,
c=c,
default=page_acl,
require_org=require_org)
acl = self.most_restrictive((page_acl, table_acl))
# Decide which ACL to use for this case
if acl[0] == self.NONE and acl[1] == self.NONE:
# No table access at all
acl = self.NONE
elif record is None:
# No record specified, return most permissive ACL
acl = (acl[0] & ~self.CREATE) | acl[1]
else:
# ACL based on ownership
acl = is_owner and (acl[0] | acl[1]) or acl[1]
_debug("acl=%04x" % acl)
return acl
# -------------------------------------------------------------------------
def page_acl(self, c=None, f=None, require_org=None):
"""
Get the ACL for a page
@param c: the controller (falls back to current request)
            @param f: the function (falls back to current request)
            @param require_org: the owner organisation role to restrict
                                OrgAuth ACLs to (policy 6 only)
@returns: tuple of (ACL for owned resources, ACL for all resources)
"""
session = current.session
policy = self.policy
t = self.table
sr = self.auth.get_system_roles()
most_permissive = self.most_permissive
roles = []
if session.s3 is not None:
roles = session.s3.roles or []
if sr.ADMIN in roles:
# Admin always has rights
return (self.ALL, self.ALL)
c = c or self.controller
f = f or self.function
page = "%s/%s" % (c, f)
if page in self.unrestricted_pages:
page_acl = (self.ALL, self.ALL)
elif c not in self.modules or \
c in self.modules and not self.modules[c].restricted or \
not self.use_cacls:
# Controller is not restricted => simple authorization
if self.auth.s3_logged_in():
page_acl = (self.ALL, self.ALL)
else:
page_acl = (self.READ, self.READ)
else:
# Lookup cached result
page_acl = self.page_acls.get((page, require_org), None)
if page_acl is None:
page_acl = (self.NONE, self.NONE) # default
q = ((t.deleted != True) & \
(t.controller == c) & \
((t.function == f) | (t.function == None)))
if roles:
query = (t.group_id.belongs(roles)) & q
else:
query = (t.group_id == None) & q
# Additional restrictions in OrgAuth
if policy == 6 and require_org:
field = t.organisation
query &= ((t.all_organisations == True) | \
(field == require_org) | (field == None))
rows = current.db(query).select()
if rows:
# ACLs found, check for function-specific
controller_acl = []
function_acl = []
for row in rows:
if not row.function:
controller_acl += [(row.oacl, row.uacl)]
else:
function_acl += [(row.oacl, row.uacl)]
# Function-specific ACL overrides Controller ACL
if function_acl and self.use_facls:
page_acl = most_permissive(function_acl)
elif controller_acl:
page_acl = most_permissive(controller_acl)
# Remember this result
self.page_acls.update({(page, require_org): page_acl})
return page_acl
# -------------------------------------------------------------------------
def table_acl(self, table=None, c=None, default=None,
require_org=None):
"""
Get the ACL for a table
@param table: the table
@param c: the controller (falls back to current request)
            @param default: ACL to apply if no specific table ACL is found
            @param require_org: the owner organisation role to restrict
                                OrgAuth ACLs to (policy 6 only)
@returns: tuple of (ACL for owned resources, ACL for all resources)
"""
if table is None or not self.use_tacls:
return self.page_acl(c=c)
policy = self.policy
t = self.table
sr = self.auth.get_system_roles()
roles = []
if current.session.s3 is not None:
roles = current.session.s3.roles or []
if sr.ADMIN in roles:
# Admin always has rights
return (self.ALL, self.ALL)
c = c or self.controller
if default is None:
if self.auth.s3_logged_in():
default = (self.ALL, self.ALL)
else:
default = (self.READ, self.READ)
# Already loaded?
if hasattr(table, "_tablename"):
tablename = table._tablename
else:
tablename = table
table_acl = self.table_acls.get((tablename, require_org), None)
if table_acl is None:
q = ((t.deleted != True) & \
(t.tablename == tablename) &
((t.controller == c) | (t.controller == None)))
if roles:
query = (t.group_id.belongs(roles)) & q
else:
query = (t.group_id == None) & q
# Additional restrictions in OrgAuth
if policy == 6 and require_org:
field = t.organisation
query &= ((t.all_organisations == True) | \
(field == require_org) | (field == None))
rows = current.db(query).select()
table_acl = [(r.oacl, r.uacl) for r in rows]
if table_acl:
# ACL found, apply most permissive role
table_acl = self.most_permissive(table_acl)
else:
# No ACL found for any of the roles, fall back to default
table_acl = default
# Remember this result
self.table_acls.update({(tablename, require_org): table_acl})
return table_acl
# -------------------------------------------------------------------------
def get_owners(self, table, record):
"""
Get the organisation/group/user owning a record
@param table: the table
@param record: the record ID (or the Row, if already loaded)
"""
owner_org = None
owner_role = None
owner_user = None
record_id = None
# Check which ownership fields the table defines
ownership_fields = ("owned_by_user",
"owned_by_group",
"owned_by_organisation")
fields = [f for f in ownership_fields if f in table.fields]
if not fields:
# Ownership is not defined for this table
return (None, None, None)
if isinstance(record, Row):
# Check if all necessary fields are present
missing = [f for f in fields if f not in record]
if missing:
# Have to reload the record :(
if table._id.name in record:
record_id = record[table._id.name]
record = None
else:
# Record ID given
record_id = record
record = None
if not record and record_id:
# Get the record
fs = [table[f] for f in fields] + [table.id]
query = (table._id == record_id)
record = current.db(query).select(limitby=(0, 1), *fs).first()
if not record:
# Record does not exist
return (None, None, None)
if "owned_by_group" in record:
owner_role = record["owned_by_group"]
if "owned_by_user" in record:
owner_user = record["owned_by_user"]
if "owned_by_organisation" in record:
owner_org = record["owned_by_organisation"]
return (owner_role, owner_user, owner_org)
# -------------------------------------------------------------------------
def is_owner(self, table, record,
owner_role=None,
owner_user=None,
owner_org=None):
"""
Establish the ownership of a record
@param table: the table
@param record: the record ID (or the Row if already loaded)
@param owner_role: owner_role of the record (if already known)
@param owner_user: owner_user of the record (if already known)
@param owner_org: owner_org of the record (if already known)
@note: if passing a Row, it must contain all available ownership
fields (id, owned_by_user, owned_by_group), otherwise the
record will be re-loaded by this function
"""
user_id = None
roles = []
sr = self.auth.get_system_roles()
if self.auth.user is not None:
user_id = self.auth.user.id
if current.session.s3 is not None:
roles = current.session.s3.roles or []
if not user_id and not roles:
return False
elif sr.ADMIN in roles:
# Admin owns all records
return True
elif record:
owner_role, owner_user, owner_org = \
self.get_owners(table, record)
try:
record_id = record.id
except:
record_id = record
# Session ownership?
if not user_id:
if not owner_user and record_id and \
self.auth.s3_session_owns(table, record_id):
# Session owns record
return True
else:
return False
# Individual record ownership
if owner_user and owner_user == user_id:
return True
# OrgAuth?
if self.policy == 6 and owner_org:
# Must have the organisation's staff role
if owner_org not in roles:
return False
# Owner?
if not owner_role and not owner_user:
# All authenticated users own this record
return True
elif owner_role and owner_role in roles:
# user has owner role
return True
else:
return False
# -------------------------------------------------------------------------
def hidden_modules(self):
"""
List of modules to hide from the main menu
"""
sr = self.auth.get_system_roles()
hidden_modules = []
if self.use_cacls:
restricted_modules = [m for m in self.modules
if self.modules[m].restricted]
roles = []
if current.session.s3 is not None:
roles = current.session.s3.roles or []
if sr.ADMIN in roles or sr.EDITOR in roles:
return []
if not roles:
hidden_modules = restricted_modules
else:
t = self.table
query = (t.deleted != True) & \
(t.controller.belongs(restricted_modules)) & \
(t.tablename == None)
if roles:
query = query & (t.group_id.belongs(roles))
else:
query = query & (t.group_id == None)
rows = current.db(query).select()
acls = dict()
for acl in rows:
if acl.controller not in acls:
acls[acl.controller] = self.NONE
acls[acl.controller] |= acl.oacl | acl.uacl
hidden_modules = [m for m in restricted_modules
if m not in acls or not acls[m]]
return hidden_modules
# -------------------------------------------------------------------------
def accessible_url(self,
c=None,
f=None,
p=None,
t=None,
a=None,
args=[],
vars={},
anchor="",
extension=None,
env=None):
"""
Return a URL only if accessible by the user, otherwise False
@param c: the controller
@param f: the function
@param p: the permission (defaults to READ)
@param t: the tablename (defaults to <c>_<f>)
@param a: the application name
@param args: the URL arguments
@param vars: the URL variables
@param anchor: the anchor (#) of the URL
@param extension: the request format extension
@param env: the environment
"""
required = self.METHODS
if p in required:
permission = required[p]
else:
permission = self.READ
if not c:
c = self.controller
if not f:
f = self.function
if t is None:
tablename = "%s_%s" % (c, f)
else:
tablename = t
# Hide disabled modules
if self.modules and c not in self.modules:
return False
permitted = True
if not self.auth.override:
if self.use_cacls:
acl = self(c=c, f=f, table=tablename)
if acl & permission != permission:
permitted = False
else:
if permission != self.READ:
permitted = self.auth.s3_logged_in()
if permitted:
return URL(a=a,
c=c,
f=f,
args=args,
vars=vars,
anchor=anchor,
extension=extension,
env=env)
else:
return False
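    # Usage sketch (illustrative only; controller/function are made up):
    #
    #   url = auth.permission.accessible_url(c="hrm", f="staff", p="update")
    #   if url:
    #       # render a button/link pointing at url
    #       ...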
# -------------------------------------------------------------------------
def page_restricted(self, c=None, f=None):
"""
Checks whether a page is restricted (=whether ACLs
are to be applied)
@param c: controller
@param f: function
"""
page = "%s/%s" % (c, f)
if page in self.unrestricted_pages:
return False
elif c not in self.modules or \
c in self.modules and not self.modules[c].restricted:
return False
return True
# -------------------------------------------------------------------------
def applicable_acls(self, roles, racl, c=None, f=None, t=None):
"""
Get the available ACLs for the particular situation
@param roles: the roles of the current user
@param racl: the required ACL
@param c: controller
@param f: function
@param t: tablename
@returns: None for no ACLs to apply (access granted), [] for
no ACLs matching the required permissions (access
denied), or a list of ACLs to apply.
"""
db = current.db
table = self.table
if not self.use_cacls:
# We do not use ACLs at all
return None
c = c or self.controller
f = f or self.function
if self.page_restricted(c=c, f=f):
page_restricted = True
else:
page_restricted = False
# Get page ACLs
page_acls = None
if page_restricted:
# Base query
query = (table.deleted != True) & \
(table.function == None)
if f and self.use_facls:
query = (query | (table.function == f))
query &= (table.controller == c)
# Do not use delegated ACLs except for policy 6
if self.policy != 6:
query &= (table.organisation == None)
# Restrict to available roles
if roles:
query &= (table.group_id.belongs(roles))
else:
query &= (table.group_id == None)
page_acls = db(query).select(table.ALL)
if page_acls:
if f and self.use_facls:
                    facl = [acl for acl in page_acls if acl.function is not None]
if facl:
page_acls = facl
page_acls = [acl for acl in page_acls
if (acl.uacl & racl == racl or
acl.oacl & racl == racl)]
else:
# Page is restricted, but no permitting ACL
# available for this set of roles => no access
return []
# Get table ACLs
table_acls = []
if t and self.use_tacls:
# Base query
query = ((table.deleted != True) & \
(table.controller == None) & \
(table.function == None) &
(table.tablename == t))
# Is the table restricted at all?
restricted = db(query).select(limitby=(0, 1)).first() is not None
# Do not use delegated ACLs except for policy 6
if self.policy != 6:
query &= (table.organisation == None)
# Restrict to available roles
if roles:
query = (table.group_id.belongs(roles)) & query
else:
query = (table.group_id == None) & query
table_acls = db(query).select(table.ALL)
if restricted and table_acls:
# if the table is restricted and there are ACLs
# available for this set of roles, then deny access
# if none of the ACLs gives the required permissions
_debug("acls: %s" % table_acls)
default = []
else:
# otherwise, if the table is unrestricted or there are
# no restricting ACLs for this set of roles, then grant
# access as per page_acls
default = page_acls
# Find matches
table_acls = [acl for acl in table_acls
if (acl.uacl & racl == racl or
acl.oacl & racl == racl)]
if table_acls:
# Found matching table ACLs, grant access
return table_acls
else:
# No matching table ACLs found
return default
# default:
return page_acls
# -------------------------------------------------------------------------
def accessible_query(self, table, *methods):
"""
Query for records which the user is permitted to access
with methods
Example::
query = auth.permission.accessible_query(table,
"read", "update")
- requests a query for records that can be both read and
updated.
@param table: the DB table
@param methods: list of methods for which permission is
required (AND), any combination of "create",
"read", "update", "delete"
"""
_debug("accessible_query(%s, %s)" % (table, methods))
session = current.session
policy = self.policy
required = self.METHODS
sr = self.auth.get_system_roles()
OWNED_BY_ORG = "owned_by_organisation"
OWNED_BY_USER = "owned_by_user"
OWNED_BY_GROUP = "owned_by_group"
ALL_ORGS = "all_organisations"
# Default queries
query = (table._id != None)
no_access = (table._id == None)
# Required ACL
racl = reduce(lambda a, b: a | b,
[required[m]
for m in methods if m in required],
self.NONE)
if not racl:
_debug("No permission specified, query=%s" % query)
return query
# User & Roles
user_id = None
if self.auth.user is not None:
user_id = self.auth.user.id
roles = []
if session.s3 is not None:
roles = session.s3.roles or []
if sr.ADMIN in roles or sr.EDITOR in roles:
_debug("Admin/Editor in Roles, query=%s" % query)
return query
# Org roles the user has
org_roles = []
all_orgs = False
if policy == 6:
org_roles = list(roles)
# Applicable ACLs
acls = self.applicable_acls(roles, racl, t=table)
permitted = False
ownership_required = True
if acls is None:
permitted = True
ownership_required = False
elif acls:
permitted = True
for acl in acls:
_debug("ACL: oacl=%04x uacl=%04x" % (acl.oacl, acl.uacl))
if acl.uacl & racl == racl:
ownership_required = False
_debug("uACL found - no ownership required")
if policy == 6:
org_role = acl.organisation
if acl[ALL_ORGS]:
all_orgs = True
elif org_role and org_role not in org_roles:
org_roles.append(org_role)
if not permitted:
_debug("No access")
return no_access
_debug("ownership_required=%s" % ownership_required)
# Query fragments
if OWNED_BY_ORG in table:
has_org_role = ((table[OWNED_BY_ORG] == None) | \
(table[OWNED_BY_ORG].belongs(org_roles)))
if OWNED_BY_USER in table:
user_owns_record = (table[OWNED_BY_USER] == user_id)
# OrgAuth
q = None
if policy == 6 and OWNED_BY_ORG in table and not all_orgs:
q = has_org_role
if user_id and OWNED_BY_USER in table:
q |= user_owns_record
if q is not None:
query = q
if ownership_required:
if not user_id:
query = (table._id == None)
if OWNED_BY_USER in table:
try:
records = session.owned_records.get(table._tablename,
None)
except:
pass
else:
if records:
query = (table._id.belongs(records))
else:
qowner = qrole = quser = None
if OWNED_BY_GROUP in table:
qrole = (table.owned_by_group.belongs(roles))
if OWNED_BY_USER in table and user_id:
quser = (table.owned_by_user == user_id)
if qrole is not None:
qowner = qrole
if quser is not None:
if qowner is not None:
qowner = (qowner | quser)
else:
qowner = quser
if qowner is not None:
if query is not None:
query = query & qowner
else:
query = qowner
# Fallback
if query is None:
query = (table._id > 0)
_debug("Access granted, query=%s" % query)
return query
# -------------------------------------------------------------------------
def ownership_required(self, table, *methods):
"""
Check if record ownership is required for a method
@param table: the table
@param methods: methods to check (OR)
            @status: deprecated, use applicable_acls instead
"""
sr = self.auth.get_system_roles()
roles = []
if current.session.s3 is not None:
# No ownership required in policies without ACLs
if not self.use_cacls:
return False
roles = current.session.s3.roles or []
if sr.ADMIN in roles or sr.EDITOR in roles:
return False # Admins and Editors do not need to own a record
required = self.METHODS
racl = reduce(lambda a, b: a | b,
[required[m] for m in methods if m in required],
self.NONE)
if not racl:
return False
# Available ACLs
pacl = self.page_acl()
if not self.use_tacls:
acl = pacl
else:
tacl = self.table_acl(table)
acl = (tacl[0] & pacl[0], tacl[1] & pacl[1])
# Ownership required?
permitted = (acl[0] | acl[1]) & racl == racl
ownership_required = False
if not permitted:
pkey = table.fields[0]
query = (table[pkey] == None)
elif "owned_by_group" in table or "owned_by_user" in table:
ownership_required = permitted and acl[1] & racl != racl
return ownership_required
# -------------------------------------------------------------------------
def has_permission(self, table, record=None, method=None):
"""
Check permission to access a record
@param table: the table
@param record: the record or record ID (None for any record)
@param method: the method (or tuple/list of methods),
any of "create", "read", "update", "delete"
@note: when submitting a record, the record ID and the ownership
fields (="owned_by_user", "owned_by_group") must be contained
if available, otherwise the record will be re-loaded
"""
_debug("has_permission(%s, %s, method=%s)" %
(table, record, method))
required = self.METHODS
if not isinstance(method, (list, tuple)):
method = [method]
# Required ACL
racl = reduce(lambda a, b: a | b,
[required[m] for m in method if m in required], self.NONE)
# Available ACL
aacl = self(table=table, record=record)
permitted = racl & aacl == racl
_debug("permitted=%s" % permitted)
return permitted
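    # Worked example of the required-ACL computation (cf. METHODS above):
    # for method=["read", "update"], racl == READ | UPDATE == 0x0006, and
    # permission is granted iff the available ACL covers both bits:
    #
    #   permitted = (0x0006 & aacl) == 0x0006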
# -------------------------------------------------------------------------
def permitted_facilities(self,
table=None,
error_msg=None,
redirect_on_error=True,
facility_type=None):
"""
If there are no facilities that the user has permission for,
prevents create & update of records in table & gives a
warning if the user tries to.
@param table: the table or table name
@param error_msg: error message
@param redirect_on_error: whether to redirect on error
@param facility_type: restrict to this particular type of
facilities (a tablename)
"""
db = current.db
s3db = current.s3db
T = current.T
ERROR = T("You do not have permission for any facility to perform this action.")
HINT = T("Create a new facility or ensure that you have permissions for an existing facility.")
if not error_msg:
error_msg = ERROR
site_ids = []
if facility_type is None:
site_types = self.auth.org_site_types
else:
if facility_type not in self.auth.org_site_types:
return
site_types = [s3db[facility_type]]
for site_type in site_types:
try:
ftable = s3db[site_type]
if not "site_id" in ftable.fields:
continue
query = self.auth.s3_accessible_query("update", ftable)
if "deleted" in ftable:
query &= (ftable.deleted != True)
rows = db(query).select(ftable.site_id)
site_ids += [row.site_id for row in rows]
except:
# Module disabled
pass
if site_ids:
return site_ids
args = current.request.args
if "update" in args or "create" in args:
if redirect_on_error:
# Trying to create or update
                # If they do not have permission for any facility
current.session.error = "%s %s" % (error_msg, HINT)
redirect(URL(c="default", f="index"))
elif table is not None:
if hasattr(table, "_tablename"):
tablename = table._tablename
else:
tablename = table
current.manager.configure(tablename, insertable = False)
return []
# -------------------------------------------------------------------------
def permitted_organisations(self,
table=None,
error_msg=None,
redirect_on_error=True):
"""
If there are no organisations that the user has update
permission for, prevents create & update of a record in
            table & gives a warning if the user tries to.
@param table: the table or table name
@param error_msg: error message
@param redirect_on_error: whether to redirect on error
"""
db = current.db
s3db = current.s3db
manager = current.manager
T = current.T
ERROR = T("You do not have permission for any organization to perform this action.")
HINT = T("Create a new organization or ensure that you have permissions for an existing organization.")
if not error_msg:
error_msg = ERROR
org_table = s3db.org_organisation
query = self.auth.s3_accessible_query("update", org_table)
query &= (org_table.deleted == False)
rows = db(query).select(org_table.id)
if rows:
return [org.id for org in rows]
request = current.request
if "update" in request.args or "create" in request.args:
if redirect_on_error:
                current.session.error = "%s %s" % (error_msg, HINT)
redirect(URL(c="default", f="index"))
elif table is not None:
if hasattr(table, "_tablename"):
tablename = table._tablename
else:
tablename = table
manager.configure(tablename, insertable = False)
return []
# -------------------------------------------------------------------------
def fail(self):
"""
Action upon insufficient permissions
"""
if self.format == "html":
# HTML interactive request => flash message + redirect
if self.auth.s3_logged_in():
current.session.error = self.INSUFFICIENT_PRIVILEGES
redirect(self.homepage)
else:
current.session.error = self.AUTHENTICATION_REQUIRED
redirect(self.loginpage)
else:
# non-HTML request => raise proper HTTP error
if self.auth.s3_logged_in():
raise HTTP(403, body=self.INSUFFICIENT_PRIVILEGES)
else:
raise HTTP(401, body=self.AUTHENTICATION_REQUIRED)
# =============================================================================
class S3Audit(object):
"""
S3 Audit Trail Writer Class
@author: Dominic König <dominic@aidiq.com>
"""
def __init__(self,
tablename="s3_audit",
migrate=True,
fake_migrate=False):
"""
Constructor
@param tablename: the name of the audit table
            @param migrate: migration setting
            @param fake_migrate: fake-migration setting
@note: this defines the audit table
"""
db = current.db
self.table = db.get(tablename, None)
if not self.table:
self.table = db.define_table(tablename,
Field("timestmp", "datetime"),
Field("person", "integer"),
Field("operation"),
Field("tablename"),
Field("record", "integer"),
Field("representation"),
Field("old_value", "text"),
Field("new_value", "text"),
migrate=migrate,
fake_migrate=fake_migrate)
session = current.session
self.auth = session.auth
if session.auth and session.auth.user:
self.user = session.auth.user.id
else:
self.user = None
self.diff = None
# -------------------------------------------------------------------------
def __call__(self, operation, prefix, name,
form=None,
record=None,
representation="unknown"):
"""
Audit
@param operation: Operation to log, one of
"create", "update", "read", "list" or "delete"
@param prefix: the module prefix of the resource
@param name: the name of the resource (without prefix)
@param form: the form
@param record: the record ID
@param representation: the representation format
"""
settings = current.session.s3
#print >>sys.stderr, "Audit %s: %s_%s record=%s representation=%s" % \
#(operation, prefix, name, record, representation)
now = datetime.datetime.utcnow()
db = current.db
table = self.table
tablename = "%s_%s" % (prefix, name)
if record:
if isinstance(record, Row):
record = record.get("id", None)
if not record:
return True
try:
record = int(record)
except ValueError:
record = None
elif form:
try:
record = form.vars["id"]
except:
try:
record = form["id"]
except:
record = None
if record:
try:
record = int(record)
except ValueError:
record = None
else:
record = None
if operation in ("list", "read"):
if settings.audit_read:
table.insert(timestmp = now,
person = self.user,
operation = operation,
tablename = tablename,
record = record,
representation = representation)
elif operation in ("create", "update"):
if settings.audit_write:
if form:
record = form.vars.id
new_value = ["%s:%s" % (var, str(form.vars[var]))
for var in form.vars]
else:
new_value = []
table.insert(timestmp = now,
person = self.user,
operation = operation,
tablename = tablename,
record = record,
representation = representation,
new_value = new_value)
self.diff = None
elif operation == "delete":
if settings.audit_write:
query = db[tablename].id == record
row = db(query).select(limitby=(0, 1)).first()
old_value = []
if row:
old_value = ["%s:%s" % (field, row[field])
for field in row]
table.insert(timestmp = now,
person = self.user,
operation = operation,
tablename = tablename,
record = record,
representation = representation,
old_value = old_value)
self.diff = None
return True
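    # Usage sketch (illustrative only; prefix/name/record are made up and
    # "audit" is assumed to be an S3Audit instance, with audit_write enabled
    # in the session settings):
    #
    #   audit = S3Audit()
    #   audit("update", "pr", "person", record=5, representation="html")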
# =============================================================================
class S3RoleManager(S3Method):
"""
REST Method to manage ACLs (Role Manager UI for administrators)
@todo: does not handle org-wise role assignment or
delegation of permissions yet.
"""
# Controllers to hide from the permissions matrix
HIDE_CONTROLLER = ("admin", "default")
# Roles to hide from the permissions matrix
# @todo: deprecate
HIDE_ROLES = (1, 4)
# Undeletable roles
# @todo: deprecate
PROTECTED_ROLES = (1, 2, 3, 4, 5)
controllers = Storage()
# -------------------------------------------------------------------------
def apply_method(self, r, **attr):
"""
Apply role manager
"""
method = self.method
manager = current.manager
if method == "list":
output = self._list(r, **attr)
elif method in ("read", "create", "update"):
output = self._edit(r, **attr)
elif method == "delete":
output = self._delete(r, **attr)
elif method == "roles" and r.name == "user":
output = self._roles(r, **attr)
elif method == "users":
output = self._users(r, **attr)
else:
r.error(405, manager.ERROR.BAD_METHOD)
if r.http == "GET" and method not in ("create", "update", "delete"):
current.session.s3.cancel = r.url()
return output
# -------------------------------------------------------------------------
def _list(self, r, **attr):
"""
List roles/permissions
"""
output = dict()
request = self.request
response = current.response
resource = self.resource
manager = current.manager
auth = manager.auth
db = current.db
table = self.table
T = current.T
if r.id:
return self._edit(r, **attr)
# Show permission matrix?
show_matrix = request.get_vars.get("matrix", False) and True
if r.interactive:
# Title and subtitle
output.update(title = T("List of Roles"))
# System roles
query = ((table.deleted != True) & \
(table.system == True))
rows = db(query).select(table.id)
system_roles = [row.id for row in rows]
# Protected roles
query = ((table.deleted != True) & \
(table.protected == True))
rows = db(query).select(table.id)
protected_roles = [row.id for row in rows]
# Filter out hidden roles
resource.add_filter((~(table.id.belongs(self.HIDE_ROLES))) &
(table.hidden != True))
resource.load()
# Get active controllers
controllers = [c for c in self.controllers.keys()
if c not in self.HIDE_CONTROLLER]
# ACLs
acl_table = auth.permission.table
query = resource.get_query()
query = query & \
(acl_table.group_id == self.table.id) & \
(acl_table.deleted != True)
records = db(query).select(acl_table.ALL)
any = "ANY"
acls = Storage({any: Storage()})
for acl in records:
c = acl.controller
f = acl.function
if not f:
f = any
role_id = acl.group_id
if f not in acls:
acls[f] = Storage()
if c not in acls[f]:
acls[f][c] = Storage()
acls[f][c][str(role_id)] = Storage(oacl = acl.oacl,
uacl = acl.uacl)
for c in controllers:
if c not in acls[any]:
acls[any][c] = Storage()
if any not in acls[any][c]:
acls[any][c][any] = Storage(oacl = auth.permission.NONE,
uacl = auth.permission.NONE)
# Table header
columns = []
headers = [TH("ID"), TH(T("Role"))]
if show_matrix:
for c in controllers:
if c in acls[any]:
headers.append(TH(self.controllers[c].name_nice))
columns.append((c, any))
for f in acls:
if f != any and c in acls[f]:
headers.append(TH(self.controllers[c].name_nice,
BR(), f))
columns.append((c, f))
else:
headers += [TH(T("Description"))]
thead = THEAD(TR(headers))
# Table body
trows = []
i = 1
for role in resource:
role_id = role.id
role_name = role.role
role_desc = role.description
edit_btn = A(T("Edit"),
_href=URL(c="admin", f="role",
args=[role_id], vars=request.get_vars),
_class="action-btn")
users_btn = A(T("Users"),
_href=URL(c="admin", f="role",
args=[role_id, "users"]),
_class="action-btn")
if role.protected:
tdata = [TD(edit_btn,
XML(" "),
users_btn),
TD(role_name)]
else:
delete_btn = A(T("Delete"),
_href=URL(c="admin", f="role",
args=[role_id, "delete"],
vars=request.get_vars),
_class="delete-btn")
tdata = [TD(edit_btn,
XML(" "),
users_btn,
XML(" "),
delete_btn),
TD(role_name)]
if show_matrix:
# Display the permission matrix
for c, f in columns:
if f in acls and c in acls[f] and \
str(role_id) in acls[f][c]:
oacl = acls[f][c][str(role_id)].oacl
uacl = acls[f][c][str(role_id)].uacl
else:
oacl = acls[any][c][any].oacl
                            uacl = acls[any][c][any].uacl
oaclstr = ""
uaclstr = ""
options = auth.permission.PERMISSION_OPTS
NONE = auth.permission.NONE
for o in options:
if o == NONE and oacl == NONE:
oaclstr = "%s%s" % (oaclstr, options[o][0])
elif oacl and oacl & o:
oaclstr = "%s%s" % (oaclstr, options[o][0])
else:
oaclstr = "%s-" % oaclstr
if o == NONE and uacl == NONE:
uaclstr = "%s%s" % (uaclstr, options[o][0])
elif uacl and uacl & o:
uaclstr = "%s%s" % (uaclstr, options[o][0])
else:
uaclstr = "%s-" % uaclstr
values = "%s (%s)" % (uaclstr, oaclstr)
tdata += [TD(values, _nowrap="nowrap")]
else:
# Display role descriptions
tdata += [TD(role_desc)]
_class = i % 2 and "even" or "odd"
trows.append(TR(tdata, _class=_class))
tbody = TBODY(trows)
# Aggregate list
items = TABLE(thead, tbody, _id="list", _class="dataTable display")
output.update(items=items, sortby=[[1, "asc"]])
# Add-button
add_btn = A(T("Add Role"), _href=URL(c="admin", f="role",
args=["create"]),
_class="action-btn")
output.update(add_btn=add_btn)
response.view = "admin/role_list.html"
response.s3.actions = []
response.s3.no_sspag = True
elif r.representation == "xls":
# Not implemented yet
r.error(501, manager.ERROR.BAD_FORMAT)
else:
r.error(501, manager.ERROR.BAD_FORMAT)
return output
# -------------------------------------------------------------------------
def _edit(self, r, **attr):
"""
Create/update role
"""
output = dict()
request = self.request
session = current.session
manager = current.manager
db = current.db
T = current.T
crud_settings = manager.s3.crud
CACL = T("Application Permissions")
FACL = T("Function Permissions")
TACL = T("Table Permissions")
CANCEL = T("Cancel")
auth = manager.auth
model = manager.model
acl_table = auth.permission.table
if r.interactive:
# Get the current record (if any)
if r.record:
output.update(title=T("Edit Role"))
role_id = r.record.id
role_name = r.record.role
role_desc = r.record.description
else:
output.update(title=T("New Role"))
role_id = None
role_name = None
role_desc = None
# Form helpers ----------------------------------------------------
mandatory = lambda l: DIV(l, XML(" "),
SPAN("*", _class="req"))
acl_table.oacl.requires = IS_ACL(auth.permission.PERMISSION_OPTS)
acl_table.uacl.requires = IS_ACL(auth.permission.PERMISSION_OPTS)
acl_widget = lambda f, n, v: \
S3ACLWidget.widget(acl_table[f], v, _id=n, _name=n,
_class="acl-widget")
formstyle = crud_settings.formstyle
using_default = SPAN(T("using default"), _class="using-default")
delete_acl = lambda _id: _id is not None and \
A(T("Delete"),
_href = URL(c="admin", f="acl",
args=[_id, "delete"],
vars=dict(_next=r.url())),
_class = "delete-btn") or using_default
new_acl = SPAN(T("new ACL"), _class="new-acl")
# Role form -------------------------------------------------------
form_rows = formstyle("role_name",
mandatory("%s:" % T("Role Name")),
INPUT(value=role_name,
_name="role_name",
_type="text",
requires=IS_NOT_IN_DB(db,
"auth_group.role",
allowed_override=[role_name])),
"") + \
formstyle("role_desc",
"%s:" % T("Description"),
TEXTAREA(value=role_desc,
_name="role_desc",
_rows="4"),
"")
key_row = P(T("* Required Fields"), _class="red")
role_form = DIV(TABLE(form_rows), key_row, _id="role-form")
# Prepare ACL forms -----------------------------------------------
any = "ANY"
controllers = [c for c in self.controllers.keys()
if c not in self.HIDE_CONTROLLER]
ptables = []
query = (acl_table.deleted != True) & \
(acl_table.group_id == role_id)
records = db(query).select()
acl_forms = []
# Relevant ACLs
acls = Storage()
for acl in records:
if acl.controller in controllers:
if acl.controller not in acls:
acls[acl.controller] = Storage()
if not acl.function:
f = any
else:
if auth.permission.use_facls:
f = acl.function
else:
continue
acls[acl.controller][f] = acl
# Controller ACL table --------------------------------------------
# Table header
thead = THEAD(TR(TH(T("Application")),
TH(T("All Records")),
TH(T("Owned Records")),
TH()))
# Rows for existing ACLs
form_rows = []
i = 0
for c in controllers:
default = Storage(id = None,
controller = c,
function = any,
tablename = None,
uacl = auth.permission.NONE,
oacl = auth.permission.NONE)
if c in acls:
acl_list = acls[c]
if any not in acl_list:
acl_list[any] = default
else:
acl_list = Storage(ANY=default)
acl = acl_list[any]
_class = i % 2 and "even" or "odd"
i += 1
uacl = auth.permission.NONE
oacl = auth.permission.NONE
if acl.oacl is not None:
oacl = acl.oacl
if acl.uacl is not None:
uacl = acl.uacl
_id = acl.id
delete_btn = delete_acl(_id)
n = "%s_%s_ANY_ANY" % (_id, c)
uacl = acl_widget("uacl", "acl_u_%s" % n, uacl)
oacl = acl_widget("oacl", "acl_o_%s" % n, oacl)
cn = self.controllers[c].name_nice
form_rows.append(TR(TD(cn),
TD(uacl),
TD(oacl),
TD(delete_btn),
_class=_class))
# Tabs
tabs = [SPAN(A(CACL), _class="tab_here")]
if auth.permission.use_facls:
_class = auth.permission.use_tacls and \
"tab_other" or "tab_last"
tabs.append(SPAN(A(FACL, _class="facl-tab"), _class=_class))
if auth.permission.use_tacls:
tabs.append(SPAN(A(TACL, _class="tacl-tab"),
_class="tab_last"))
acl_forms.append(DIV(DIV(tabs, _class="tabs"),
TABLE(thead, TBODY(form_rows)),
_id="controller-acls"))
# Function ACL table ----------------------------------------------
if auth.permission.use_facls:
# Table header
thead = THEAD(TR(TH(T("Application")),
TH(T("Function")),
TH(T("All Records")),
TH(T("Owned Records")),
TH()))
# Rows for existing ACLs
form_rows = []
i = 0
for c in controllers:
if c in acls:
acl_list = acls[c]
else:
continue
keys = acl_list.keys()
keys.sort()
for f in keys:
if f == any:
continue
acl = acl_list[f]
_class = i % 2 and "even" or "odd"
i += 1
uacl = auth.permission.NONE
oacl = auth.permission.NONE
if acl.oacl is not None:
oacl = acl.oacl
if acl.uacl is not None:
uacl = acl.uacl
_id = acl.id
delete_btn = delete_acl(_id)
n = "%s_%s_%s_ANY" % (_id, c, f)
uacl = acl_widget("uacl", "acl_u_%s" % n, uacl)
oacl = acl_widget("oacl", "acl_o_%s" % n, oacl)
cn = self.controllers[c].name_nice
form_rows.append(TR(TD(cn),
TD(f),
TD(uacl),
TD(oacl),
TD(delete_btn),
_class=_class))
# Row to enter a new controller ACL
_class = i % 2 and "even" or "odd"
c_opts = [OPTION("", _value=None, _selected="selected")] + \
[OPTION(self.controllers[c].name_nice,
_value=c) for c in controllers]
c_select = SELECT(_name="new_controller", *c_opts)
form_rows.append(TR(
TD(c_select),
TD(INPUT(_type="text", _name="new_function")),
TD(acl_widget("uacl", "new_c_uacl", auth.permission.NONE)),
TD(acl_widget("oacl", "new_c_oacl", auth.permission.NONE)),
TD(new_acl), _class=_class))
# Tabs to change to the other view
tabs = [SPAN(A(CACL, _class="cacl-tab"),
_class="tab_other"),
SPAN(A(FACL), _class="tab_here")]
if auth.permission.use_tacls:
tabs.append(SPAN(A(TACL, _class="tacl-tab"),
_class="tab_last"))
acl_forms.append(DIV(DIV(tabs, _class="tabs"),
TABLE(thead, TBODY(form_rows)),
_id="function-acls"))
# Table ACL table -------------------------------------------------
if auth.permission.use_tacls:
query = (acl_table.deleted != True) & \
(acl_table.tablename != None)
tacls = db(query).select(acl_table.tablename, distinct=True)
if tacls:
ptables = [acl.tablename for acl in tacls]
# Relevant ACLs
acls = dict([(acl.tablename, acl) for acl in records
if acl.tablename in ptables])
# Table header
thead = THEAD(TR(TH(T("Tablename")),
TH(T("All Records")),
TH(T("Owned Records")),
TH()))
# Rows for existing table ACLs
form_rows = []
i = 0
for t in ptables:
_class = i % 2 and "even" or "odd"
i += 1
uacl = auth.permission.NONE
oacl = auth.permission.NONE
_id = None
if t in acls:
acl = acls[t]
if acl.uacl is not None:
uacl = acl.uacl
if acl.oacl is not None:
oacl = acl.oacl
_id = acl.id
delete_btn = delete_acl(_id)
n = "%s_ANY_ANY_%s" % (_id, t)
uacl = acl_widget("uacl", "acl_u_%s" % n, uacl)
oacl = acl_widget("oacl", "acl_o_%s" % n, oacl)
form_rows.append(TR(TD(t),
TD(uacl),
TD(oacl),
TD(delete_btn),
_class=_class))
# Row to enter a new table ACL
_class = i % 2 and "even" or "odd"
# @todo: find a better way to provide a selection of tables
#all_tables = [t._tablename for t in current.db]
form_rows.append(TR(
TD(INPUT(_type="text", _name="new_table")),
# @todo: doesn't work with conditional models
#requires=IS_EMPTY_OR(IS_IN_SET(all_tables,
#zero=None,
#error_message=T("Undefined Table"))))),
TD(acl_widget("uacl", "new_t_uacl", auth.permission.NONE)),
TD(acl_widget("oacl", "new_t_oacl", auth.permission.NONE)),
TD(new_acl), _class=_class))
# Tabs
tabs = [SPAN(A(CACL, _class="cacl-tab"),
_class="tab_other")]
if auth.permission.use_facls:
tabs.append(SPAN(A(FACL, _class="facl-tab"),
_class="tab_other"))
tabs.append(SPAN(A(TACL), _class="tab_here"))
acl_forms.append(DIV(DIV(tabs, _class="tabs"),
TABLE(thead, TBODY(form_rows)),
_id="table-acls"))
# Aggregate ACL Form ----------------------------------------------
acl_form = DIV(acl_forms, _id="table-container")
# Action row
if session.s3.cancel:
cancel = session.s3.cancel
else:
cancel = URL(c="admin", f="role",
vars=request.get_vars)
action_row = DIV(INPUT(_type="submit", _value=T("Save")),
A(CANCEL, _href=cancel, _class="action-lnk"),
_id="action-row")
# Complete form
form = FORM(role_form, acl_form, action_row)
# Append role_id
if role_id:
form.append(INPUT(_type="hidden",
_name="role_id",
value=role_id))
# Process the form ------------------------------------------------
if form.accepts(request.post_vars, session):
vars = form.vars
# Update the role
role = Storage(role=vars.role_name, description=vars.role_desc)
if r.record:
r.record.update_record(**role)
role_id = form.vars.role_id
session.confirmation = '%s "%s" %s' % (T("Role"),
role.role,
T("updated"))
else:
import uuid
role.uuid = uuid.uuid4()
role_id = self.table.insert(**role)
session.confirmation = '%s "%s" %s' % (T("Role"),
role.role,
T("created"))
if role_id:
# Collect the ACLs
acls = Storage()
for v in vars:
if v[:4] == "acl_":
acl_type, name = v[4:].split("_", 1)
n = name.split("_", 3)
i, c, f, t = map(lambda item: \
item != any and item or None, n)
if i.isdigit():
i = int(i)
else:
i = None
name = "%s_%s_%s" % (c, f, t)
if name not in acls:
acls[name] = Storage()
acls[name].update({"id": i,
"group_id": role_id,
"controller": c,
"function": f,
"tablename": t,
"%sacl" % acl_type: vars[v]})
for v in ("new_controller", "new_table"):
if v in vars and vars[v]:
c = v == "new_controller" and \
vars.new_controller or None
f = v == "new_controller" and \
vars.new_function or None
t = v == "new_table" and vars.new_table or None
name = "%s_%s_%s" % (c, f, t)
x = v == "new_table" and "t" or "c"
uacl = vars["new_%s_uacl" % x]
oacl = vars["new_%s_oacl" % x]
if name not in acls:
acls[name] = Storage()
acls[name].update(group_id=role_id,
controller=c,
function=f,
tablename=t,
oacl=oacl,
uacl=uacl)
# Save the ACLs
for acl in acls.values():
_id = acl.pop("id", None)
if _id:
query = (acl_table.deleted != True) & \
(acl_table.id == _id)
db(query).update(**acl)
elif acl.oacl or acl.uacl:
_id = acl_table.insert(**acl)
redirect(URL(f="role", vars=request.get_vars))
output.update(form=form)
if form.errors:
if "new_table" in form.errors:
output.update(acl="table")
elif "new_controller" in form.errors:
output.update(acl="function")
current.response.view = "admin/role_edit.html"
else:
            r.error(501, manager.ERROR.BAD_FORMAT)
return output
# -------------------------------------------------------------------------
def _delete(self, r, **attr):
"""
Delete role
"""
session = current.session
manager = current.manager
request = self.request
T = current.T
auth = manager.auth
if r.interactive:
if r.record:
role = r.record
role_id = role.id
role_name = role.role
if role_id in self.PROTECTED_ROLES or \
role.protected or role.system:
session.error = '%s "%s" %s' % (T("Role"),
role_name,
T("cannot be deleted."))
redirect(URL(c="admin", f="role",
vars=request.get_vars))
else:
db = current.db
# Delete all ACLs for this role:
acl_table = auth.permission.table
query = (acl_table.deleted != True) & \
(acl_table.group_id == role_id)
db(query).update(deleted=True)
# Remove all memberships:
membership_table = db.auth_membership
query = (membership_table.deleted != True) & \
(membership_table.group_id == role_id)
db(query).update(deleted=True)
# Update roles in session:
session.s3.roles = [role
for role in session.s3.roles
if role != role_id]
# Remove role:
query = (self.table.deleted != True) & \
(self.table.id == role_id)
db(query).update(role=None,
deleted=True)
# Confirmation:
session.confirmation = '%s "%s" %s' % (T("Role"),
role_name,
T("deleted"))
else:
session.error = T("No role to delete")
else:
            r.error(501, manager.ERROR.BAD_FORMAT)
redirect(URL(c="admin", f="role", vars=request.get_vars))
# -------------------------------------------------------------------------
def _roles(self, r, **attr):
"""
View/Update roles of a user
"""
output = dict()
db = current.db
T = current.T
CANCEL = T("Cancel")
session = current.session
manager = current.manager
sr = session.s3.system_roles
request = self.request
crud_settings = manager.s3.crud
formstyle = crud_settings.formstyle
auth = manager.auth
gtable = auth.settings.table_group
mtable = auth.settings.table_membership
if r.interactive:
if r.record:
user = r.record
user_id = user.id
username = user.email
query = (mtable.deleted != True) &\
(mtable.user_id == user_id)
memberships = db(query).select()
memberships = Storage([(str(m.group_id), m.id)
for m in memberships])
roles = db(gtable.deleted != True).select(gtable.ALL)
roles = Storage([(str(g.id), " %s" % g.role)
for g in roles
if g.hidden != True and \
g.id not in (sr.ANONYMOUS,
sr.AUTHENTICATED)])
field = Storage(name="roles",
requires = IS_IN_SET(roles, multiple=True))
widget = CheckboxesWidgetS3.widget(field, memberships.keys())
if session.s3.cancel:
cancel = session.s3.cancel
else:
cancel = r.url(method="")
form = FORM(TABLE(
TR(TD(widget)),
TR(TD(INPUT(_type="submit", _value=T("Save")),
A(CANCEL,
_href=cancel, _class="action-lnk")))))
                if form.accepts(request.post_vars, session):
                    assign = form.vars.roles or []
                    if not isinstance(assign, (list, tuple)):
                        # web2py returns a single value rather than a
                        # list when only one checkbox is selected
                        assign = [assign]
for role in roles:
query = (mtable.deleted != True) & \
(mtable.user_id == user_id) & \
(mtable.group_id == role)
_set = db(query)
if str(role) not in assign:
_set.update(deleted=True)
else:
membership = _set.select(limitby=(0, 1)).first()
if not membership:
mtable.insert(user_id=user_id, group_id=role)
session.confirmation = T("User Updated")
redirect(r.url(method=""))
output.update(title="%s - %s" %
(T("Assigned Roles"), username),
form=form)
current.response.view = "admin/user_roles.html"
else:
session.error = T("No user to update")
redirect(r.url(method=""))
else:
            r.error(501, manager.ERROR.BAD_FORMAT)
return output
# -------------------------------------------------------------------------
def _users(self, r, **attr):
"""
View/Update users of a role
"""
output = dict()
session = current.session
manager = current.manager
request = self.request
db = current.db
T = current.T
auth = manager.auth
utable = auth.settings.table_user
gtable = auth.settings.table_group
mtable = auth.settings.table_membership
if r.interactive:
if r.record:
role_id = r.record.id
role_name = r.record.role
role_desc = r.record.description
title = "%s: %s" % (T("Role"), role_name)
output.update(title=title,
description=role_desc,
group=role_id)
if auth.settings.username:
username = "username"
else:
username = "email"
# @todo: Audit
users = db().select(utable.ALL)
query = (mtable.deleted != True) & \
(mtable.group_id == role_id)
assigned = db(query).select(mtable.ALL)
assigned_users = [row.user_id for row in assigned]
unassigned_users = [(row.id, row)
for row in users
if row.id not in assigned_users]
# Delete form
if assigned_users:
thead = THEAD(TR(TH(),
TH(T("Name")),
TH(T("Username")),
TH(T("Remove?"))))
trows = []
i = 0
for user in users:
if user.id not in assigned_users:
continue
_class = i % 2 and "even" or "odd"
i += 1
trow = TR(TD(A(), _name="Id"),
TD("%s %s" % (user.first_name,
user.last_name)),
TD(user[username]),
TD(INPUT(_type="checkbox",
_name="d_%s" % user.id,
_class="remove_item")),
_class=_class)
trows.append(trow)
trows.append(TR(TD(), TD(), TD(),
TD(INPUT(_id="submit_delete_button",
_type="submit",
_value=T("Remove")))))
tbody = TBODY(trows)
del_form = TABLE(thead, tbody, _id="list",
_class="dataTable display")
else:
del_form = T("No users with this role")
del_form = FORM(DIV(del_form, _id="table-container"),
_name="del_form")
# Add form
uname = lambda u: \
"%s: %s %s" % (u.id, u.first_name, u.last_name)
u_opts = [OPTION(uname(u[1]),
_value=u[0]) for u in unassigned_users]
if u_opts:
u_opts = [OPTION("",
_value=None, _selected="selected")] + u_opts
u_select = DIV(TABLE(TR(
TD(SELECT(_name="new_user", *u_opts)),
TD(INPUT(_type="submit",
_id="submit_add_button",
_value=T("Add"))))))
else:
u_select = T("No further users can be added")
add_form = FORM(DIV(u_select), _name="add_form")
# Process delete form
if del_form.accepts(request.post_vars,
session, formname="del_form"):
del_ids = [v[2:] for v in del_form.vars
if v[:2] == "d_" and
del_form.vars[v] == "on"]
query = (mtable.deleted != True) & \
(mtable.group_id == role_id) & \
(mtable.user_id.belongs(del_ids))
db(query).update(deleted=True)
redirect(r.url())
# Process add form
if add_form.accepts(request.post_vars,
session, formname="add_form"):
if add_form.vars.new_user:
mtable.insert(group_id=role_id,
user_id=add_form.vars.new_user)
redirect(r.url())
form = DIV(H4(T("Users with this role")), del_form,
H4(T("Add new users")), add_form)
list_btn = A(T("Back to Roles List"),
_href=URL(c="admin", f="role"),
_class="action-btn")
edit_btn = A(T("Edit Role"),
_href=URL(c="admin", f="role",
args=[role_id]),
_class="action-btn")
output.update(form=form, list_btn=list_btn, edit_btn=edit_btn)
current.response.view = "admin/role_users.html"
else:
session.error = T("No role to update")
redirect(r.there())
else:
            r.error(501, manager.ERROR.BAD_FORMAT)
return output
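# -----------------------------------------------------------------------------
# Illustrative sketch (not part of this module): the dispatch pattern used by
# S3RoleManager.apply_method above - subclass S3Method, switch on self.method,
# and fall back to a 405 for anything unhandled. "hello" is a made-up method
# name; registering the handler with a resource is framework-specific and not
# shown here.
class _S3HelloMethod(S3Method):
    def apply_method(self, r, **attr):
        if self.method == "hello" and r.interactive:
            # a real handler would build its output dict here
            return dict(title=current.T("Hello"))
        r.error(405, current.manager.ERROR.BAD_METHOD)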
# =============================================================================
class FaceBookAccount(OAuthAccount):
""" OAuth implementation for FaceBook """
AUTH_URL = "https://graph.facebook.com/oauth/authorize"
TOKEN_URL = "https://graph.facebook.com/oauth/access_token"
# -------------------------------------------------------------------------
def __init__(self):
from facebook import GraphAPI, GraphAPIError
self.GraphAPI = GraphAPI
self.GraphAPIError = GraphAPIError
g = dict(GraphAPI=GraphAPI,
GraphAPIError=GraphAPIError,
request=current.request,
response=current.response,
session=current.session,
HTTP=HTTP)
client = current.auth.settings.facebook
OAuthAccount.__init__(self, g, client["id"], client["secret"],
self.AUTH_URL, self.TOKEN_URL,
scope="email,user_about_me,user_location,user_photos,user_relationships,user_birthday,user_website,create_event,user_events,publish_stream")
self.graph = None
# -------------------------------------------------------------------------
def login_url(self, next="/"):
""" Overriding to produce a different redirect_uri """
request = current.request
session = current.session
if not self.accessToken():
if not request.vars.code:
session.redirect_uri = "%s/%s/default/facebook/login" % \
(current.deployment_settings.get_base_public_url(),
request.application)
data = dict(redirect_uri=session.redirect_uri,
response_type="code",
client_id=self.client_id)
if self.args:
data.update(self.args)
auth_request_url = self.auth_url + "?" + urlencode(data)
raise HTTP(307,
"You are not authenticated: you are being redirected to the <a href='" + auth_request_url + "'> authentication server</a>",
Location=auth_request_url)
else:
session.code = request.vars.code
self.accessToken()
#return session.code
return next
# -------------------------------------------------------------------------
def get_user(self):
""" Returns the user using the Graph API. """
db = current.db
auth = current.auth
session = current.session
if not self.accessToken():
return None
if not self.graph:
            self.graph = self.GraphAPI(self.accessToken())
user = None
        try:
            user = self.graph.get_object("me")
        except self.GraphAPIError:
            # invalid or expired token: discard it and the Graph handle
            session.token = None
            self.graph = None
if user:
# Check if a user with this email has already registered
#session.facebooklogin = True
table = auth.settings.table_user
query = (table.email == user["email"])
existent = db(query).select(table.id,
table.password,
limitby=(0, 1)).first()
if existent:
#session["%s_setpassword" % existent.id] = existent.password
_user = dict(first_name = user.get("first_name", ""),
last_name = user.get("last_name", ""),
facebookid = user["id"],
facebook = user.get("username", user["id"]),
email = user["email"],
password = existent.password
)
return _user
else:
# b = user["birthday"]
# birthday = "%s-%s-%s" % (b[-4:], b[0:2], b[-7:-5])
# if 'location' in user:
# session.flocation = user['location']
#session["is_new_from"] = "facebook"
auth.s3_send_welcome_email(user)
# auth.initial_user_permission(user) # Called on profile page
_user = dict(first_name = user.get("first_name", ""),
last_name = user.get("last_name", ""),
facebookid = user["id"],
facebook = user.get("username", user["id"]),
nickname = IS_SLUG()(user.get("username", "%(first_name)s-%(last_name)s" % user) + "-" + user['id'][:5])[0],
email = user["email"],
# birthdate = birthday,
about = user.get("bio", ""),
website = user.get("website", ""),
# gender = user.get("gender", "Not specified").title(),
photo_source = 3,
tagline = user.get("link", ""),
registration_type = 2,
)
return _user
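# -----------------------------------------------------------------------------
# Illustrative wiring sketch (an assumption, not confirmed by this module): in
# web2py, an OAuthAccount subclass is typically activated by storing the
# client credentials and installing the account object as the login form. The
# "id"/"secret" keys match what __init__ above reads.
#
#   auth.settings.facebook = {"id": "APP_ID", "secret": "APP_SECRET"}
#   auth.settings.login_form = FaceBookAccount()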
# =============================================================================
class GooglePlusAccount(OAuthAccount):
"""
OAuth implementation for Google
https://code.google.com/apis/console/
"""
AUTH_URL = "https://accounts.google.com/o/oauth2/auth"
TOKEN_URL = "https://accounts.google.com/o/oauth2/token"
API_URL = "https://www.googleapis.com/oauth2/v1/userinfo"
# -------------------------------------------------------------------------
def __init__(self):
request = current.request
settings = current.deployment_settings
g = dict(request=request,
response=current.response,
session=current.session,
HTTP=HTTP)
client = current.auth.settings.google
self.globals = g
self.client = client
self.client_id = client["id"]
self.client_secret = client["secret"]
self.auth_url = self.AUTH_URL
self.args = dict(
scope = "https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/userinfo.profile",
user_agent = "google-api-client-python-plus-cmdline/1.0",
xoauth_displayname = settings.get_system_name(),
response_type = "code",
redirect_uri = "%s/%s/default/google/login" % \
(settings.get_base_public_url(),
request.application),
approval_prompt = "force",
state = "google"
)
self.graph = None
# -------------------------------------------------------------------------
def __build_url_opener(self, uri):
"""
Build the url opener for managing HTTP Basic Athentication
"""
# Create an OpenerDirector with support
# for Basic HTTP Authentication...
auth_handler = urllib2.HTTPBasicAuthHandler()
auth_handler.add_password(None,
uri,
self.client_id,
self.client_secret)
opener = urllib2.build_opener(auth_handler)
return opener
# -------------------------------------------------------------------------
def accessToken(self):
"""
Return the access token generated by the authenticating server.
        If a token is already in the session, that one will be used.
Otherwise the token is fetched from the auth server.
"""
session = current.session
if session.token and session.token.has_key("expires"):
expires = session.token["expires"]
# reuse token until expiration
if expires == 0 or expires > time.time():
return session.token["access_token"]
if session.code:
data = dict(client_id = self.client_id,
client_secret = self.client_secret,
redirect_uri = self.args["redirect_uri"],
code = session.code,
grant_type = "authorization_code",
scope = "https://www.googleapis.com/auth/userinfo.email https://www.googleapis.com/auth/userinfo.profile")
# if self.args:
# data.update(self.args)
open_url = None
opener = self.__build_url_opener(self.TOKEN_URL)
try:
open_url = opener.open(self.TOKEN_URL, urlencode(data))
except urllib2.HTTPError, e:
raise Exception(e.read())
finally:
del session.code # throw it away
if open_url:
try:
session.token = json.loads(open_url.read())
session.token["expires"] = int(session.token["expires_in"]) + \
time.time()
finally:
opener.close()
return session.token["access_token"]
session.token = None
return None
# -------------------------------------------------------------------------
def login_url(self, next="/"):
""" Overriding to produce a different redirect_uri """
request = current.request
session = current.session
if not self.accessToken():
if not request.vars.code:
session.redirect_uri = self.args["redirect_uri"]
data = dict(redirect_uri=session.redirect_uri,
response_type="code",
client_id=self.client_id)
if self.args:
data.update(self.args)
auth_request_url = self.auth_url + "?" + urlencode(data)
raise HTTP(307,
"You are not authenticated: you are being redirected to the <a href='" + auth_request_url + "'> authentication server</a>",
Location=auth_request_url)
else:
session.code = request.vars.code
self.accessToken()
#return session.code
return next
# -------------------------------------------------------------------------
def get_user(self):
""" Returns the user using the Graph API. """
db = current.db
auth = current.auth
session = current.session
if not self.accessToken():
return None
user = None
try:
user = self.call_api()
except Exception, e:
print str(e)
session.token = None
if user:
# Check if a user with this email has already registered
#session.googlelogin = True
table = auth.settings.table_user
query = (table.email == user["email"])
existent = db(query).select(table.id,
table.password,
limitby=(0, 1)).first()
if existent:
#session["%s_setpassword" % existent.id] = existent.password
_user = dict(
#first_name = user.get("given_name", user["name"]),
#last_name = user.get("family_name", user["name"]),
googleid = user["id"],
email = user["email"],
password = existent.password
)
return _user
else:
# b = user["birthday"]
# birthday = "%s-%s-%s" % (b[-4:], b[0:2], b[-7:-5])
# if "location" in user:
# session.flocation = user["location"]
#session["is_new_from"] = "google"
auth.s3_send_welcome_email(user)
_user = dict(
first_name = user.get("given_name", user["name"].split()[0]),
last_name = user.get("family_name", user["name"].split()[-1]),
googleid = user["id"],
nickname = "%(first_name)s-%(last_name)s-%(id)s" % dict(first_name=user["name"].split()[0].lower(), last_name=user["name"].split()[-1].lower(), id=user['id'][:5]),
email = user["email"],
# birthdate = birthday,
website = user.get("link", ""),
# gender = user.get("gender", "Not specified").title(),
photo_source = 6 if user.get("picture", None) else 2,
googlepicture = user.get("picture", ""),
registration_type = 3,
)
return _user
# -------------------------------------------------------------------------
def call_api(self):
api_return = urllib.urlopen("https://www.googleapis.com/oauth2/v1/userinfo?access_token=%s" % self.accessToken())
user = json.loads(api_return.read())
if user:
return user
else:
            current.session.token = None
return None
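# -----------------------------------------------------------------------------
# Illustrative wiring sketch (an assumption, mirroring the FaceBookAccount
# example above): store the client credentials under the "id"/"secret" keys
# that __init__ reads, then install the account object as the login form.
#
#   auth.settings.google = {"id": "CLIENT_ID", "secret": "CLIENT_SECRET"}
#   auth.settings.login_form = GooglePlusAccount()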
# END =========================================================================
|
flavour/iscram
|
modules/s3/s3aaa.py
|
Python
|
mit
| 198,273
| 0.003299
|
from setuptools import setup, find_packages
try:
long_description = open("README.rst").read()
except IOError:
long_description = ""
setup(
name='odin',
version='0.4.2',
url='https://github.com/timsavage/odin',
license='LICENSE',
author='Tim Savage',
author_email='tim.savage@poweredbypenguins.org',
description='Object Data Mapping for Python',
long_description=long_description,
packages=find_packages(),
install_requires=['six'],
extras_require={
# Documentation generation
'doc_gen': ["jinja2>=2.7"],
# Pint integration
'pint': ["pint"],
},
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Topic :: Software Development :: Libraries :: Application Frameworks',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
|
tjmcewan/odin
|
setup.py
|
Python
|
bsd-3-clause
| 1,229
| 0
|
# -*- coding: utf-8 -*-
#
# Yelandur documentation build configuration file, created by
# sphinx-quickstart on Thu Jan 10 16:10:42 2013.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.mathjax', 'sphinx.ext.viewcode']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Yelandur'
copyright = u'2013, Sébastien Lerique'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1'
# The full version, including alpha/beta/rc tags.
release = '0.1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Yelandurdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Yelandur.tex', u'Yelandur Documentation',
u'Sébastien Lerique', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'yelandur', u'Yelandur Documentation',
[u'Sébastien Lerique'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Yelandur', u'Yelandur Documentation',
u'Sébastien Lerique', 'Yelandur', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
|
science-en-poche/yelandur
|
docs/source/conf.py
|
Python
|
gpl-3.0
| 7,995
| 0.007383
|
import logging
try:
create_external('xml2', build_helper = 'xml2-config',
version_query = '--version', version_parser = lambda x: 'invalid')
except Exception:
logging.critical('external version parsing')
try:
tools['c'].std = 'latest'
except Exception:
logging.critical('std setting')
try:
shared_library('x', [])
except Exception:
logging.critical('shared_library: empty input')
try:
static_library('x', [])
except Exception:
logging.critical('static_library: empty input')
|
pyrate-build/pyrate-build
|
examples/test03.py
|
Python
|
apache-2.0
| 489
| 0.03272
|
import os
path = os.path.dirname(os.path.realpath(__file__))
sbmlFilePath = os.path.join(path, 'MODEL8687196544.xml')
with open(sbmlFilePath,'r') as f:
sbmlString = f.read()
def module_exists(module_name):
try:
__import__(module_name)
except ImportError:
return False
else:
return True
if module_exists('libsbml'):
import libsbml
sbml = libsbml.readSBMLFromString(sbmlString)
|
biomodels/MODEL8687196544
|
MODEL8687196544/model.py
|
Python
|
cc0-1.0
| 427
| 0.009368
|
# encoding: utf-8
from ztq_core import async
import time
@async
def send(body):
print 'START: ', body
time.sleep(3)
print 'END: ', body
@async(queue='mail')
def send_failed(body):
print 'FAIL START:', body
raise Exception('connection error...')
@async(queue='mail')
def failed_callback(return_code, return_msg):
print 'FAILED CALLBACK:', return_code, return_msg
@async(queue='index')
def index(data):
print 'INDEX:', data
time.sleep(1)
def do_commit():
print 'COMMITTED'
import ztq_worker
ztq_worker.register_batch_queue('index', 5, do_commit)
|
liangsuilong/ztq
|
ztq_demo/tasks.py
|
Python
|
mit
| 586
| 0.020478
|
import abc
import os
import os.path
import platform
import yaml
import typing
if typing.TYPE_CHECKING:
from typing import Any, Dict, IO, Optional, Sequence, Type
class PropertyTypeError(ValueError):
def __init__(self, property_name: str, value: 'Any') -> None:
ValueError.__init__(self, "can not set {} property to type {}".format(
property_name, type(value)
))
class Context(object):
"""Context keeps track of the value being unpacked.
The Context type keeps track of a file name and field name which are used
to describe the context of an error when unpacking a configuration object.
A Context must be provided to the unpack function of a configuration
Property, which uses the context to write errors to. The result of the
unpack operation should be a bool indicating overall success or failure,
while the Context can be inspected to check how many errors occurred.
If the `writer` field is not None, errors will be written to it. This must
be set to a type which supports the write function (e.g. sys.stderr), and
each error will result in multiple calls to write. Capturing the output
can be done by setting the `writer` field to an io.StringIO.
"""
def __init__(self, file_name: str = "", field_name: str = "") -> None:
"""Create a new Context.
:param file_name: The filename of the context
:param field_name: The name of the current field
"""
self.file = file_name
self.field = field_name
self.error_count = 0
self.writer = None
def error(self, message: str, *args, **kwargs) -> None:
if self.writer is not None:
if self.file != "":
self.writer.write(self.file)
self.writer.write(": ")
if self.field != "":
self.writer.write(self.field)
self.writer.write(": ")
self.writer.write(message.format(*args, **kwargs))
self.writer.write("\n")
self.error_count += 1
def invalid_type(self, expected_type: 'Type', value: 'Any') -> None:
self.error(
"invalid type; expected {}, got {} ({})",
expected_type, value, type(value)
)
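# A minimal usage sketch (illustrative only): capture unpack errors in an
# io.StringIO, as suggested by the Context docstring above. The file and
# field names are made up for the example.
def _context_usage_example() -> str:
    import io
    ctx = Context("settings.yml", "debug")
    ctx.writer = io.StringIO()
    ctx.invalid_type(bool, "yes")       # records one error
    assert ctx.error_count == 1
    return ctx.writer.getvalue()        # "settings.yml: debug: invalid type; ..."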
class Property(abc.ABC):
"""Property is the abstract base class of all Configuration properties."""
def __init__(self):
abc.ABC.__init__(self)
@abc.abstractmethod
def copy(self) -> 'Property':
raise NotImplementedError
@abc.abstractmethod
def unpack(self, value: 'Any', context: 'Context') -> bool:
raise NotImplementedError
@abc.abstractmethod
def pack(self) -> 'Any':
raise NotImplementedError
class PrimitiveProperty(Property, abc.ABC):
def __init__(self, default: 'Any') -> None:
Property.__init__(self)
self._default = default
self._value = default
def __repr__(self) -> str:
return str(self._value)
def __str__(self) -> str:
return str(self._value)
def __bool__(self) -> bool:
return bool(self._value)
def __int__(self) -> int:
return int(self._value)
def __float__(self) -> float:
return float(self._value)
def __eq__(self, other: 'Any') -> bool:
return self._value == other
def __ne__(self, other: 'Any') -> bool:
return self._value != other
def __lt__(self, other: 'Any') -> bool:
return self._value < other
def __le__(self, other: 'Any') -> bool:
return self._value <= other
def __gt__(self, other: 'Any') -> bool:
return self._value > other
def __ge__(self, other: 'Any') -> bool:
return self._value >= other
def pack(self) -> 'Any':
return self._value
class Boolean(PrimitiveProperty):
"""A boolean property."""
def __init__(self, default: bool = False) -> None:
"""Create a new Boolean property.
:param default: The default value of the property
"""
if type(default) is not bool:
raise PropertyTypeError("Boolean", default)
PrimitiveProperty.__init__(self, default)
@property
def value(self) -> bool:
"""The value of the property."""
return self._value
@value.setter
def value(self, value: bool) -> None:
if type(value) is not bool:
raise PropertyTypeError("Boolean", value)
self._value = value
@property
def default(self) -> bool:
"""The default value of the property."""
return self._default
def copy(self) -> 'Boolean':
"""Create a copy of this Boolean Property.
:returns: A copy of this boolean property
"""
b = Boolean(self._default)
b._value = self._value
return b
def unpack(self, value: 'Any', context: 'Context') -> bool:
"""Unpack a YAML value into this Boolean property.
:param value: The value to unpack
:param context: The context of this unpack operation
:returns: If the unpack operation succeeded
"""
if type(value) is not bool:
context.invalid_type(bool, value)
return False
self._value = value
return True
class Integer(PrimitiveProperty):
"""An integer property."""
def __init__(self, default: int = 0) -> None:
"""Create a new Integer property.
:param default: The default value of the property
"""
if type(default) is not int:
raise PropertyTypeError("Integer", default)
PrimitiveProperty.__init__(self, default)
@property
def value(self) -> int:
"""The value of the property."""
return self._value
@value.setter
def value(self, value: int) -> None:
if type(value) is not int:
raise PropertyTypeError("Integer", value)
self._value = value
@property
def default(self) -> int:
"""The default value of the property (read-only)."""
return self._default
def copy(self) -> 'Integer':
"""Create a copy of this Integer Property.
:returns: A copy of this Integer
"""
i = Integer(self._default)
i._value = self._value
return i
def unpack(self, value: 'Any', context: 'Context') -> bool:
"""Unpack a YAML value into this Integer Property.
The value being unpacked must be an int, otherwise an error is written
to the context and False is returned.
:param value: The value to unpack
:param context: The context of this unpack operation
:returns: If the unpack operation succeeded
"""
if type(value) is not int:
context.invalid_type(int, value)
return False
self._value = value
return True
class Float(PrimitiveProperty):
"""A Float property."""
def __init__(self, default: float = 0.0) -> None:
"""Create a new Float property.
:param default: The default value of the property
"""
if type(default) is not float:
raise PropertyTypeError("Float", default)
PrimitiveProperty.__init__(self, default)
@property
def value(self) -> float:
"""The value of the property."""
return self._value
@value.setter
def value(self, value: float) -> None:
if type(value) is not float:
raise PropertyTypeError("Float", value)
self._value = value
@property
    def default(self) -> float:
"""The default value of the property (read-only)."""
return self._default
def copy(self) -> 'Float':
"""Create a copy of this Float Property.
:returns: A copy of this Float
"""
f = Float(self._default)
f._value = self._value
return f
def unpack(self, value: 'Any', context: 'Context') -> bool:
"""Unpack a YAML value into this Float Property.
The value being unpacked must be either a float or an int, otherwise
an error is written to the context and False is returned.
:param value: The value to unpack
:param context: The context of this unpack operation
:returns: If the unpack operation succeeded
"""
if type(value) is int or type(value) is float:
self._value = float(value)
return True
context.invalid_type(float, value)
return False
class String(PrimitiveProperty):
def __init__(self, default: str = "", allow_empty: bool = True,
strip: bool = False) -> None:
if type(default) is not str:
raise PropertyTypeError("String", default)
if strip:
default = default.strip()
if not allow_empty and default == "":
raise ValueError("may not be empty")
PrimitiveProperty.__init__(self, default)
self._allow_empty = allow_empty
self._strip = strip
@property
def value(self) -> str:
return self._value
@value.setter
def value(self, value: str) -> None:
if type(value) is not str:
raise PropertyTypeError("String", value)
if self._strip:
value = value.strip()
if not self._allow_empty and value == "":
raise ValueError("may not be empty")
self._value = value
@property
def default(self) -> str:
return self._default
def copy(self) -> 'String':
"""Create a copy of this String Property.
:returns: A copy of this String
"""
s = String(self._default, self._allow_empty, self._strip)
s._value = self._value
return s
def unpack(self, value: 'Any', context: 'Context') -> bool:
"""Unpack a YAML value into this String Property.
The value being unpacked must be a str, otherwise an error is written
to the context and False is returned.
:param value: The value to unpack
:param context: The context of this unpack operation
:returns: If the unpack operation succeeded
"""
if type(value) is not str:
context.invalid_type(str, value)
return False
if self._strip:
value = value.strip()
if value == "" and not self._allow_empty:
context.error("may not be empty")
return False
self._value = value
return True
class EnumValueError(ValueError):
def __init__(self, value: str) -> None:
ValueError.__init__(
self, "{} is not an allowed enum value".format(repr(value))
)
class Enum(PrimitiveProperty):
def __init__(self, allowed: 'Sequence[str]',
default: 'Optional[str]' = None) -> None:
if len(allowed) == 0:
raise ValueError("Enum type requires at least one value")
default = default if default is not None else allowed[0]
if default not in allowed:
raise EnumValueError(default)
PrimitiveProperty.__init__(self, default)
self._allowed = allowed
@property
def value(self) -> str:
return self._value
@value.setter
def value(self, value: str) -> None:
if type(value) is not str:
raise PropertyTypeError("Enum", value)
if value not in self._allowed:
raise EnumValueError(value)
self._value = value
@property
def default(self) -> str:
return self._default
def copy(self) -> 'Enum':
"""Create a copy of this Enum Property.
:returns: A copy of this Enum
"""
e = Enum(self._allowed, self._default)
e._value = self._value
return e
def unpack(self, value: 'Any', context: 'Context') -> bool:
"""Unpack a YAML value into this Enum Property.
The value being unpacked must be a str and be present in the list of
allowed enum values.
:param value: The value to unpack
:param context: The context of this unpack operation
:returns: If the unpack operation succeeded
"""
if type(value) is not str:
context.invalid_type(str, value)
return False
if value not in self._allowed:
context.error(
"{} not an allowed enum value, must be one of {}",
repr(value), repr(self._allowed)
)
return False
self._value = value
return True
class Map(Property):
def __init__(self, values: 'Dict[str, Property]') -> None:
Property.__init__(self)
object.__setattr__(self, '_values', values)
def copy(self) -> 'Map':
m = Map(dict())
for key, value in self._values.items():
m.parameters[key] = value.copy()
return m
    def unpack(self, value: 'Any', context: 'Context') -> bool:
        if value is None:
            return True
        if type(value) is not dict:
            # report via the context (consistent with the other properties)
            # rather than raising
            context.invalid_type(dict, value)
            return False
        good = True
        base_field = context.field
        field = base_field if base_field == "" else base_field + '.'
        for key, item in value.items():   # "item" avoids shadowing "value"
            context.field = field + key
            child = self._values.get(key, None)
            if child is None:
                good = False
                context.error("invalid key")
                continue
            good &= child.unpack(item, context)
        return good
def pack(self) -> 'Any':
serialized = dict()
for key, value in self._values.items():
serialized[key] = value.pack()
return serialized
@property
def parameters(self) -> 'Dict[str, Property]':
return self._values
def __getitem__(self, key: str) -> 'Property':
return self._values[key]
def __setitem__(self, key: str, value: 'Any') -> None:
self._values[key].value = value
def __getattr__(self, key: str) -> 'Property':
return self._values[key]
def __setattr__(self, key: str, value: 'Any') -> None:
if key in self._values:
self.__dict__['_values'][key].value = value
else:
self.__dict__[key] = value
class Config(Map):
@staticmethod
def folder() -> str:
"""Get the folder where configuration files should be read from.
:return: The folder for configuration files
"""
if platform.win32_ver()[0] != '':
return os.path.expandvars("%APPDATA%/txtrpg")
if platform.mac_ver()[0] != '':
return os.path.expanduser("~/Library/Application Support/txtrpg")
return os.path.expanduser("~/.local/share/txtrpg")
def load(self, filename: str, fp: 'Optional[IO]' = None,
ctx: 'Optional[Context]' = None) -> bool:
ctx = ctx if ctx is not None else Context(filename)
if fp is not None:
return self._load(filename, fp, ctx)
else:
with open(filename, "rb") as fp:
return self._load(filename, fp, ctx)
def _load(self, filename: str, fp: 'IO', ctx: 'Context') -> bool:
ctx.file = filename
ctx.field = ""
data: dict
try:
data = yaml.safe_load(fp)
except Exception as e:
ctx.error("{}: {}", type(e).__name__, e)
return False
return self.unpack(data, ctx)
def save(self, filename: str, fp: 'Optional[IO]' = None) -> None:
data = self.pack()
if fp is None:
with open(filename, "w") as fp:
yaml.dump(data, fp)
else:
yaml.dump(data, fp)
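# A minimal end-to-end sketch (illustrative only): compose a Config from the
# property types above and round-trip it through YAML. The property names and
# file name are made up for the example.
def _config_usage_example() -> str:
    import io
    cfg = Config({
        "debug": Boolean(default=False),
        "port": Integer(default=8080),
        "mode": Enum(["dev", "prod"], default="dev"),
    })
    ok = cfg.load("settings.yml", fp=io.StringIO("debug: true\nport: 9000\n"))
    assert ok and cfg.debug.value is True and cfg.port == 9000
    out = io.StringIO()
    cfg.save("settings.yml", fp=out)    # dumps the packed dict as YAML
    return out.getvalue()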
|
tvarney/txtrpg
|
rpg/io/configuration.py
|
Python
|
mit
| 15,670
| 0
|
from __future__ import unicode_literals
from django.test import TestCase
from wtl.wtlib.models import Library, LibraryVersion
from wtl.wtlib.tests.factories import (LibraryFactory, LibraryVersionFactory,
ProjectFactory)
class LibraryTestCase(TestCase):
def test_str(self):
x = LibraryFactory()
self.assertEqual(str(x), x.name)
class LibraryVersionTestCase(TestCase):
def test_str(self):
x = LibraryVersionFactory()
self.assertEqual(str(x), x.library.name + ' ' + x.version)
def test_update_totals(self):
l1 = LibraryFactory(name='l1')
l1v1 = LibraryVersionFactory(library=l1, version="1")
l1v2 = LibraryVersionFactory(library=l1, version="2")
l2 = LibraryFactory(name='l2')
l2v1 = LibraryVersionFactory(library=l2, version="1")
l2v2 = LibraryVersionFactory(library=l2, version="2")
p = ProjectFactory()
p.libraries.add(l1v1)
p.libraries.add(l1v2)
p.libraries.add(l2v1)
LibraryVersion.update_totals(project=p)
self.assertEqual(Library.objects.get(id=l1.id).total_users, 2)
self.assertEqual(Library.objects.get(id=l2.id).total_users, 1)
self.assertEqual(LibraryVersion.objects.get(id=l1v1.id).total_users, 1)
self.assertEqual(LibraryVersion.objects.get(id=l1v2.id).total_users, 1)
self.assertEqual(LibraryVersion.objects.get(id=l2v1.id).total_users, 1)
self.assertEqual(LibraryVersion.objects.get(id=l2v2.id).total_users, 0)
def test_often_used_with(self):
lib1 = LibraryFactory()
lib2 = LibraryFactory()
lib3 = LibraryFactory()
lib4 = LibraryFactory()
ver1 = LibraryVersionFactory(library=lib1)
project_1_2 = ProjectFactory()
project_1_2.libraries.add(ver1)
project_1_2.libraries.add(LibraryVersionFactory(library=lib2))
project_1_2__2 = ProjectFactory()
project_1_2__2.libraries.add(ver1)
project_1_2__2.libraries.add(LibraryVersionFactory(library=lib2))
project_1_3 = ProjectFactory()
project_1_3.libraries.add(LibraryVersionFactory(library=lib1))
project_1_3.libraries.add(LibraryVersionFactory(library=lib3))
project_2_3_4 = ProjectFactory()
project_2_3_4.libraries.add(LibraryVersionFactory(library=lib2))
project_2_3_4.libraries.add(LibraryVersionFactory(library=lib3))
project_2_3_4.libraries.add(LibraryVersionFactory(library=lib4))
lib1_result = lib1.often_used_with()
self.assertEqual(lib2.name, lib1_result[0].name)
self.assertEqual(2, lib1_result[0].usage_count)
self.assertEqual(lib3.name, lib1_result[1].name)
self.assertEqual(1, lib1_result[1].usage_count)
class ProjectTestCase(TestCase):
def test_str(self):
x = ProjectFactory()
self.assertEqual(str(x), x.name)
|
elegion/djangodash2013
|
wtl/wtlib/tests/models.py
|
Python
|
mit
| 2,921
| 0
|
import os
from configurations.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "morgoth.settings")
os.environ.setdefault("DJANGO_CONFIGURATION", "Production")
application = DjangoWhiteNoise(get_wsgi_application())
|
rehandalal/morgoth
|
morgoth/wsgi.py
|
Python
|
mpl-2.0
| 296
| 0
|
#----------------------------------------------------------------------------
#
# Copyright (c) 2014, Enthought, Inc.
# All rights reserved.
#
# This software is provided without warranty under the terms of the BSD
# license included in /LICENSE.txt and may be redistributed only
# under the conditions described in the aforementioned license. The license
# is also available online at http://www.enthought.com/licenses/BSD.txt
#
# Thanks for using Enthought open source!
#
#----------------------------------------------------------------------------
from __future__ import unicode_literals
import ast
import traceback
import sys
import inspect
from _ast import ClassDef, Assign
from sphinx.ext.autodoc import ClassLevelDocumenter
from traits.has_traits import MetaHasTraits
from traits.trait_handlers import TraitType
def is_class_trait(name, cls):
""" Check if the name is in the list of class defined traits of ``cls``.
"""
return isinstance(cls, MetaHasTraits) and name in cls.__class_traits__
class TraitDocumenter(ClassLevelDocumenter):
""" Specialized Documenter subclass for trait attributes.
The class defines a new documenter that recovers the trait definition
signature of module level and class level traits.
To use the documenter, append the module path in the extension
attribute of the `conf.py`.
.. warning::
Using the TraitDocumenter in conjunction with TraitsDoc is not
advised.
"""
objtype = 'traitattribute'
directivetype = 'attribute'
member_order = 60
# must be higher than other attribute documenters
priority = 12
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
""" Check that the documented member is a trait instance.
"""
return (
isattr and
issubclass(type(member), TraitType) or
is_class_trait(membername, parent.object))
def document_members(self, all_members=False):
        # Trait attributes have no members.
pass
def add_content(self, more_content, no_docstring=False):
# Never try to get a docstring from the trait object.
ClassLevelDocumenter.add_content(
self, more_content, no_docstring=True)
def import_object(self):
""" Get the Trait object.
Notes
-----
Code adapted from autodoc.Documenter.import_object.
"""
try:
__import__(self.modname)
current = self.module = sys.modules[self.modname]
for part in self.objpath[:-1]:
current = self.get_attr(current, part)
name = self.objpath[-1]
self.object_name = name
self.object = None
self.parent = current
return True
# this used to only catch SyntaxError, ImportError and
# AttributeError, but importing modules with side effects can raise
# all kinds of errors.
except Exception as err:
if self.env.app and not self.env.app.quiet:
self.env.app.info(traceback.format_exc().rstrip())
            msg = (
                'autodoc can\'t import/find {0} {1!r}, it reported error: '
                '"{2}", please check your spelling and sys.path')
self.directive.warn(msg.format(
self.objtype, str(self.fullname), err))
self.env.note_reread()
return False
def add_directive_header(self, sig):
""" Add the sphinx directives.
Add the 'attribute' directive with the annotation option
set to the trait definition.
"""
ClassLevelDocumenter.add_directive_header(self, sig)
definition = self.get_trait_definition()
self.add_line(
' :annotation: = {0}'.format(definition), '<autodoc>')
def get_trait_definition(self):
""" Retrieve the Trait attribute definition
"""
# Get the class source and tokenize it.
source = inspect.getsource(self.parent)
nodes = ast.parse(source)
for node in ast.iter_child_nodes(nodes):
if isinstance(node, ClassDef):
parent_node = node
break
else:
return ''
for node in ast.iter_child_nodes(parent_node):
if isinstance(node, Assign):
name = node.targets[0]
if name.id == self.object_name:
break
else:
return ''
endlineno = name.lineno
for item in ast.walk(node):
if hasattr(item, 'lineno'):
endlineno = max(endlineno, item.lineno)
definition_lines = [
line.strip()
for line in source.splitlines()[name.lineno-1:endlineno]]
definition = ''.join(definition_lines)
equal = definition.index('=')
return definition[equal + 1:].lstrip()
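# --- Illustrative sketch (editor addition, not part of trait_documenter) ---
# A standalone, minimal version of the source-recovery technique used by
# ``get_trait_definition`` above, assuming a one-line, single-target,
# class-level assignment in a top-level class. ``_definition_of`` is a
# hypothetical helper name used only for this example.
import ast as _ast
import inspect as _inspect
def _definition_of(cls, attr_name):
    """Return the text after '=' for a one-line class-level assignment."""
    source = _inspect.getsource(cls)
    for node in _ast.walk(_ast.parse(source)):
        if isinstance(node, _ast.Assign):
            target = node.targets[0]
            if getattr(target, 'id', None) == attr_name:
                line = source.splitlines()[node.lineno - 1]
                return line.split('=', 1)[1].strip()
    return ''
# For a class (defined in a file) whose body contains ``count = Int(5)``,
# _definition_of(SomeClass, 'count') would return 'Int(5)'.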
|
itziakos/trait-documenter
|
trait_documenter/trait_documenter.py
|
Python
|
bsd-3-clause
| 4,946
| 0.000404
|
from scipy.spatial.distance import cdist
import numpy as np
class KNNC1(object):
def fit(self, X, Y):
self.X = X
self.Y = Y
def predict(self, Z):
dists = cdist(self.X, Z, 'correlation')
        indices = dists.argmin(axis=0)
return self.Y[indices]
def predict_proba(self, Z):
predictions = self.predict(Z)
        result = np.zeros((Z.shape[0], np.unique(self.Y).size))
        # One-hot encode per row; labels are assumed to be 1-indexed integers.
        result[np.arange(Z.shape[0]), predictions - 1] = 1
return result
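# --- Illustrative usage sketch (editor addition) ---
# Assumes integer labels starting at 1, since predict_proba indexes the
# probability columns with ``predictions - 1``.
if __name__ == '__main__':
    X_train = np.array([[1.0, 2.0, 3.0], [3.0, 2.0, 1.0]])
    y_train = np.array([1, 2])
    clf = KNNC1()
    clf.fit(X_train, y_train)
    Z = np.array([[2.0, 3.0, 4.0]])
    print(clf.predict(Z))        # -> [1] (perfectly correlated with row 0)
    print(clf.predict_proba(Z))  # -> [[1. 0.]]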
|
maat25/brain-decoding
|
classifier/knn-correlation.py
|
Python
|
mit
| 427
| 0.04918
|
# coding: utf-8
#
# Copyright 2015 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Domain objects for a collection and its constituents.
Domain objects capture domain-specific logic and are agnostic of how the
objects they represent are stored. All methods and properties in this file
should therefore be independent of the specific storage models used.
"""
__author__ = 'Ben Henning'
import copy
import feconf
import utils
# Do not modify the values of these constants. This is to preserve backwards
# compatibility with previous change dicts.
COLLECTION_NODE_PROPERTY_PREREQUISITE_SKILLS = 'prerequisite_skills'
COLLECTION_NODE_PROPERTY_ACQUIRED_SKILLS = 'acquired_skills'
# This takes an additional 'exploration_id' parameter.
CMD_ADD_COLLECTION_NODE = 'add_collection_node'
# This takes an additional 'exploration_id' parameter.
CMD_DELETE_COLLECTION_NODE = 'delete_collection_node'
# This takes additional 'property_name' and 'new_value' parameters and,
# optionally, 'old_value'.
CMD_EDIT_COLLECTION_NODE_PROPERTY = 'edit_collection_node_property'
# This takes additional 'property_name' and 'new_value' parameters and,
# optionally, 'old_value'.
CMD_EDIT_COLLECTION_PROPERTY = 'edit_collection_property'
# This takes additional 'from_version' and 'to_version' parameters for logging.
CMD_MIGRATE_SCHEMA_TO_LATEST_VERSION = 'migrate_schema_to_latest_version'
class CollectionChange(object):
"""Domain object class for a change to a collection.
IMPORTANT: Ensure that all changes to this class (and how these cmds are
interpreted in general) preserve backward-compatibility with the
collection snapshots in the datastore. Do not modify the definitions of
cmd keys that already exist.
"""
COLLECTION_NODE_PROPERTIES = (
COLLECTION_NODE_PROPERTY_PREREQUISITE_SKILLS,
COLLECTION_NODE_PROPERTY_ACQUIRED_SKILLS)
COLLECTION_PROPERTIES = ('title', 'category', 'objective')
def __init__(self, change_dict):
"""Initializes an CollectionChange object from a dict.
change_dict represents a command. It should have a 'cmd' key, and one
or more other keys. The keys depend on what the value for 'cmd' is.
The possible values for 'cmd' are listed below, together with the other
keys in the dict:
- 'add_collection_node' (with exploration_id)
- 'delete_collection_node' (with exploration_id)
- 'edit_collection_node_property' (with exploration_id,
property_name, new_value and, optionally, old_value)
- 'edit_collection_property' (with property_name, new_value and,
optionally, old_value)
        - 'migrate_schema_to_latest_version' (with from_version and
          to_version)
For a collection node, property_name must be one of
COLLECTION_NODE_PROPERTIES. For a collection, property_name must be
one of COLLECTION_PROPERTIES.
"""
if 'cmd' not in change_dict:
raise Exception('Invalid change_dict: %s' % change_dict)
self.cmd = change_dict['cmd']
if self.cmd == CMD_ADD_COLLECTION_NODE:
self.exploration_id = change_dict['exploration_id']
elif self.cmd == CMD_DELETE_COLLECTION_NODE:
self.exploration_id = change_dict['exploration_id']
elif self.cmd == CMD_EDIT_COLLECTION_NODE_PROPERTY:
if (change_dict['property_name'] not in
self.COLLECTION_NODE_PROPERTIES):
raise Exception('Invalid change_dict: %s' % change_dict)
self.exploration_id = change_dict['exploration_id']
self.property_name = change_dict['property_name']
self.new_value = change_dict['new_value']
self.old_value = change_dict.get('old_value')
elif self.cmd == CMD_EDIT_COLLECTION_PROPERTY:
if (change_dict['property_name'] not in
self.COLLECTION_PROPERTIES):
raise Exception('Invalid change_dict: %s' % change_dict)
self.property_name = change_dict['property_name']
self.new_value = change_dict['new_value']
self.old_value = change_dict.get('old_value')
elif self.cmd == CMD_MIGRATE_SCHEMA_TO_LATEST_VERSION:
self.from_version = change_dict['from_version']
self.to_version = change_dict['to_version']
else:
raise Exception('Invalid change_dict: %s' % change_dict)
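# --- Illustrative example (editor addition) ---
# A change dict accepted by CollectionChange; the values are hypothetical:
# CollectionChange({
#     'cmd': 'edit_collection_property',
#     'property_name': 'title',
#     'new_value': 'New title',
#     'old_value': 'Old title',
# })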
class CollectionCommitLogEntry(object):
"""Value object representing a commit to an collection."""
def __init__(
self, created_on, last_updated, user_id, username, collection_id,
commit_type, commit_message, commit_cmds, version,
post_commit_status, post_commit_community_owned,
post_commit_is_private):
self.created_on = created_on
self.last_updated = last_updated
self.user_id = user_id
self.username = username
self.collection_id = collection_id
self.commit_type = commit_type
self.commit_message = commit_message
self.commit_cmds = commit_cmds
self.version = version
self.post_commit_status = post_commit_status
self.post_commit_community_owned = post_commit_community_owned
self.post_commit_is_private = post_commit_is_private
def to_dict(self):
"""This omits created_on, user_id and (for now) commit_cmds."""
return {
'last_updated': utils.get_time_in_millisecs(self.last_updated),
'username': self.username,
'collection_id': self.collection_id,
'commit_type': self.commit_type,
'commit_message': self.commit_message,
'version': self.version,
'post_commit_status': self.post_commit_status,
'post_commit_community_owned': self.post_commit_community_owned,
'post_commit_is_private': self.post_commit_is_private,
}
class CollectionNode(object):
"""Domain object describing a node in the exploration graph of a
collection. The node contains various information, including a reference to
an exploration (its ID), prerequisite skills in order to be qualified to
play the exploration, and acquired skills attained once the exploration is
completed.
"""
"""Constructs a new CollectionNode object.
Args:
exploration_id: A valid ID of an exploration referenced by this node.
prerequisite_skills: A list of skills (strings).
acquired_skills: A list of skills (strings).
"""
def __init__(self, exploration_id, prerequisite_skills, acquired_skills):
self.exploration_id = exploration_id
self.prerequisite_skills = prerequisite_skills
self.acquired_skills = acquired_skills
def to_dict(self):
return {
'exploration_id': self.exploration_id,
'prerequisite_skills': self.prerequisite_skills,
'acquired_skills': self.acquired_skills
}
@classmethod
def from_dict(cls, node_dict):
return cls(
copy.deepcopy(node_dict['exploration_id']),
copy.deepcopy(node_dict['prerequisite_skills']),
copy.deepcopy(node_dict['acquired_skills']))
@property
def skills(self):
"""Returns a set of skills where each prerequisite and acquired skill
in this collection node is represented at most once.
"""
return set(self.prerequisite_skills) | set(self.acquired_skills)
def update_prerequisite_skills(self, prerequisite_skills):
self.prerequisite_skills = copy.deepcopy(prerequisite_skills)
def update_acquired_skills(self, acquired_skills):
self.acquired_skills = copy.deepcopy(acquired_skills)
def validate(self):
"""Validates various properties of the collection node."""
if not isinstance(self.exploration_id, basestring):
raise utils.ValidationError(
'Expected exploration ID to be a string, received %s' %
self.exploration_id)
if not isinstance(self.prerequisite_skills, list):
raise utils.ValidationError(
'Expected prerequisite_skills to be a list, received %s' %
self.prerequisite_skills)
if len(set(self.prerequisite_skills)) != len(self.prerequisite_skills):
raise utils.ValidationError(
'The prerequisite_skills list has duplicate entries: %s' %
self.prerequisite_skills)
for prerequisite_skill in self.prerequisite_skills:
if not isinstance(prerequisite_skill, basestring):
raise utils.ValidationError(
'Expected all prerequisite skills to be strings, '
'received %s' % prerequisite_skill)
if not isinstance(self.acquired_skills, list):
raise utils.ValidationError(
'Expected acquired_skills to be a list, received %s' %
self.acquired_skills)
if len(set(self.acquired_skills)) != len(self.acquired_skills):
raise utils.ValidationError(
'The acquired_skills list has duplicate entries: %s' %
self.acquired_skills)
for acquired_skill in self.acquired_skills:
if not isinstance(acquired_skill, basestring):
raise utils.ValidationError(
'Expected all acquired skills to be strings, received %s' %
acquired_skill)
redundant_skills = (
set(self.prerequisite_skills) & set(self.acquired_skills))
if redundant_skills:
raise utils.ValidationError(
'There are some skills which are both required for '
'exploration %s and acquired after playing it: %s' %
(self.exploration_id, redundant_skills))
@classmethod
def create_default_node(cls, exploration_id):
return cls(exploration_id, [], [])
class Collection(object):
"""Domain object for an Oppia collection."""
"""Constructs a new collection given all the information necessary to
represent a collection.
Note: The schema_version represents the version of any underlying
dictionary or list structures stored within the collection. In particular,
the schema for CollectionNodes is represented by this version. If the
schema for CollectionNode changes, then a migration function will need to
be added to this class to convert from the current schema version to the
new one. This function should be called in both from_yaml in this class and
collection_services._migrate_collection_to_latest_schema.
feconf.CURRENT_COLLECTION_SCHEMA_VERSION should be incremented and the new
value should be saved in the collection after the migration process,
ensuring it represents the latest schema version.
"""
def __init__(self, collection_id, title, category, objective,
schema_version, nodes, version, created_on=None,
last_updated=None):
self.id = collection_id
self.title = title
self.category = category
self.objective = objective
self.schema_version = schema_version
self.nodes = nodes
self.version = version
self.created_on = created_on
self.last_updated = last_updated
def to_dict(self):
return {
'id': self.id,
'title': self.title,
'category': self.category,
'objective': self.objective,
'schema_version': self.schema_version,
'nodes': [
node.to_dict() for node in self.nodes
]
}
@classmethod
def create_default_collection(
cls, collection_id, title, category, objective):
return cls(
collection_id, title, category, objective,
feconf.CURRENT_COLLECTION_SCHEMA_VERSION, [], 0)
@classmethod
def from_dict(
cls, collection_dict, collection_version=0,
collection_created_on=None, collection_last_updated=None):
collection = cls(
collection_dict['id'], collection_dict['title'],
collection_dict['category'], collection_dict['objective'],
collection_dict['schema_version'], [], collection_version,
collection_created_on, collection_last_updated)
for node_dict in collection_dict['nodes']:
collection.nodes.append(
CollectionNode.from_dict(node_dict))
return collection
def to_yaml(self):
collection_dict = self.to_dict()
# The ID is the only property which should not be stored within the
# YAML representation.
del collection_dict['id']
return utils.yaml_from_dict(collection_dict)
@classmethod
def from_yaml(cls, collection_id, yaml_content):
try:
collection_dict = utils.dict_from_yaml(yaml_content)
except Exception as e:
raise Exception(
'Please ensure that you are uploading a YAML text file, not '
'a zip file. The YAML parser returned the following error: %s'
% e)
collection_dict['id'] = collection_id
return Collection.from_dict(collection_dict)
@property
def skills(self):
"""The skills of a collection are made up of all prerequisite and
acquired skills of each exploration that is part of this collection.
This returns a sorted list of all the skills of the collection.
"""
unique_skills = set()
for node in self.nodes:
unique_skills.update(node.skills)
return sorted(unique_skills)
@property
def exploration_ids(self):
"""Returns a list of all the exploration IDs that are part of this
collection.
"""
return [
node.exploration_id for node in self.nodes]
@property
def init_exploration_ids(self):
"""Returns a list of exploration IDs that are starting points for this
        collection (i.e., they require no prior skills to complete). The order
of these IDs is given by the order each respective exploration was
added to the collection.
"""
init_exp_ids = []
for node in self.nodes:
if not node.prerequisite_skills:
init_exp_ids.append(node.exploration_id)
return init_exp_ids
def get_next_exploration_ids(self, completed_exploration_ids):
"""Returns a list of exploration IDs for which the prerequisite skills
are satisfied. These are the next explorations to complete for a user.
If the list returned is empty and the collection is valid, then all
skills have been acquired and the collection is completed. If the input
list is empty, then only explorations with no prerequisite skills are
returned. The order of the exploration IDs is given by the order in
which each exploration was added to the collection.
"""
acquired_skills = set()
for completed_exp_id in completed_exploration_ids:
acquired_skills.update(
self.get_node(completed_exp_id).acquired_skills)
next_exp_ids = []
for node in self.nodes:
if node.exploration_id in completed_exploration_ids:
continue
prereq_skills = set(node.prerequisite_skills)
if prereq_skills <= acquired_skills:
next_exp_ids.append(node.exploration_id)
return next_exp_ids
@classmethod
def is_demo_collection_id(cls, collection_id):
"""Whether the collection id is that of a demo collection."""
return collection_id in feconf.DEMO_COLLECTIONS
@property
def is_demo(self):
"""Whether the collection is one of the demo collections."""
return self.is_demo_collection_id(self.id)
def update_title(self, title):
self.title = title
def update_category(self, category):
self.category = category
def update_objective(self, objective):
self.objective = objective
def _find_node(self, exploration_id):
for i in range(len(self.nodes)):
if self.nodes[i].exploration_id == exploration_id:
return i
return None
def get_node(self, exploration_id):
"""Retrieves a collection node from the collection based on an
exploration ID.
"""
for node in self.nodes:
if node.exploration_id == exploration_id:
return node
return None
def add_node(self, exploration_id):
if self.get_node(exploration_id) is not None:
raise ValueError(
'Exploration is already part of this collection: %s' %
exploration_id)
self.nodes.append(CollectionNode.create_default_node(exploration_id))
def delete_node(self, exploration_id):
node_index = self._find_node(exploration_id)
if node_index is None:
raise ValueError(
'Exploration is not part of this collection: %s' %
exploration_id)
del self.nodes[node_index]
def validate(self, strict=True):
"""Validates all properties of this collection and its constituents."""
if not isinstance(self.title, basestring):
raise utils.ValidationError(
'Expected title to be a string, received %s' % self.title)
utils.require_valid_name(self.title, 'the collection title')
if not isinstance(self.category, basestring):
raise utils.ValidationError(
'Expected category to be a string, received %s'
% self.category)
utils.require_valid_name(self.category, 'the collection category')
if not isinstance(self.objective, basestring):
raise utils.ValidationError(
'Expected objective to be a string, received %s' %
self.objective)
if not self.objective:
raise utils.ValidationError(
'An objective must be specified (in the \'Settings\' tab).')
if not isinstance(self.schema_version, int):
raise utils.ValidationError(
'Expected schema version to be an integer, received %s' %
self.schema_version)
if self.schema_version != feconf.CURRENT_COLLECTION_SCHEMA_VERSION:
raise utils.ValidationError(
'Expected schema version to be %s, received %s' % (
feconf.CURRENT_COLLECTION_SCHEMA_VERSION,
self.schema_version))
if not isinstance(self.nodes, list):
raise utils.ValidationError(
'Expected nodes to be a list, received %s' % self.nodes)
all_exp_ids = self.exploration_ids
if len(set(all_exp_ids)) != len(all_exp_ids):
raise utils.ValidationError(
'There are explorations referenced in the collection more '
'than once.')
# Validate all collection nodes.
for node in self.nodes:
node.validate()
if strict:
if not self.nodes:
raise utils.ValidationError(
'Expected to have at least 1 exploration in the '
'collection.')
# Ensure the collection may be started.
if not self.init_exploration_ids:
raise utils.ValidationError(
'Expected to have at least 1 exploration with no '
'prerequisite skills.')
# Ensure the collection can be completed. This is done in two
# steps: first, no exploration may grant a skill that it
# simultaneously lists as a prerequisite. Second, every exploration
# in the collection must be reachable when starting from the
# explorations with no prerequisite skills and playing through all
# subsequent explorations provided by get_next_exploration_ids.
completed_exp_ids = set(self.init_exploration_ids)
next_exp_ids = self.get_next_exploration_ids(
list(completed_exp_ids))
while next_exp_ids:
completed_exp_ids.update(set(next_exp_ids))
next_exp_ids = self.get_next_exploration_ids(
list(completed_exp_ids))
if len(completed_exp_ids) != len(self.nodes):
unreachable_ids = set(all_exp_ids) - completed_exp_ids
raise utils.ValidationError(
'Some explorations are unreachable from the initial '
'explorations: %s' % unreachable_ids)
class CollectionSummary(object):
"""Domain object for an Oppia collection summary."""
def __init__(self, collection_id, title, category, objective,
status, community_owned, owner_ids, editor_ids,
viewer_ids, version, collection_model_created_on,
collection_model_last_updated):
self.id = collection_id
self.title = title
self.category = category
self.objective = objective
self.status = status
self.community_owned = community_owned
self.owner_ids = owner_ids
self.editor_ids = editor_ids
self.viewer_ids = viewer_ids
self.version = version
self.collection_model_created_on = collection_model_created_on
self.collection_model_last_updated = collection_model_last_updated
def to_dict(self):
return {
'id': self.id,
'title': self.title,
'category': self.category,
'objective': self.objective,
'status': self.status,
'community_owned': self.community_owned,
'owner_ids': self.owner_ids,
'editor_ids': self.editor_ids,
'viewer_ids': self.viewer_ids,
'version': self.version,
'collection_model_created_on': self.collection_model_created_on,
'collection_model_last_updated': self.collection_model_last_updated
}
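# --- Illustrative sketch (editor addition, not part of collection_domain) ---
# How the skill-gated traversal in Collection.get_next_exploration_ids plays
# out for a two-node collection: 'exp0' has no prerequisites and grants
# 'skill-a', which unlocks 'exp1'. (Commented out because constructing a
# Collection requires the Oppia runtime's feconf/utils modules.)
# nodes = [
#     CollectionNode('exp0', [], ['skill-a']),
#     CollectionNode('exp1', ['skill-a'], []),
# ]
# collection = Collection('cid', 'Title', 'Category', 'Objective',
#                         feconf.CURRENT_COLLECTION_SCHEMA_VERSION, nodes, 0)
# collection.get_next_exploration_ids([])        # -> ['exp0']
# collection.get_next_exploration_ids(['exp0'])  # -> ['exp1']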
|
won0089/oppia
|
core/domain/collection_domain.py
|
Python
|
apache-2.0
| 22,777
| 0
|
__author__ = 'aakilomar'
import requests, json, time
requests.packages.urllib3.disable_warnings()
host = "https://localhost:8443"
#from rest_requests import add_user
def add_user(phone):
post_url = host + "/api/user/add/" + str(phone)
return requests.post(post_url,None, verify=False).json()
def add_group(userid,phonenumbers):
post_url = host + "/api/group/add/" + str(userid) + "/" + phonenumbers
return requests.post(post_url,None, verify=False).json()
#/add/{userId}/{groupId}/{issue}
def add_vote(userid,groupid,issue):
post_url = host + "/api/vote/add/" + str(userid) + "/" + str(groupid) + "/" + issue
return requests.post(post_url,None, verify=False).json()
def vote_list():
list_url = host + "/api/vote/listallfuture"
    r = requests.get(list_url, verify=False)
    print r.json()
print r.text
def set_event_time(eventid,time):
post_url = host + "/api/event/settime/" + str(eventid) + "/" + time
return requests.post(post_url,None, verify=False).json()
def rsvp(eventid,userid,message):
post_url = host + "/api/event/rsvp/" + str(eventid) + "/" + str(userid) + "/" + str(message)
return requests.post(post_url,None, verify=False).json()
def add_user_to_group(userid,groupid):
post_url = host + "/api/group/add/usertogroup/" + str(userid) + "/" + str(groupid)
return requests.post(post_url,None, verify=False).json()
def manualreminder(eventid,message):
post_url = host + "/api/event/manualreminder/" + str(eventid) + "/" + str(message)
return requests.post(post_url,None, verify=False).json()
user = add_user("0826607134")
group = add_group(user['id'],"0821111111")
user2 = add_user("0821111112")
group = add_user_to_group(user2['id'],group['id'])
print user
print group
issue = add_vote(user['id'], group['id'],"test vote")
print issue
#future_votes = vote_list()
#print future_votes
issue = set_event_time(issue['id'],"30th 7pm")
r = rsvp(issue['id'],user['id'],"yes")
r2 = rsvp(issue['id'],user2['id'],"no")
r = rsvp(issue['id'],user['id'],"yes")
ok = manualreminder(issue['id'],"|") # should use reminder mesage
ok = manualreminder(issue['id'],"my manual messsage")
|
PaballoDitshego/grassroot-platform
|
docs/tests/vote_requests.py
|
Python
|
bsd-3-clause
| 2,148
| 0.021881
|
# Copyright (C) 2011-2012 Patrick Totzke <patricktotzke@gmail.com>
# This file is released under the GNU GPL, version 3 or a later revision.
# For further details see the COPYING file
import glob
import logging
import os
import re
import email
import email.policy
import email.utils
from email.encoders import encode_7or8bit
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.mime.application import MIMEApplication
import email.charset as charset
import gpg
from .attachment import Attachment
from .. import __version__
from .. import helper
from .. import crypto
from ..settings.const import settings
from ..errors import GPGProblem, GPGCode
charset.add_charset('utf-8', charset.QP, charset.QP, 'utf-8')
class Envelope(object):
"""a message that is not yet sent and still editable.
It holds references to unencoded! body text and mail headers among other
things. Envelope implements the python container API for easy access of
header values. So `e['To']`, `e['To'] = 'foo@bar.baz'` and
'e.get_all('To')' would work for an envelope `e`..
"""
headers = None
"""
dict containing the mail headers (a list of strings for each header key)
"""
body = None
"""mail body as unicode string"""
tmpfile = None
"""template text for initial content"""
attachments = None
"""list of :class:`Attachments <alot.db.attachment.Attachment>`"""
tags = []
"""tags to add after successful sendout"""
def __init__(
self, template=None, bodytext=None, headers=None, attachments=None,
sign=False, sign_key=None, encrypt=False, tags=None, replied=None,
passed=None):
"""
:param template: if not None, the envelope will be initialised by
:meth:`parsing <parse_template>` this string before
setting any other values given to this constructor.
:type template: str
:param bodytext: text used as body part
:type bodytext: str
:param headers: unencoded header values
:type headers: dict (str -> [unicode])
:param attachments: file attachments to include
:type attachments: list of :class:`~alot.db.attachment.Attachment`
:param tags: tags to add after successful sendout and saving this msg
:type tags: list of str
:param replied: message being replied to
:type replied: :class:`~alot.db.message.Message`
:param passed: message being passed on
:type replied: :class:`~alot.db.message.Message`
"""
logging.debug('TEMPLATE: %s', template)
if template:
self.parse_template(template)
logging.debug('PARSED TEMPLATE: %s', template)
logging.debug('BODY: %s', self.body)
self.body = bodytext or u''
        # TODO: if this were a collections.defaultdict, a number of methods
# could be simplified.
self.headers = headers or {}
self.attachments = list(attachments) if attachments is not None else []
self.sign = sign
self.sign_key = sign_key
self.encrypt = encrypt
self.encrypt_keys = {}
self.tags = tags or [] # tags to add after successful sendout
self.replied = replied # message being replied to
self.passed = passed # message being passed on
self.sent_time = None
self.modified_since_sent = False
self.sending = False # semaphore to avoid accidental double sendout
def __str__(self):
return "Envelope (%s)\n%s" % (self.headers, self.body)
def __setitem__(self, name, val):
"""setter for header values. This allows adding header like so:
envelope['Subject'] = u'sm\xf8rebr\xf8d'
"""
if name not in self.headers:
self.headers[name] = []
self.headers[name].append(val)
if self.sent_time:
self.modified_since_sent = True
def __getitem__(self, name):
"""getter for header values.
:raises: KeyError if undefined
"""
return self.headers[name][0]
def __delitem__(self, name):
del self.headers[name]
if self.sent_time:
self.modified_since_sent = True
def __contains__(self, name):
return name in self.headers
def get(self, key, fallback=None):
"""secure getter for header values that allows specifying a `fallback`
return string (defaults to None). This returns the first matching value
and doesn't raise KeyErrors"""
if key in self.headers:
value = self.headers[key][0]
else:
value = fallback
return value
def get_all(self, key, fallback=None):
"""returns all header values for given key"""
if key in self.headers:
value = self.headers[key]
else:
value = fallback or []
return value
def add(self, key, value):
"""add header value"""
if key not in self.headers:
self.headers[key] = []
self.headers[key].append(value)
if self.sent_time:
self.modified_since_sent = True
def attach(self, attachment, filename=None, ctype=None):
"""
attach a file
:param attachment: File to attach, given as
:class:`~alot.db.attachment.Attachment` object or path to a file.
:type attachment: :class:`~alot.db.attachment.Attachment` or str
:param filename: filename to use in content-disposition.
Will be ignored if `path` matches multiple files
:param ctype: force content-type to be used for this attachment
:type ctype: str
"""
if isinstance(attachment, Attachment):
self.attachments.append(attachment)
elif isinstance(attachment, str):
path = os.path.expanduser(attachment)
part = helper.mimewrap(path, filename, ctype)
self.attachments.append(Attachment(part))
else:
raise TypeError('attach accepts an Attachment or str')
if self.sent_time:
self.modified_since_sent = True
def construct_mail(self):
"""
compiles the information contained in this envelope into a
:class:`email.Message`.
"""
# Build body text part. To properly sign/encrypt messages later on, we
# convert the text to its canonical format (as per RFC 2015).
canonical_format = self.body.encode('utf-8')
textpart = MIMEText(canonical_format, 'plain', 'utf-8')
# wrap it in a multipart container if necessary
if self.attachments:
inner_msg = MIMEMultipart()
inner_msg.attach(textpart)
# add attachments
for a in self.attachments:
inner_msg.attach(a.get_mime_representation())
else:
inner_msg = textpart
if self.sign:
plaintext = inner_msg.as_bytes(policy=email.policy.SMTP)
logging.debug('signing plaintext: %s', plaintext)
try:
signatures, signature_str = crypto.detached_signature_for(
plaintext, [self.sign_key])
if len(signatures) != 1:
raise GPGProblem("Could not sign message (GPGME "
"did not return a signature)",
code=GPGCode.KEY_CANNOT_SIGN)
except gpg.errors.GPGMEError as e:
if e.getcode() == gpg.errors.BAD_PASSPHRASE:
# If GPG_AGENT_INFO is unset or empty, the user just does
# not have gpg-agent running (properly).
if os.environ.get('GPG_AGENT_INFO', '').strip() == '':
msg = "Got invalid passphrase and GPG_AGENT_INFO\
not set. Please set up gpg-agent."
raise GPGProblem(msg, code=GPGCode.BAD_PASSPHRASE)
else:
raise GPGProblem("Bad passphrase. Is gpg-agent "
"running?",
code=GPGCode.BAD_PASSPHRASE)
raise GPGProblem(str(e), code=GPGCode.KEY_CANNOT_SIGN)
micalg = crypto.RFC3156_micalg_from_algo(signatures[0].hash_algo)
unencrypted_msg = MIMEMultipart(
'signed', micalg=micalg, protocol='application/pgp-signature')
            # wrap signature in a MIME container
stype = 'pgp-signature; name="signature.asc"'
signature_mime = MIMEApplication(
_data=signature_str.decode('ascii'),
_subtype=stype,
_encoder=encode_7or8bit)
signature_mime['Content-Description'] = 'signature'
signature_mime.set_charset('us-ascii')
# add signed message and signature to outer message
unencrypted_msg.attach(inner_msg)
unencrypted_msg.attach(signature_mime)
unencrypted_msg['Content-Disposition'] = 'inline'
else:
unencrypted_msg = inner_msg
if self.encrypt:
plaintext = unencrypted_msg.as_bytes(policy=email.policy.SMTP)
logging.debug('encrypting plaintext: %s', plaintext)
try:
encrypted_str = crypto.encrypt(
plaintext, list(self.encrypt_keys.values()))
except gpg.errors.GPGMEError as e:
raise GPGProblem(str(e), code=GPGCode.KEY_CANNOT_ENCRYPT)
outer_msg = MIMEMultipart('encrypted',
protocol='application/pgp-encrypted')
version_str = 'Version: 1'
encryption_mime = MIMEApplication(_data=version_str,
_subtype='pgp-encrypted',
_encoder=encode_7or8bit)
encryption_mime.set_charset('us-ascii')
encrypted_mime = MIMEApplication(
_data=encrypted_str.decode('ascii'),
_subtype='octet-stream',
_encoder=encode_7or8bit)
encrypted_mime.set_charset('us-ascii')
outer_msg.attach(encryption_mime)
outer_msg.attach(encrypted_mime)
else:
outer_msg = unencrypted_msg
headers = self.headers.copy()
# add Message-ID
if 'Message-ID' not in headers:
headers['Message-ID'] = [email.utils.make_msgid()]
if 'User-Agent' in headers:
uastring_format = headers['User-Agent'][0]
else:
uastring_format = settings.get('user_agent').strip()
uastring = uastring_format.format(version=__version__)
if uastring:
headers['User-Agent'] = [uastring]
# copy headers from envelope to mail
for k, vlist in headers.items():
for v in vlist:
outer_msg.add_header(k, v)
return outer_msg
def parse_template(self, tmp, reset=False, only_body=False):
"""parses a template or user edited string to fills this envelope.
:param tmp: the string to parse.
:type tmp: str
:param reset: remove previous envelope content
:type reset: bool
"""
logging.debug('GoT: """\n%s\n"""', tmp)
if self.sent_time:
self.modified_since_sent = True
if only_body:
self.body = tmp
else:
m = re.match(r'(?P<h>([a-zA-Z0-9_-]+:.+\n)*)\n?(?P<b>(\s*.*)*)',
tmp)
assert m
d = m.groupdict()
headertext = d['h']
self.body = d['b']
# remove existing content
if reset:
self.headers = {}
# go through multiline, utf-8 encoded headers
# we decode the edited text ourselves here as
# email.message_from_file can't deal with raw utf8 header values
key = value = None
for line in headertext.splitlines():
if re.match('[a-zA-Z0-9_-]+:', line): # new k/v pair
if key and value: # save old one from stack
self.add(key, value) # save
key, value = line.strip().split(':', 1) # parse new pair
# strip spaces, otherwise we end up having " foo" as value
# of "Subject: foo"
value = value.strip()
elif key and value: # append new line without key prefix
value += line
if key and value: # save last one if present
self.add(key, value)
# interpret 'Attach' pseudo header
if 'Attach' in self:
to_attach = []
for line in self.get_all('Attach'):
gpath = os.path.expanduser(line.strip())
to_attach += [g for g in glob.glob(gpath)
if os.path.isfile(g)]
logging.debug('Attaching: %s', to_attach)
for path in to_attach:
self.attach(path)
del self['Attach']
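# --- Illustrative usage sketch (editor addition) ---
# The container API from the class docstring in action; all values are
# hypothetical, and construct_mail() additionally needs alot's settings.
# e = Envelope(bodytext=u'hello')
# e['To'] = 'foo@bar.baz'
# e.add('To', 'second@recipient.example')
# e['To']            # -> 'foo@bar.baz' (first value only)
# e.get_all('To')    # -> ['foo@bar.baz', 'second@recipient.example']
# 'Subject' in e     # -> False
# A template containing an 'Attach' pseudo-header is glob-expanded into
# attachments by parse_template, as shown above.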
|
lucc/alot
|
alot/db/envelope.py
|
Python
|
gpl-3.0
| 13,285
| 0
|
# Copyright 2017 QuantRocket - All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import getpass
from quantrocket.houston import houston
from quantrocket.cli.utils.output import json_to_cli
def get_credentials(gateway):
"""
Returns username and trading mode (paper/live) for IB Gateway.
Parameters
----------
gateway : str, required
name of IB Gateway service to get credentials for (for example, 'ibg1')
Returns
-------
dict
credentials
"""
statuses = list_gateway_statuses(gateways=[gateway])
if not statuses:
raise ValueError("no such IB Gateway: {0}".format(gateway))
response = houston.get("/{0}/credentials".format(gateway))
houston.raise_for_status_with_json(response)
# It's possible to get a 204 empty response
if not response.content:
return {}
return response.json()
def set_credentials(gateway, username=None, password=None, trading_mode=None):
"""
Set username/password and trading mode (paper/live) for IB Gateway.
Can be used to set new credentials or switch between paper and live trading
(must have previously entered live credentials). Setting new credentials will
restart IB Gateway and takes a moment to complete.
Credentials are encrypted at rest and never leave your deployment.
Parameters
----------
gateway : str, required
name of IB Gateway service to set credentials for (for example, 'ibg1')
username : str, optional
IBKR username (optional if only modifying trading environment)
password : str, optional
IBKR password (if omitted and username is provided, will be prompted
for password)
trading_mode : str, optional
the trading mode to use ('paper' or 'live')
Returns
-------
dict
status message
"""
statuses = list_gateway_statuses(gateways=[gateway])
if not statuses:
raise ValueError("no such IB Gateway: {0}".format(gateway))
if username and not password:
password = getpass.getpass(prompt="Enter IBKR Password: ")
data = {}
if username:
data["username"] = username
if password:
data["password"] = password
if trading_mode:
data["trading_mode"] = trading_mode
response = houston.put("/{0}/credentials".format(gateway), data=data, timeout=180)
houston.raise_for_status_with_json(response)
return response.json()
def _cli_get_or_set_credentials(*args, **kwargs):
if kwargs.get("username", None) or kwargs.get("password", None) or kwargs.get("trading_mode", None):
return json_to_cli(set_credentials, *args, **kwargs)
else:
return json_to_cli(get_credentials, gateway=kwargs.get("gateway", None))
def list_gateway_statuses(status=None, gateways=None):
"""
Query statuses of IB Gateways.
Parameters
----------
status : str, optional
limit to IB Gateways in this status. Possible choices: running, stopped, error
gateways : list of str, optional
limit to these IB Gateways
Returns
-------
dict of gateway:status (if status arg not provided), or list of gateways (if status arg provided)
"""
params = {}
if gateways:
params["gateways"] = gateways
if status:
params["status"] = status
response = houston.get("/ibgrouter/gateways", params=params)
houston.raise_for_status_with_json(response)
return response.json()
def _cli_list_gateway_statuses(*args, **kwargs):
return json_to_cli(list_gateway_statuses, *args, **kwargs)
def start_gateways(gateways=None, wait=False):
"""
Start one or more IB Gateways.
Parameters
----------
gateways : list of str, optional
limit to these IB Gateways
wait: bool
wait for the IB Gateway to start before returning (default is to start
the gateways asynchronously)
Returns
-------
dict
status message
"""
params = {"wait": wait}
if gateways:
params["gateways"] = gateways
response = houston.post("/ibgrouter/gateways", params=params, timeout=120)
houston.raise_for_status_with_json(response)
return response.json()
def _cli_start_gateways(*args, **kwargs):
return json_to_cli(start_gateways, *args, **kwargs)
def stop_gateways(gateways=None, wait=False):
"""
Stop one or more IB Gateways.
Parameters
----------
gateways : list of str, optional
limit to these IB Gateways
wait: bool
wait for the IB Gateway to stop before returning (default is to stop
the gateways asynchronously)
Returns
-------
dict
status message
"""
params = {"wait": wait}
if gateways:
params["gateways"] = gateways
response = houston.delete("/ibgrouter/gateways", params=params, timeout=60)
houston.raise_for_status_with_json(response)
return response.json()
def _cli_stop_gateways(*args, **kwargs):
return json_to_cli(stop_gateways, *args, **kwargs)
def load_ibg_config(filename):
"""
Upload a new IB Gateway permissions config.
Permission configs are only necessary when running multiple IB Gateways with
differing market data permissions.
Parameters
----------
filename : str, required
the config file to upload
Returns
-------
dict
status message
"""
with open(filename) as file:
response = houston.put("/ibgrouter/config", data=file.read())
houston.raise_for_status_with_json(response)
return response.json()
def get_ibg_config():
"""
Returns the current IB Gateway permissions config.
Returns
-------
dict
the config as a dict
"""
response = houston.get("/ibgrouter/config")
houston.raise_for_status_with_json(response)
# It's possible to get a 204 empty response
if not response.content:
return {}
return response.json()
def _cli_load_or_show_config(filename=None):
if filename:
return json_to_cli(load_ibg_config, filename)
else:
return json_to_cli(get_ibg_config)
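# --- Illustrative usage sketch (editor addition) ---
# A typical flow against a running deployment; the gateway name 'ibg1' and
# the credentials shown are hypothetical. (Commented out because the calls
# require a reachable houston service.)
# from quantrocket.ibg import (set_credentials, get_credentials,
#                              start_gateways, list_gateway_statuses)
# set_credentials('ibg1', username='myuser', trading_mode='paper')
# get_credentials('ibg1')  # -> {'username': 'myuser', 'trading_mode': 'paper'}
# start_gateways(gateways=['ibg1'], wait=True)
# list_gateway_statuses(status='running')  # -> ['ibg1']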
|
quantrocket-llc/quantrocket-client
|
quantrocket/ibg.py
|
Python
|
apache-2.0
| 6,653
| 0.002856
|
# coding=utf-8
# Author: Nic Wolfe <nic@wolfeden.ca>
# URL: https://sickrage.github.io
#
# This file is part of SickRage.
#
# SickRage is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# SickRage is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with SickRage. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function, unicode_literals
from sickbeard import helpers, logger
meta_session = helpers.make_session()
def getShowImage(url, imgNum=None):
if url is None:
return None
# if they provided a fanart number try to use it instead
if imgNum is not None:
tempURL = url.split('-')[0] + "-" + str(imgNum) + ".jpg"
else:
tempURL = url
logger.log("Fetching image from " + tempURL, logger.DEBUG)
image_data = helpers.getURL(tempURL, session=meta_session, returns='content')
if image_data is None:
logger.log("There was an error trying to retrieve the image, aborting", logger.WARNING)
return
return image_data
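# --- Illustrative note (editor addition) ---
# The imgNum branch above swaps the image number in indexer fanart URLs by
# splitting on the first '-'; e.g. for a hypothetical URL:
#   url = 'http://example.com/banners/fanart/original/12345-1.jpg'
#   getShowImage(url, imgNum=3) fetches '.../12345-3.jpg'
# Note this assumes the only '-' in the URL is the one before the image
# number.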
|
b0ttl3z/SickRage
|
sickbeard/metadata/helpers.py
|
Python
|
gpl-3.0
| 1,452
| 0.001377
|
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyAzureMgmtDeploymentmanager(PythonPackage):
"""Microsoft Azure Deployment Manager Client Library for Python."""
homepage = "https://github.com/Azure/azure-sdk-for-python"
pypi = "azure-mgmt-deploymentmanager/azure-mgmt-deploymentmanager-0.2.0.zip"
version('0.2.0', sha256='46e342227993fc9acab1dda42f2eb566b522a8c945ab9d0eea56276b46f6d730')
depends_on('py-setuptools', type='build')
depends_on('py-msrest@0.5.0:', type=('build', 'run'))
depends_on('py-msrestazure@0.4.32:1', type=('build', 'run'))
depends_on('py-azure-common@1.1:1', type=('build', 'run'))
depends_on('py-azure-mgmt-nspkg', when='^python@:2', type=('build', 'run'))
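# --- Illustrative note (editor addition) ---
# Spack range syntax used above: '@0.5.0:' means 0.5.0 or newer, and
# '@0.4.32:1' means any version from 0.4.32 up to and including the 1.x
# series (Spack ranges are inclusive). The when='^python@:2' constraint
# applies py-azure-mgmt-nspkg only when building against Python 2.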
|
LLNL/spack
|
var/spack/repos/builtin/packages/py-azure-mgmt-deploymentmanager/package.py
|
Python
|
lgpl-2.1
| 877
| 0.002281
|
# -*- coding: utf-8 -*-
#
# Copyright (C) 2008-2015 Sébastien Helleu <flashcode@flashtux.org>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""
Documentation generator for WeeChat: build include files with commands,
options, infos, infolists, hdata and completions for WeeChat core and
plugins.
Instructions to build config files yourself in WeeChat directories (replace
all paths with your path to WeeChat):
1. run WeeChat and load this script, with following command:
/python load ~/src/weechat/doc/docgen.py
2. change path to build in your doc/ directory:
/set plugins.var.python.docgen.path "~/src/weechat/doc"
3. run docgen command:
/docgen
Note: it is recommended to load only this script when building doc.
Files should be in ~/src/weechat/doc/xx/autogen/ (where xx is language).
"""
from __future__ import print_function
SCRIPT_NAME = 'docgen'
SCRIPT_AUTHOR = 'Sébastien Helleu <flashcode@flashtux.org>'
SCRIPT_VERSION = '0.1'
SCRIPT_LICENSE = 'GPL3'
SCRIPT_DESC = 'Documentation generator for WeeChat'
SCRIPT_COMMAND = 'docgen'
IMPORT_OK = True
# pylint: disable=wrong-import-position
try:
import gettext
import hashlib
import os
import re
from collections import defaultdict
from operator import itemgetter
except ImportError as message:
print('Missing package(s) for {0}: {1}'.format(SCRIPT_NAME, message))
IMPORT_OK = False
try:
import weechat # pylint: disable=import-error
except ImportError:
print('This script must be run under WeeChat.')
print('Get WeeChat now at: https://weechat.org/')
IMPORT_OK = False
# default path where doc files will be written (should be doc/ in sources
# package tree)
# path must have subdirectories with languages and autogen directory:
# path
# |-- en
# | |-- autogen
# |-- fr
# | |-- autogen
# ...
DEFAULT_PATH = '~/src/weechat/doc'
# list of locales for which we want to build doc files to include
LOCALE_LIST = ('en_US', 'fr_FR', 'it_IT', 'de_DE', 'ja_JP', 'pl_PL')
# all commands/options/.. of following plugins will produce a file
# non-listed plugins will be ignored
# value: "c" = plugin may have many commands
# "o" = write config options for plugin
# if plugin is listed without "c", that means plugin has only one command
# /name (where "name" is name of plugin)
# Note: we consider core is a plugin called "weechat"
PLUGIN_LIST = {
'sec': 'o',
'weechat': 'co',
'alias': '',
'aspell': 'o',
'charset': 'o',
'exec': 'o',
'fifo': 'o',
'irc': 'co',
'logger': 'o',
'relay': 'o',
'script': 'o',
'perl': '',
'python': '',
'javascript': '',
'ruby': '',
'lua': '',
'tcl': '',
'guile': '',
'trigger': 'o',
'xfer': 'co',
}
# options to ignore
IGNORE_OPTIONS = (
r'aspell\.dict\..*',
r'aspell\.option\..*',
r'charset\.decode\..*',
r'charset\.encode\..*',
r'irc\.msgbuffer\..*',
r'irc\.ctcp\..*',
r'irc\.ignore\..*',
r'irc\.server\..*',
r'jabber\.server\..*',
r'logger\.level\..*',
r'logger\.mask\..*',
r'relay\.port\..*',
r'trigger\.trigger\..*',
r'weechat\.palette\..*',
r'weechat\.proxy\..*',
r'weechat\.bar\..*',
r'weechat\.debug\..*',
r'weechat\.notify\..*',
)
# completions to ignore
IGNORE_COMPLETIONS_ITEMS = (
'docgen.*',
'jabber.*',
'weeget.*',
)
class AutogenDoc(object):
"""A class to write auto-generated doc files."""
def __init__(self, directory, doc, name):
"""Initialize auto-generated doc file."""
self.filename = os.path.join(directory, doc, name + '.asciidoc')
self.filename_tmp = self.filename + '.tmp'
self._file = open(self.filename_tmp, 'w')
self.write('//\n')
self.write('// This file is auto-generated by script docgen.py.\n')
self.write('// DO NOT EDIT BY HAND!\n')
self.write('//\n')
def write(self, string):
"""Write a line in auto-generated doc file."""
self._file.write(string)
def update(self, obj_name, num_files, num_files_updated):
"""Update doc file if needed (if content has changed)."""
# close temp file
self._file.close()
# compute checksum on old file
try:
with open(self.filename, 'r') as _file:
shaold = hashlib.sha256(_file.read()).hexdigest()
except IOError:
shaold = ''
# compute checksum on new (temp) file
try:
with open(self.filename_tmp, 'r') as _file:
shanew = hashlib.sha256(_file.read()).hexdigest()
except IOError:
shanew = ''
# compare checksums
if shaold != shanew:
# update doc file
if os.path.exists(self.filename):
os.unlink(self.filename)
os.rename(self.filename_tmp, self.filename)
num_files_updated['total1'] += 1
num_files_updated['total2'] += 1
num_files_updated[obj_name] += 1
else:
os.unlink(self.filename_tmp)
# update counters
num_files['total1'] += 1
num_files['total2'] += 1
num_files[obj_name] += 1
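# --- Illustrative sketch (editor addition): the update-if-changed pattern in
# AutogenDoc.update, as a standalone helper. ``_sha256_of`` is a hypothetical
# name used only for this example; binary mode keeps it Python 2/3 safe.
def _sha256_of(path):
    """Return the SHA-256 hex digest of a file, or '' if it is unreadable."""
    try:
        with open(path, 'rb') as _file:
            return hashlib.sha256(_file.read()).hexdigest()
    except IOError:
        return ''
# A file is rewritten only when _sha256_of(old) != _sha256_of(new), so the
# timestamps of unchanged autogen files are preserved.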
def get_commands():
"""
Get list of WeeChat/plugins commands as dictionary with 3 indexes: plugin,
command, xxx.
"""
commands = defaultdict(lambda: defaultdict(defaultdict))
infolist = weechat.infolist_get('hook', '', 'command')
while weechat.infolist_next(infolist):
plugin = weechat.infolist_string(infolist, 'plugin_name') or 'weechat'
if plugin in PLUGIN_LIST:
command = weechat.infolist_string(infolist, 'command')
if command == plugin or 'c' in PLUGIN_LIST[plugin]:
for key in ('description', 'args', 'args_description',
'completion'):
commands[plugin][command][key] = \
weechat.infolist_string(infolist, key)
weechat.infolist_free(infolist)
return commands
def get_options():
"""
Get list of WeeChat/plugins config options as dictionary with 4 indexes:
config, section, option, xxx.
"""
options = \
defaultdict(lambda: defaultdict(lambda: defaultdict(defaultdict)))
infolist = weechat.infolist_get('option', '', '')
while weechat.infolist_next(infolist):
full_name = weechat.infolist_string(infolist, 'full_name')
if not re.search('|'.join(IGNORE_OPTIONS), full_name):
config = weechat.infolist_string(infolist, 'config_name')
if config in PLUGIN_LIST and 'o' in PLUGIN_LIST[config]:
section = weechat.infolist_string(infolist, 'section_name')
option = weechat.infolist_string(infolist, 'option_name')
for key in ('type', 'string_values', 'default_value',
'description'):
options[config][section][option][key] = \
weechat.infolist_string(infolist, key)
for key in ('min', 'max', 'null_value_allowed'):
options[config][section][option][key] = \
weechat.infolist_integer(infolist, key)
weechat.infolist_free(infolist)
return options
def get_infos():
"""
Get list of WeeChat/plugins infos as dictionary with 3 indexes: plugin,
name, xxx.
"""
infos = defaultdict(lambda: defaultdict(defaultdict))
infolist = weechat.infolist_get('hook', '', 'info')
while weechat.infolist_next(infolist):
info_name = weechat.infolist_string(infolist, 'info_name')
plugin = weechat.infolist_string(infolist, 'plugin_name') or 'weechat'
for key in ('description', 'args_description'):
infos[plugin][info_name][key] = \
weechat.infolist_string(infolist, key)
weechat.infolist_free(infolist)
return infos
def get_infos_hashtable():
"""
Get list of WeeChat/plugins infos (hashtable) as dictionary with 3 indexes:
plugin, name, xxx.
"""
infos_hashtable = defaultdict(lambda: defaultdict(defaultdict))
infolist = weechat.infolist_get('hook', '', 'info_hashtable')
while weechat.infolist_next(infolist):
info_name = weechat.infolist_string(infolist, 'info_name')
plugin = weechat.infolist_string(infolist, 'plugin_name') or 'weechat'
for key in ('description', 'args_description', 'output_description'):
infos_hashtable[plugin][info_name][key] = \
weechat.infolist_string(infolist, key)
weechat.infolist_free(infolist)
return infos_hashtable
def get_infolists():
"""
Get list of WeeChat/plugins infolists as dictionary with 3 indexes: plugin,
name, xxx.
"""
infolists = defaultdict(lambda: defaultdict(defaultdict))
infolist = weechat.infolist_get('hook', '', 'infolist')
while weechat.infolist_next(infolist):
infolist_name = weechat.infolist_string(infolist, 'infolist_name')
plugin = weechat.infolist_string(infolist, 'plugin_name') or 'weechat'
for key in ('description', 'pointer_description', 'args_description'):
infolists[plugin][infolist_name][key] = \
weechat.infolist_string(infolist, key)
weechat.infolist_free(infolist)
return infolists
# pylint: disable=too-many-locals
def get_hdata():
"""
Get list of WeeChat/plugins hdata as dictionary with 3 indexes: plugin,
name, xxx.
"""
hdata = defaultdict(lambda: defaultdict(defaultdict))
infolist = weechat.infolist_get('hook', '', 'hdata')
while weechat.infolist_next(infolist):
hdata_name = weechat.infolist_string(infolist, 'hdata_name')
plugin = weechat.infolist_string(infolist, 'plugin_name') or 'weechat'
hdata[plugin][hdata_name]['description'] = \
weechat.infolist_string(infolist, 'description')
variables = ''
variables_update = ''
lists = ''
ptr_hdata = weechat.hdata_get(hdata_name)
if ptr_hdata:
hdata2 = []
string = weechat.hdata_get_string(ptr_hdata, 'var_keys_values')
if string:
for item in string.split(','):
key = item.split(':')[0]
var_offset = weechat.hdata_get_var_offset(ptr_hdata, key)
var_array_size = \
weechat.hdata_get_var_array_size_string(ptr_hdata, '',
key)
if var_array_size:
var_array_size = \
', array_size: "{0}"'.format(var_array_size)
var_hdata = weechat.hdata_get_var_hdata(ptr_hdata, key)
if var_hdata:
var_hdata = ', hdata: "{0}"'.format(var_hdata)
type_string = weechat.hdata_get_var_type_string(ptr_hdata,
key)
hdata2.append({
'offset': var_offset,
'text': '\'{0}\' ({1})'.format(key, type_string),
'textlong': '\'{0}\' ({1}{2}{3})'.format(
key, type_string, var_array_size, var_hdata),
'update': weechat.hdata_update(
ptr_hdata, '', {'__update_allowed': key}),
})
hdata2 = sorted(hdata2, key=itemgetter('offset'))
for item in hdata2:
variables += '*** {0}\n'.format(item['textlong'])
if item['update']:
variables_update += '*** {0}\n'.format(item['text'])
if weechat.hdata_update(ptr_hdata, '',
{'__create_allowed': ''}):
variables_update += '*** \'__create\'\n'
if weechat.hdata_update(ptr_hdata, '',
{'__delete_allowed': ''}):
variables_update += '*** \'__delete\'\n'
hdata[plugin][hdata_name]['vars'] = variables
hdata[plugin][hdata_name]['vars_update'] = variables_update
string = weechat.hdata_get_string(ptr_hdata, 'list_keys')
if string:
for item in sorted(string.split(',')):
lists += '*** \'{0}\'\n'.format(item)
hdata[plugin][hdata_name]['lists'] = lists
weechat.infolist_free(infolist)
return hdata
def get_completions():
"""
Get list of WeeChat/plugins completions as dictionary with 3 indexes:
plugin, item, xxx.
"""
completions = defaultdict(lambda: defaultdict(defaultdict))
infolist = weechat.infolist_get('hook', '', 'completion')
while weechat.infolist_next(infolist):
completion_item = weechat.infolist_string(infolist, 'completion_item')
if not re.search('|'.join(IGNORE_COMPLETIONS_ITEMS), completion_item):
plugin = weechat.infolist_string(infolist, 'plugin_name') or \
'weechat'
completions[plugin][completion_item]['description'] = \
weechat.infolist_string(infolist, 'description')
weechat.infolist_free(infolist)
return completions
def get_url_options():
"""
Get list of URL options as list of dictionaries.
"""
url_options = []
infolist = weechat.infolist_get('url_options', '', '')
while weechat.infolist_next(infolist):
url_options.append({
'name': weechat.infolist_string(infolist, 'name').lower(),
'option': weechat.infolist_integer(infolist, 'option'),
'type': weechat.infolist_string(infolist, 'type'),
'constants': weechat.infolist_string(
infolist, 'constants').lower().replace(',', ', ')
})
weechat.infolist_free(infolist)
return url_options
def get_irc_colors():
"""
Get list of IRC colors as list of dictionaries.
"""
irc_colors = []
infolist = weechat.infolist_get('irc_color_weechat', '', '')
while weechat.infolist_next(infolist):
irc_colors.append({
'color_irc': weechat.infolist_string(infolist, 'color_irc'),
'color_weechat': weechat.infolist_string(infolist,
'color_weechat'),
})
weechat.infolist_free(infolist)
return irc_colors
def get_plugins_priority():
"""
Get priority of default WeeChat plugins as a dictionary.
"""
plugins_priority = {}
infolist = weechat.infolist_get('plugin', '', '')
while weechat.infolist_next(infolist):
name = weechat.infolist_string(infolist, 'name')
priority = weechat.infolist_integer(infolist, 'priority')
if priority in plugins_priority:
plugins_priority[priority].append(name)
else:
plugins_priority[priority] = [name]
weechat.infolist_free(infolist)
return plugins_priority
# pylint: disable=too-many-locals, too-many-branches, too-many-statements
# pylint: disable=too-many-nested-blocks
def docgen_cmd_cb(data, buf, args):
"""Callback for /docgen command."""
if args:
locales = args.split(' ')
else:
locales = LOCALE_LIST
commands = get_commands()
options = get_options()
infos = get_infos()
infos_hashtable = get_infos_hashtable()
infolists = get_infolists()
hdata = get_hdata()
completions = get_completions()
url_options = get_url_options()
irc_colors = get_irc_colors()
plugins_priority = get_plugins_priority()
# get path and replace ~ by home if needed
path = weechat.config_get_plugin('path')
if path.startswith('~'):
path = os.environ['HOME'] + path[1:]
# write to doc files, by locale
num_files = defaultdict(int)
num_files_updated = defaultdict(int)
# pylint: disable=undefined-variable
translate = lambda s: (s and _(s)) or s
escape = lambda s: s.replace('|', '\\|')
for locale in locales:
for key in num_files:
if key != 'total2':
num_files[key] = 0
num_files_updated[key] = 0
trans = gettext.translation('weechat',
weechat.info_get('weechat_localedir', ''),
languages=[locale + '.UTF-8'],
fallback=True)
trans.install()
directory = path + '/' + locale[0:2] + '/autogen'
if not os.path.isdir(directory):
weechat.prnt('',
'{0}docgen error: directory "{1}" does not exist'
''.format(weechat.prefix('error'), directory))
continue
# write commands
for plugin in commands:
doc = AutogenDoc(directory, 'user', plugin + '_commands')
for i, command in enumerate(sorted(commands[plugin])):
if i > 0:
doc.write('\n')
_cmd = commands[plugin][command]
args = translate(_cmd['args'])
args_formats = args.split(' || ')
desc = translate(_cmd['description'])
args_desc = translate(_cmd['args_description'])
doc.write('[[command_{0}_{1}]]\n'.format(plugin, command))
doc.write('[command]*`{0}`* {1}::\n\n'.format(command, desc))
doc.write('----\n')
prefix = '/' + command + ' '
if args_formats != ['']:
for fmt in args_formats:
doc.write(prefix + fmt + '\n')
prefix = ' ' * len(prefix)
if args_desc:
doc.write('\n')
for line in args_desc.split('\n'):
doc.write(line + '\n')
doc.write('----\n')
doc.update('commands', num_files, num_files_updated)
# write config options
for config in options:
doc = AutogenDoc(directory, 'user', config + '_options')
i = 0
for section in sorted(options[config]):
for option in sorted(options[config][section]):
if i > 0:
doc.write('\n')
i += 1
_opt = options[config][section][option]
opt_type = _opt['type']
string_values = _opt['string_values']
default_value = _opt['default_value']
opt_min = _opt['min']
opt_max = _opt['max']
null_value_allowed = _opt['null_value_allowed']
desc = translate(_opt['description'])
type_nls = translate(opt_type)
values = ''
if opt_type == 'boolean':
values = 'on, off'
elif opt_type == 'integer':
if string_values:
values = string_values.replace('|', ', ')
else:
values = '{0} .. {1}'.format(opt_min, opt_max)
elif opt_type == 'string':
if opt_max <= 0:
values = _('any string')
elif opt_max == 1:
values = _('any char')
elif opt_max > 1:
values = '{0} ({1}: {2})'.format(_('any string'),
_('max chars'),
opt_max)
else:
values = _('any string')
default_value = '"{0}"'.format(
default_value.replace('"', '\\"'))
elif opt_type == 'color':
values = _('a WeeChat color name (default, black, '
'(dark)gray, white, (light)red, '
'(light)green, brown, yellow, (light)blue, '
'(light)magenta, (light)cyan), a terminal '
'color number or an alias; attributes are '
'allowed before color (for text color '
'only, not background): \"*\" for bold, '
'\"!\" for reverse, \"/\" for italic, '
'\"_\" for underline')
doc.write('* [[option_{0}.{1}.{2}]] *{3}.{4}.{5}*\n'
''.format(config, section, option, config,
section, option))
doc.write('** {0}: `{1}`\n'.format(_('description'), desc))
doc.write('** {0}: {1}\n'.format(_('type'), type_nls))
doc.write('** {0}: {1} ({2}: `{3}`)\n'
''.format(_('values'), values,
_('default value'), default_value))
if null_value_allowed:
doc.write('** {0}\n'.format(
_('undefined value allowed (null)')))
doc.update('options', num_files, num_files_updated)
# write IRC colors
doc = AutogenDoc(directory, 'user', 'irc_colors')
doc.write('[width="30%",cols="^2m,3",options="header"]\n')
doc.write('|===\n')
doc.write('| {0} | {1}\n\n'
''.format(_('IRC color'), _('WeeChat color')))
for color in irc_colors:
doc.write('| {0} | {1}\n'
''.format(escape(color['color_irc']),
escape(color['color_weechat'])))
doc.write('|===\n')
doc.update('irc_colors', num_files, num_files_updated)
# write infos hooked
doc = AutogenDoc(directory, 'plugin_api', 'infos')
doc.write('[width="100%",cols="^1,^2,6,6",options="header"]\n')
doc.write('|===\n')
doc.write('| {0} | {1} | {2} | {3}\n\n'
''.format(_('Plugin'), _('Name'), _('Description'),
_('Arguments')))
for plugin in sorted(infos):
for info in sorted(infos[plugin]):
_inf = infos[plugin][info]
desc = translate(_inf['description'])
args_desc = translate(_inf['args_description'] or '-')
doc.write('| {0} | {1} | {2} | {3}\n\n'
''.format(escape(plugin), escape(info),
escape(desc), escape(args_desc)))
doc.write('|===\n')
doc.update('infos', num_files, num_files_updated)
# write infos (hashtable) hooked
doc = AutogenDoc(directory, 'plugin_api', 'infos_hashtable')
doc.write('[width="100%",cols="^1,^2,6,6,6",options="header"]\n')
doc.write('|===\n')
doc.write('| {0} | {1} | {2} | {3} | {4}\n\n'
''.format(_('Plugin'), _('Name'), _('Description'),
_('Hashtable (input)'), _('Hashtable (output)')))
for plugin in sorted(infos_hashtable):
for info in sorted(infos_hashtable[plugin]):
_inh = infos_hashtable[plugin][info]
desc = translate(_inh['description'])
args_desc = translate(_inh['args_description'])
output_desc = translate(_inh['output_description']) or '-'
doc.write('| {0} | {1} | {2} | {3} | {4}\n\n'
''.format(escape(plugin), escape(info),
escape(desc), escape(args_desc),
escape(output_desc)))
doc.write('|===\n')
doc.update('infos_hashtable', num_files, num_files_updated)
# write infolists hooked
doc = AutogenDoc(directory, 'plugin_api', 'infolists')
doc.write('[width="100%",cols="^1,^2,5,5,5",options="header"]\n')
doc.write('|===\n')
doc.write('| {0} | {1} | {2} | {3} | {4}\n\n'
''.format(_('Plugin'), _('Name'), _('Description'),
_('Pointer'), _('Arguments')))
for plugin in sorted(infolists):
for infolist in sorted(infolists[plugin]):
_inl = infolists[plugin][infolist]
desc = translate(_inl['description'])
pointer_desc = translate(_inl['pointer_description']) or '-'
args_desc = translate(_inl['args_description']) or '-'
doc.write('| {0} | {1} | {2} | {3} | {4}\n\n'
''.format(escape(plugin), escape(infolist),
escape(desc), escape(pointer_desc),
escape(args_desc)))
doc.write('|===\n')
doc.update('infolists', num_files, num_files_updated)
# write hdata hooked
doc = AutogenDoc(directory, 'plugin_api', 'hdata')
for plugin in sorted(hdata):
for hdata_name in sorted(hdata[plugin]):
anchor = 'hdata_{0}'.format(hdata_name)
_hda = hdata[plugin][hdata_name]
desc = translate(_hda['description'])
variables = _hda['vars']
variables_update = _hda['vars_update']
lists = _hda['lists']
doc.write('* [[{0}]]<<{0},\'{1}\'>>: {2}\n'
''.format(escape(anchor), escape(hdata_name),
escape(desc)))
doc.write('** {0}: {1}\n'.format(_('plugin'),
escape(plugin)))
doc.write('** {0}:\n{1}'.format(_('variables'),
escape(variables)))
if variables_update:
doc.write('** {0}:\n{1}'.format(
_('update allowed'),
escape(variables_update)))
if lists:
doc.write('** {0}:\n{1}'.format(_('lists'),
escape(lists)))
doc.update('hdata', num_files, num_files_updated)
# write completions hooked
doc = AutogenDoc(directory, 'plugin_api', 'completions')
doc.write('[width="65%",cols="^1,^2,8",options="header"]\n')
doc.write('|===\n')
doc.write('| {0} | {1} | {2}\n\n'
''.format(_('Plugin'), _('Name'), _('Description')))
for plugin in sorted(completions):
for completion_item in sorted(completions[plugin]):
_cmp = completions[plugin][completion_item]
desc = translate(_cmp['description'])
doc.write('| {0} | {1} | {2}\n\n'
''.format(escape(plugin), escape(completion_item),
escape(desc)))
doc.write('|===\n')
doc.update('completions', num_files, num_files_updated)
# write url options
doc = AutogenDoc(directory, 'plugin_api', 'url_options')
doc.write('[width="100%",cols="2,^1,7",options="header"]\n')
doc.write('|===\n')
doc.write('| {0} | {1} | {2}\n\n'
''.format(_('Option'), _('Type'),
_('Constants') + ' ^(1)^'))
for option in url_options:
constants = option['constants']
if constants:
constants = ' ' + constants
doc.write('| {0} | {1} |{2}\n\n'
''.format(escape(option['name']),
escape(option['type']),
escape(constants)))
doc.write('|===\n')
doc.update('url_options', num_files, num_files_updated)
# write plugins priority
doc = AutogenDoc(directory, 'plugin_api', 'plugins_priority')
for priority in sorted(plugins_priority, reverse=True):
plugins = ', '.join(sorted(plugins_priority[priority]))
doc.write('. {0} ({1})\n'.format(escape(plugins), priority))
doc.update('plugins_priority', num_files, num_files_updated)
# write counters
weechat.prnt('',
'docgen: {0}: {1} files, {2} updated'
''.format(locale,
num_files['total1'],
num_files_updated['total1']))
weechat.prnt('',
'docgen: total: {0} files, {1} updated'
''.format(num_files['total2'], num_files_updated['total2']))
return weechat.WEECHAT_RC_OK
def docgen_completion_cb(data, completion_item, buf, completion):
"""Callback for completion."""
for locale in LOCALE_LIST:
weechat.hook_completion_list_add(completion, locale, 0,
weechat.WEECHAT_LIST_POS_SORT)
return weechat.WEECHAT_RC_OK
if __name__ == '__main__' and IMPORT_OK:
if weechat.register(SCRIPT_NAME, SCRIPT_AUTHOR, SCRIPT_VERSION,
SCRIPT_LICENSE, SCRIPT_DESC, '', ''):
weechat.hook_command(SCRIPT_COMMAND,
'Documentation generator.',
'[locales]',
'locales: list of locales to build (by default '
'build all locales)',
'%(docgen_locales)|%*',
'docgen_cmd_cb', '')
weechat.hook_completion('docgen_locales', 'locales for docgen',
'docgen_completion_cb', '')
if not weechat.config_is_set_plugin('path'):
weechat.config_set_plugin('path', DEFAULT_PATH)
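# Usage sketch, assuming SCRIPT_COMMAND is "docgen" and the plugin option
# "path" points at the documentation sources (the locale names below are
# examples and must appear in LOCALE_LIST):
#
#   /docgen              -> build the docs for every locale in LOCALE_LIST
#   /docgen en_US fr_FR  -> build the docs only for the listed locales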
|
Mkaysi/weechat
|
doc/docgen.py
|
Python
|
gpl-3.0
| 30,574
| 0.000065
|
from django.contrib.auth.decorators import login_required
from django.views.generic import TemplateView
from flop.cooking.forms import MealForm, MealContributionFormSet
from flop.decorators import view_decorator
@view_decorator(login_required)
class IndexView(TemplateView):
template_name = 'dashboard/index.html'
|
sbrandtb/flop
|
flop/dashboard/views.py
|
Python
|
mit
| 320
| 0
|
"""
Copyright (c) 2015 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the MIT license. See the LICENSE file for details.
"""
import os
import shutil
import grp
import pwd
from cct.module import Module
from cct.lib.file_utils import create_dir
class File(Module):
def copy(self, source, destination):
"""
Copies file.
Args:
source: path to file
destination: path where file should be copied
"""
create_dir(destination)
shutil.copy(source, destination)
def link(self, source, destination):
"""
        Creates a symbolic link.
        Args:
            source: path the symbolic link points to
            destination: path of the symbolic link itself
"""
create_dir(destination)
os.symlink(source, destination)
def move(self, source, destination):
"""
Moves file.
Args:
source: path to file
destination: path where file should be moved
"""
create_dir(destination)
shutil.move(source, destination)
def remove(self, path):
"""
        Removes a file or directory.
        Args:
            path: path to the file or directory to be removed
"""
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.unlink(path)
def chown(self, owner, group, path, recursive=False):
"""
Change the ownership of a path.
Args:
owner: the owner (numeric or name) to change ownership to
group: the group (numeric or name) to change groupship to
path: the path to operate on
recursive: if path is a directory, recursively change ownership for all
paths within
"""
# supplied owner/group might be symbolic (e.g. 'wheel') or numeric.
# Try interpreting symbolically first
        try:
            gid = grp.getgrnam(group).gr_gid
        except KeyError:
            gid = int(group, 0)
        try:
            uid = pwd.getpwnam(owner).pw_uid
        except KeyError:
            uid = int(owner, 0)
        # note: unlike the chown(1) CLI, os.chown takes the path first,
        # then the numeric uid and gid
        os.chown(path, uid, gid)
if recursive and os.path.isdir(path):
for dirpath, dirnames, filenames in os.walk(path):
for f in (dirnames + filenames):
os.chown(os.path.join(dirpath, f), uid, gid)
def chmod(self, mode, path, recursive=False):
"""
Change the permissions of a path.
Args:
path: the path to operate on
mode: the numeric mode to set
recursive: whether to change mode recursively
"""
        mode = int(mode, 0)
        # note: os.chmod takes the path first, then the numeric mode
        os.chmod(path, mode)
if recursive and os.path.isdir(path):
for dirpath, dirnames, filenames in os.walk(path):
for f in (dirnames + filenames):
os.chmod(os.path.join(dirpath, f), mode)
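# Hypothetical usage sketch: cct instantiates modules from its own runtime, so
# the instance name `f` and all paths below are assumptions, not part of this
# module.
#
#   f.copy('/tmp/app.conf', '/etc/app/app.conf')
#   f.chown('appuser', 'appgroup', '/etc/app', recursive=True)
#   f.chmod('0o644', '/etc/app/app.conf')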
|
containers-tools/base
|
base/file.py
|
Python
|
mit
| 3,057
| 0.001636
|
import urllib, urllib2, re, cookielib, os, tempfile, json, md5, time
from BeautifulSoup import BeautifulSoup
import proxy
from gomutil import *
class NotLoggedInException(Exception):
pass
def request(url, params=None, headers={}, opener=None):
data = params and urllib.urlencode(params)
req = urllib2.Request(url, data, headers)
if opener:
response = opener.open(req)
else:
response = urllib2.urlopen(req)
r = response.read()
response.close()
return r
class VodSet(object):
def __init__(self, params):
self.params = params
self._fix_params()
self.xml = request('http://gox.gomtv.net/cgi-bin/gox_vod_sfile.cgi', self.params)
def _fix_params(self):
if 'uip' not in self.params:
self.params["uip"] = request('http://www.gomtv.net/webPlayer/getIP.gom')
self.params["adstate"] = "0"
self.params["goxkey"] = "qoaEl"
keys = ["leagueid", "conid", "goxkey", "level", "uno", "uip", "adstate", "vjoinid", "nid"]
hashstr = "".join([self.params[key] for key in keys])
self.params['goxkey'] = md5.new(hashstr).hexdigest()
def get_error(self):
if re.search('purchase_btn', self.xml):
return "Available for ticket holders only."
else:
return "Unknown error"
def _get_href(self):
match = re.search('<REF\s+href="(.+)"\s+reftype="vod"', self.xml)
if match:
            href = match.group(1).replace('&amp;', '&').replace(' ', '%20')
remote_ip = re.search("//([0-9.]+)/", href).group(1)
payload = gom_key_payload(remote_ip, self.params)
return (href, remote_ip, payload)
else:
return (None, None, None)
def get_url(self):
href, remote_ip, payload = self._get_href()
return href and "%s&key=%s" % (href, gom_stream_key(remote_ip, payload))
def get_proxy_url(self):
href, remote_ip, payload = self._get_href()
return href and proxy.url(href, payload)
class GOMtv(object):
VODLIST_ORDER_MOST_RECENT = 1
VODLIST_ORDER_MOST_VIEWED = 2
VODLIST_ORDER_MOST_COMMENTED = 3
VODLIST_TYPE_ALL = 0
VODLIST_TYPE_CODE_S = 32
VODLIST_TYPE_CODE_A = 16
VODLIST_TYPE_UP_DOWN = 64
AUTH_GOMTV = 1
AUTH_TWITTER = 2
AUTH_FACEBOOK = 3
LEVEL = {
'EHQ': 65,
'HQ': 60,
'SQ': 6
}
OLDLEVEL = {
'EHQ': 50,
'HQ': 50,
'SQ': 5
}
def __init__(self, cookie_path=None):
self.vod_sets = {}
if cookie_path is None:
cookie_path = "%s%scookies_gomtv.txt" % (tempfile.gettempdir(), os.path.sep)
self.cookie_jar = cookielib.LWPCookieJar(cookie_path)
if not os.path.exists(os.path.dirname(cookie_path)):
os.makedirs(os.path.dirname(cookie_path))
if (os.path.isfile(cookie_path) and os.path.getsize(cookie_path) > 0):
self.cookie_jar.load(cookie_path,True)
self.opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(self.cookie_jar))
def _request(self, url, data=None, headers={}):
r = request(url, data, headers, opener=self.opener)
# Ugly hack required to fix cookie names.
# Guessing there's some javascript somewhere on that mess of a website
# that uppercases the cookies..?
for cookie in self.cookie_jar:
if cookie.name.startswith("SES_"):
cookie.name = cookie.name.upper()
self.cookie_jar.save(None,True)
return r
def set_cookie(self, name, value):
exp = time.time() + 24 * 60 * 60
cookie = cookielib.Cookie(version=0, name=name, value=value, port=None, port_specified=False,
domain='.gomtv.net', domain_specified=True, domain_initial_dot=True,
path='/', path_specified=True, secure=False, expires=exp,
discard=False, comment=None, comment_url=None, rest={})
self.cookie_jar.set_cookie(cookie)
def login(self, username, password, auth_type=AUTH_GOMTV):
self.cookie_jar.clear()
if auth_type == self.AUTH_GOMTV:
form = {
"mb_username": username,
"mb_password": password,
"cmd": "login",
"rememberme": "1"
}
ret = self._request("https://ssl.gomtv.net/userinfo/loginProcess.gom", form, {'Referer': 'http://www.gomtv.net/'})
cookies = [cookie.name for cookie in self.cookie_jar if cookie.domain == '.gomtv.net']
return 'SES_MEMBERNO' in cookies
elif auth_type == self.AUTH_TWITTER:
data = self._request("http://www.gomtv.net/twitter/redirect.gom?burl=/index.gom")
location = re.search("document.location.replace\(\"(.*)\"\)", data).group(1)
oauth_token = re.search("setCookie\('oauth_token', \"(.*)\"", data).group(1)
oauth_token_secret = re.search("setCookie\('oauth_token_secret', \"(.*)\"", data).group(1)
self.set_cookie("oauth_token", oauth_token)
self.set_cookie("oauth_token_secret", oauth_token_secret)
data = self._request(location)
soup = BeautifulSoup(data)
oauth_token = soup.find("input", {"id": "oauth_token"})["value"]
auth_token = soup.find("input", {"name": "authenticity_token"})["value"]
url = soup.find("form")["action"]
data = self._request(url, {"oauth_token": oauth_token,
"session[username_or_email]": username,
"session[password]": password,
"submit": "Sign in",
"authenticity_token": auth_token})
refresh = re.search('<meta http-equiv="refresh" content="0;url=(.*)">', data)
if refresh is None:
return False
else:
location = refresh.group(1)
data = self._request(location)
return True
elif auth_type == self.AUTH_FACEBOOK:
data = self._request("http://www.gomtv.net/facebook/index.gom?burl=/index.gom")
soup = BeautifulSoup(data)
# already logged in
if data.startswith("<script>"):
return False
url = soup.find("form")["action"]
payload = {}
for field in soup.findAll("input"):
if not field["name"] == "charset_test":
payload[field["name"]] = field["value"]
payload["email"] = username
payload["pass"] = password
data = self._request(url, payload)
if re.search("<title>Logga in", data) is None:
return True
else:
return False
def get_league_list(self):
soup = BeautifulSoup(self._request("http://www.gomtv.net/view/channelDetails.gom?gameid=0"))
leagues = soup.findAll("dl", "league_list")
result = []
for league in leagues:
result.append({"id": league.find("a")["href"].replace("/", ""),
"logo": league.find("img")["src"],
"name": league.find("strong").find(text=True)})
return result
def get_most_recent_list(self, page=1):
return self.get_vod_list(league=None, page=page)
def get_vod_list(self, order=1, page=1, league=None, type=VODLIST_TYPE_ALL):
if league is None:
url = "http://www.gomtv.net/videos/index.gom?page=%d" % (page)
else:
url = "http://www.gomtv.net/%s/vod/?page=%d&order=%d<ype=%d" % (league, page, order, type)
soup = BeautifulSoup(self._request(url))
thumb_links = soup.findAll("td", {"class": ["vod_info", "listOff"]})
nums = soup.findAll("a", "num", href=re.compile("page=[0-9]+"))
if len(nums) > 0:
last = int(re.search("page=([0-9]+)",
nums[-1]["href"]).group(1))
else:
last = page
vods = []
result = {"order": order,
"page": page,
"vods": vods,
"has_previous": page is not 1,
"has_next": page is not last}
if page > last or page < 1:
return result
for thumb_link in thumb_links:
href = thumb_link.find("a", {'class': ["vod_link", "vodlink"]})["href"].replace("/./", "/")
thumb = thumb_link.find("img", {'class': ["v_thumb", "vodthumb"]})
vods.append({"url": "http://www.gomtv.net%s" % href, "preview": thumb["src"], "title": thumb["alt"]})
return result
def _get_set_params(self, body):
flashvars = re.search('flashvars\s+=\s+([^;]+);', body).group(1)
return json.loads(flashvars)
def extract_jsonData(self, body):
jsondata = re.search('var\s+jsonData\s+=\s+eval\s+\(([^)]*)\)', body).group(1)
return json.loads(jsondata)
def get_vod_set(self, vod_url, quality="EHQ"):
self.set_cookie('SES_VODLEVEL', str(self.LEVEL[quality]))
self.set_cookie('SES_VODOLDLEVEL', str(self.OLDLEVEL[quality]))
r = self._request(vod_url)
flashvars = self._get_set_params(r)
if flashvars['uno'] == '0':
raise NotLoggedInException
# 0 english, 1 korean
jsondata = self.extract_jsonData(r)[0]
soup = BeautifulSoup(r)
vodlist = soup.find('ul', id='vodList')
sets = vodlist.findAll("a")
for (i, s) in enumerate(sets):
params = dict(flashvars, **jsondata[i])
yield {"params": params,
"title": "%i - %s" % (i+1, s['title'])}
|
jhawthorn/plugin.video.gomtv.net
|
gomtv.py
|
Python
|
gpl-3.0
| 9,910
| 0.005045
|
../../../../share/pyshared/jockey/xorg_driver.py
|
Alberto-Beralix/Beralix
|
i386-squashfs-root/usr/lib/python2.7/dist-packages/jockey/xorg_driver.py
|
Python
|
gpl-3.0
| 48
| 0.020833
|
#
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2002-2006 Donald N. Allingham
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard Python modules
#
#-------------------------------------------------------------------------
from ....const import GRAMPS_LOCALE as glocale
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from .. import Rule
from ....lib.familyreltype import FamilyRelType
#-------------------------------------------------------------------------
#
# HasRelationship
#
#-------------------------------------------------------------------------
class HasRelationship(Rule):
"""Rule that checks for a person who has a particular relationship"""
labels = [ _('Number of relationships:'),
_('Relationship type:'),
_('Number of children:') ]
name = _('People with the <relationships>')
description = _("Matches people with a particular relationship")
category = _('Family filters')
    def apply(self, db, person):
rel_type = 0
cnt = 0
num_rel = len(person.get_family_handle_list())
if self.list[1]:
specified_type = FamilyRelType()
specified_type.set_from_xml_str(self.list[1])
# count children and look for a relationship type match
for f_id in person.get_family_handle_list():
f = db.get_family_from_handle(f_id)
if f:
cnt = cnt + len(f.get_child_ref_list())
if self.list[1] and specified_type == f.get_relationship():
rel_type = 1
# if number of relations specified
if self.list[0]:
try:
v = int(self.list[0])
            except ValueError:
return False
if v != num_rel:
return False
        # number of children
if self.list[2]:
try:
v = int(self.list[2])
            except ValueError:
return False
if v != cnt:
return False
# relation
if self.list[1]:
return rel_type == 1
else:
return True
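# Usage sketch (the values are hypothetical; rules are normally instantiated
# by the Gramps filter framework): a rule whose list is ['2', 'Married', '3']
# matches people who have exactly 2 relationships, at least one family of the
# 'Married' type, and 3 children in total.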
|
SNoiraud/gramps
|
gramps/gen/filters/rules/person/_hasrelationship.py
|
Python
|
gpl-2.0
| 3,035
| 0.005601
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os,sys, re, json, requests
from oxapi import *
def get_a_task(ox):
folder = ox.get_standard_folder('tasks')
task = list(ox.get_tasks(folder.id))[0]
return task
def upload(bean, args=({'content': None, 'file': None, 'mimetype': 'text/plain', 'name': 'attachment.txt'},)):
from requests.packages.urllib3.fields import RequestField
from requests.packages.urllib3.filepost import encode_multipart_formdata
ox = bean._ox
url = ox._url('attachment', 'attach')
params = ox._params()
meta = {'module': bean.module_type,
#'attached': bean.id,
'folder': bean.folder_id}
    counter = 0
    fields = []
for data in args:
# json metadata
        rf = RequestField(name='json_' + str(counter), data=json.dumps(meta))
rf.make_multipart(content_disposition='form-data')
fields.append(rf)
# content: data or file to read
filename = 'attachment.txt'
mimetype = 'text/plain'
content = None
if 'content' in data:
content = data['content']
else:
if 'file' in data:
filename = data['file']
if os.path.isfile(filename):
with open(filename, 'rb') as fh:
content = fh.read()
if content is None:
#TODO: process error
return None
if 'name' in data:
filename = data['name']
mimetype = 'text/plain'
if 'mimetype' in data:
mimetype = data['mimetype']
rf = RequestField(name='file_' + str(counter), data=content, filename=filename)
rf.make_multipart(content_disposition='form-data',content_type=mimetype)
        fields.append(rf)
        # advance the counter so each attachment gets its own json_N/file_N pair
        counter += 1
post_body, content_type = encode_multipart_formdata(fields)
content_type = ''.join(('multipart/mixed',) + content_type.partition(';')[1:])
headers = {'Content-Type': content_type}
response = requests.post(url, cookies=ox._cookies, params=params, headers=headers, data=post_body)
if response and response.status_code == 200:
        regex = r'\((\{.*\})\)'
match = re.search(regex, response.content)
if match:
return json.loads(match.group(1))
return None
def create_attachment(ox, task):
from requests.packages.urllib3.fields import RequestField
from requests.packages.urllib3.filepost import encode_multipart_formdata
url = ox._url('attachment', 'attach')
params = ox._params()
json_0 = {'module': task.module_type,
'attached': task.id,
'folder': task.folder_id}
fields = []
rf = RequestField(name='json_0',data=json.dumps(json_0))
rf.make_multipart(content_disposition='form-data')
fields.append(rf)
rf = RequestField(name='file_0', data="TEXT", filename='attachment.txt')
rf.make_multipart(content_disposition='form-data',content_type='text/plain')
fields.append(rf)
post_body, content_type = encode_multipart_formdata(fields)
content_type = ''.join(('multipart/mixed',) + content_type.partition(';')[1:])
headers = {'Content-Type': content_type}
response = requests.post(url, cookies=ox._cookies, params=params, headers=headers, data=post_body)
if response and response.status_code == 200:
        regex = r'\((\{.*\})\)'
match = re.search(regex, response.content)
if match:
return json.loads(match.group(1))
return None
if __name__ == '__main__':
with OxHttpAPI.get_session() as ox:
task = get_a_task(ox)
# args = [{ 'file':'attachments_module.py' }]
# upload(task, args)
#create_attachment(ox,task)
#attachments = list(ox.get_attachments(task))
attachments = ox.get_attachments(task)
pass
|
bstrebel/OxAPI
|
test/_attachment.py
|
Python
|
gpl-2.0
| 3,939
| 0.010916
|
"""Tests for the deprecated version of the django-reversion API."""
from __future__ import with_statement
import datetime
from django.db import models, transaction
from django.test import TestCase
from django.core.management import call_command
import reversion
from reversion.models import Version, Revision, VERSION_ADD, VERSION_CHANGE, VERSION_DELETE
from reversion.revisions import RegistrationError
from reversion.tests import UTC
class ReversionTestModel(models.Model):
"""A test model for reversion."""
name = models.CharField(max_length=100)
class Meta:
app_label = "auth" # Hack: Cannot use an app_label that is under South control, due to http://south.aeracode.org/ticket/520
str_pk_gen = 0
def get_str_pk():
    global str_pk_gen
    str_pk_gen += 1
    return str(str_pk_gen)
class ReversionTestModelStrPrimary(models.Model):
"""A test model for reversion."""
id = models.CharField(
primary_key = True,
max_length = 100,
default = get_str_pk
)
name = models.CharField(max_length=100)
class Meta:
app_label = "auth" # Hack: Cannot use an app_label that is under South control, due to http://south.aeracode.org/ticket/520
class ReversionRegistrationTest(TestCase):
"""Tests the django-reversion registration functionality."""
def setUp(self):
"""Sets up the ReversionTestModel."""
reversion.register(ReversionTestModel)
def testCanRegisterModel(self):
"""Tests that a model can be registered."""
self.assertTrue(reversion.is_registered(ReversionTestModel))
# Check that duplicate registration is disallowed.
self.assertRaises(RegistrationError, lambda: reversion.register(ReversionTestModel))
def testCanUnregisterModel(self):
"""Tests that a model can be unregistered."""
reversion.unregister(ReversionTestModel)
try:
self.assertFalse(reversion.is_registered(ReversionTestModel))
# Check that duplicate unregistration is disallowed.
self.assertRaises(RegistrationError, lambda: reversion.unregister(ReversionTestModel))
finally:
# Re-register the model.
reversion.register(ReversionTestModel)
def tearDown(self):
"""Tears down the tests."""
reversion.unregister(ReversionTestModel)
class ReversionCreateTest(TestCase):
"""Tests the django-reversion revision creation functionality."""
model = ReversionTestModel
def setUp(self):
"""Sets up the ReversionTestModel."""
# Clear the database.
Revision.objects.all().delete()
self.model.objects.all().delete()
# Register the model.
reversion.register(self.model)
def testCanSaveWithNoRevision(self):
"""Tests that without an active revision, no model is saved."""
test = self.model.objects.create(name="test1.0")
self.assertEqual(Version.objects.get_for_object(test).count(), 0)
def testRevisionContextManager(self):
"""Tests that the revision context manager works."""
with reversion.revision:
test = self.model.objects.create(name="test1.0")
self.assertEqual(Version.objects.get_for_object(test).count(), 1)
def testRevisionDecorator(self):
"""Tests that the revision function decorator works."""
@reversion.revision.create_on_success
def create_revision():
return self.model.objects.create(name="test1.0")
self.assertEqual(Version.objects.get_for_object(create_revision()).count(), 1)
def testRevisionAbandonedOnError(self):
"""Tests that the revision is abandoned on error."""
# Create the first revision.
with reversion.revision:
test = self.model.objects.create(name="test1.0")
# Create the second revision.
try:
with reversion.revision:
test.name = "test1.1"
test.save()
raise Exception()
        except Exception:
transaction.rollback()
# Check that there is still only one revision.
self.assertEqual(Version.objects.get_for_object(test).count(), 1)
# Assert the revision is not invalid.
self.assertFalse(reversion.revision._revision_context_manager.is_invalid())
def tearDown(self):
"""Tears down the tests."""
# Unregister the model.
reversion.unregister(self.model)
# Clear the database.
Revision.objects.all().delete()
self.model.objects.all().delete()
class ReversionCreateStrPrimaryTest(ReversionCreateTest):
model = ReversionTestModelStrPrimary
class ReversionQueryTest(TestCase):
"""Tests that django-reversion can retrieve revisions using the api."""
model = ReversionTestModel
def setUp(self):
"""Sets up the ReversionTestModel."""
# Clear the database.
Revision.objects.all().delete()
self.model.objects.all().delete()
# Register the model.
reversion.register(self.model)
# Create some initial revisions.
with reversion.revision:
self.test = self.model.objects.create(name="test1.0")
with reversion.revision:
self.test.name = "test1.1"
self.test.save()
with reversion.revision:
self.test.name = "test1.2"
self.test.save()
def testCanGetVersions(self):
"""Tests that the versions for an obj can be retrieved."""
versions = Version.objects.get_for_object(self.test)
self.assertEqual(versions[0].field_dict["name"], "test1.0")
self.assertEqual(versions[1].field_dict["name"], "test1.1")
self.assertEqual(versions[2].field_dict["name"], "test1.2")
def testCanGetUniqueVersions(self):
"""Tests that the unique versions for an objext can be retrieved."""
with reversion.revision:
self.test.save()
versions = Version.objects.get_unique_for_object(self.test)
# Check correct version data.
self.assertEqual(versions[0].field_dict["name"], "test1.0")
self.assertEqual(versions[1].field_dict["name"], "test1.1")
self.assertEqual(versions[2].field_dict["name"], "test1.2")
# Check correct number of versions.
self.assertEqual(len(versions), 3)
def testCanGetForDate(self):
"""Tests that the latest version for a particular date can be loaded."""
with self.settings(USE_TZ=True):
self.assertEqual(Version.objects.get_for_date(self.test, datetime.datetime.now(UTC())).field_dict["name"], "test1.2")
def testCanRevert(self):
"""Tests that an object can be reverted to a previous revision."""
oldest = Version.objects.get_for_object(self.test)[0]
self.assertEqual(oldest.field_dict["name"], "test1.0")
oldest.revert()
self.assertEqual(self.model.objects.get().name, "test1.0")
def testCanGetDeleted(self):
"""Tests that deleted objects can be retrieved."""
self.assertEqual(len(Version.objects.get_deleted(self.model)), 0)
# Create and delete another model.
with reversion.revision:
test2 = self.model.objects.create(name="test2.0")
test2.delete()
# Delete the test model.
self.test.delete()
# Ensure that there are now two deleted models.
deleted = Version.objects.get_deleted(self.model)
self.assertEqual(len(deleted), 2)
self.assertEqual(deleted[0].field_dict["name"], "test1.2")
self.assertEqual(deleted[1].field_dict["name"], "test2.0")
def testCanRecoverDeleted(self):
"""Tests that a deleted object can be recovered."""
self.test.delete()
# Ensure deleted.
self.assertEqual(self.model.objects.count(), 0)
# Recover.
Version.objects.get_deleted(self.model)[0].revert()
# Ensure recovered.
self.assertEqual(self.model.objects.get().name, "test1.2")
def testCanGenerateStatistics(self):
"""Tests that the stats are accurate for Version models."""
self.assertEqual(Version.objects.filter(type=VERSION_ADD).count(), 1)
self.assertEqual(Version.objects.filter(type=VERSION_CHANGE).count(), 2)
self.assertEqual(Version.objects.filter(type=VERSION_DELETE).count(), 0)
with reversion.revision:
self.test.delete()
self.assertEqual(Version.objects.filter(type=VERSION_DELETE).count(), 1)
def tearDown(self):
"""Tears down the tests."""
# Unregister the model.
reversion.unregister(self.model)
# Clear the database.
Revision.objects.all().delete()
self.model.objects.all().delete()
# Clear references.
del self.test
class ReversionQueryStrPrimaryTest(ReversionQueryTest):
model = ReversionTestModelStrPrimary
class ReversionCustomRegistrationTest(TestCase):
"""Tests the custom model registration options."""
def setUp(self):
"""Sets up the ReversionTestModel."""
# Clear the database.
Revision.objects.all().delete()
ReversionTestModel.objects.all().delete()
# Register the model.
reversion.register(ReversionTestModel, fields=("id",), format="xml")
# Create some initial revisions.
with reversion.revision:
self.test = ReversionTestModel.objects.create(name="test1.0")
with reversion.revision:
self.test.name = "test1.1"
self.test.save()
with reversion.revision:
self.test.name = "test1.2"
self.test.save()
def testCustomRegistrationHonored(self):
"""Ensures that the custom settings were honored."""
self.assertEqual(tuple(reversion.revision.get_adapter(ReversionTestModel).get_fields_to_serialize()), ("id",))
self.assertEqual(reversion.revision.get_adapter(ReversionTestModel).get_serialization_format(), "xml")
def testCanRevertOnlySpecifiedFields(self):
""""Ensures that only the restricted set of fields are loaded."""
Version.objects.get_for_object(self.test)[0].revert()
self.assertEqual(ReversionTestModel.objects.get().name, "")
def testCustomSerializationFormat(self):
"""Ensures that the custom serialization format is used."""
        self.assertEqual(Version.objects.get_for_object(self.test)[0].serialized_data[0], "<")
def testIgnoreDuplicates(self):
"""Ensures that duplicate revisions can be ignores."""
self.assertEqual(len(Version.objects.get_for_object(self.test)), 3)
with reversion.revision:
self.test.save()
self.assertEqual(len(Version.objects.get_for_object(self.test)), 4)
with reversion.revision:
reversion.revision.ignore_duplicates = True
self.assertTrue(reversion.revision.ignore_duplicates)
self.test.save()
self.assertEqual(len(Version.objects.get_for_object(self.test)), 4)
def tearDown(self):
"""Tears down the tests."""
# Unregister the model.
reversion.unregister(ReversionTestModel)
# Clear the database.
Revision.objects.all().delete()
ReversionTestModel.objects.all().delete()
# Clear references.
del self.test
class TestRelatedModel(models.Model):
"""A model used to test Reversion relation following."""
name = models.CharField(max_length=100)
relation = models.ForeignKey(ReversionTestModel)
class Meta:
app_label = "auth" # Hack: Cannot use an app_label that is under South control, due to http://south.aeracode.org/ticket/520
class ReversionRelatedTest(TestCase):
"""Tests the ForeignKey and OneToMany support."""
def setUp(self):
"""Sets up the ReversionTestModel."""
# Clear the database.
Revision.objects.all().delete()
ReversionTestModel.objects.all().delete()
TestRelatedModel.objects.all().delete()
# Register the models.
reversion.register(ReversionTestModel, follow=("testrelatedmodel_set",))
reversion.register(TestRelatedModel, follow=("relation",))
    def testCanCreateRevisionForeignKey(self):
"""Tests that a revision containing both models is created."""
with reversion.revision:
test = ReversionTestModel.objects.create(name="test1.0")
related = TestRelatedModel.objects.create(name="related1.0", relation=test)
self.assertEqual(Version.objects.get_for_object(test).count(), 1)
self.assertEqual(Version.objects.get_for_object(related).count(), 1)
self.assertEqual(Revision.objects.count(), 1)
self.assertEqual(Version.objects.get_for_object(test)[0].revision.version_set.all().count(), 2)
def testCanCreateRevisionOneToMany(self):
"""Tests that a revision containing both models is created."""
with reversion.revision:
test = ReversionTestModel.objects.create(name="test1.0")
related = TestRelatedModel.objects.create(name="related1.0", relation=test)
with reversion.revision:
test.save()
self.assertEqual(Version.objects.get_for_object(test).count(), 2)
self.assertEqual(Version.objects.get_for_object(related).count(), 2)
self.assertEqual(Revision.objects.count(), 2)
self.assertEqual(Version.objects.get_for_object(test)[1].revision.version_set.all().count(), 2)
def testCanRevertRevision(self):
"""Tests that an entire revision can be reverted."""
with reversion.revision:
test = ReversionTestModel.objects.create(name="test1.0")
related = TestRelatedModel.objects.create(name="related1.0", relation=test)
with reversion.revision:
test.name = "test1.1"
test.save()
related.name = "related1.1"
related.save()
# Attempt revert.
Version.objects.get_for_object(test)[0].revision.revert()
self.assertEqual(ReversionTestModel.objects.get().name, "test1.0")
self.assertEqual(TestRelatedModel.objects.get().name, "related1.0")
    def testCanRevertDeleteRevision(self):
"""Tests that an entire revision can be reverted with the delete functionality enabled."""
with reversion.revision:
test = ReversionTestModel.objects.create(name="test1.0")
related = TestRelatedModel.objects.create(name="related-a-1.0", relation=test)
with reversion.revision:
related2 = TestRelatedModel.objects.create(name="related-b-1.0", relation=test)
test.name = "test1.1"
test.save()
related.name = "related-a-1.1"
related.save()
# Attempt revert with delete.
Version.objects.get_for_object(test)[0].revision.revert(delete=True)
self.assertEqual(ReversionTestModel.objects.get().name, "test1.0")
self.assertEqual(TestRelatedModel.objects.get(id=related.id).name, "related-a-1.0")
self.assertEqual(TestRelatedModel.objects.filter(id=related2.id).count(), 0)
self.assertEqual(TestRelatedModel.objects.count(), 1)
def testCanRecoverRevision(self):
"""Tests that an entire revision can be recovered."""
with reversion.revision:
test = ReversionTestModel.objects.create(name="test1.0")
related = TestRelatedModel.objects.create(name="related1.0", relation=test)
with reversion.revision:
test.name = "test1.1"
test.save()
related.name = "related1.1"
related.save()
# Delete the models.
test.delete()
# Ensure deleted.
self.assertEqual(ReversionTestModel.objects.count(), 0)
self.assertEqual(TestRelatedModel.objects.count(), 0)
        # Query the deleted models.
self.assertEqual(len(Version.objects.get_deleted(ReversionTestModel)), 1)
self.assertEqual(len(Version.objects.get_deleted(TestRelatedModel)), 1)
# Revert the revision.
Version.objects.get_deleted(ReversionTestModel)[0].revision.revert()
# Ensure reverted.
self.assertEqual(ReversionTestModel.objects.count(), 1)
self.assertEqual(TestRelatedModel.objects.count(), 1)
# Ensure correct version.
self.assertEqual(ReversionTestModel.objects.get().name, "test1.1")
self.assertEqual(TestRelatedModel.objects.get().name, "related1.1")
def testIgnoreDuplicates(self):
"""Ensures the ignoring duplicates works across a foreign key."""
with reversion.revision:
test = ReversionTestModel.objects.create(name="test1.0")
related = TestRelatedModel.objects.create(name="related1.0", relation=test)
with reversion.revision:
test.name = "test1.1"
test.save()
related.name = "related1.1"
related.save()
self.assertEqual(len(Version.objects.get_for_object(test)), 2)
with reversion.revision:
test.save()
self.assertEqual(len(Version.objects.get_for_object(test)), 3)
with reversion.revision:
test.save()
reversion.revision.ignore_duplicates = True
self.assertEqual(len(Version.objects.get_for_object(test)), 3)
def tearDown(self):
"""Tears down the tests."""
# Unregister the models.
reversion.unregister(ReversionTestModel)
reversion.unregister(TestRelatedModel)
# Clear the database.
Revision.objects.all().delete()
ReversionTestModel.objects.all().delete()
TestRelatedModel.objects.all().delete()
class TestManyToManyModel(models.Model):
"""A model used to test Reversion M2M relation following."""
name = models.CharField(max_length=100)
relations = models.ManyToManyField(ReversionTestModel)
class Meta:
app_label = "auth" # Hack: Cannot use an app_label that is under South control, due to http://south.aeracode.org/ticket/520
class ReversionManyToManyTest(TestCase):
"""Tests the ManyToMany support."""
def setUp(self):
"""Sets up the ReversionTestModel."""
# Clear the database.
Revision.objects.all().delete()
ReversionTestModel.objects.all().delete()
TestManyToManyModel.objects.all().delete()
# Register the models.
reversion.register(ReversionTestModel, follow=("testmanytomanymodel_set",))
reversion.register(TestManyToManyModel, follow=("relations",))
def testCanCreateRevision(self):
"""Tests that a revision containing both models is created."""
with reversion.revision:
test1 = ReversionTestModel.objects.create(name="test1.0")
test2 = ReversionTestModel.objects.create(name="test2.0")
related = TestManyToManyModel.objects.create(name="related1.0")
related.relations.add(test1)
related.relations.add(test2)
self.assertEqual(Version.objects.get_for_object(test1).count(), 1)
self.assertEqual(Version.objects.get_for_object(test2).count(), 1)
self.assertEqual(Version.objects.get_for_object(related).count(), 1)
self.assertEqual(Revision.objects.count(), 1)
self.assertEqual(Version.objects.get_for_object(related)[0].revision.version_set.all().count(), 3)
def testCanCreateRevisionRelated(self):
"""Tests that a revision containing both models is created."""
with reversion.revision:
test = ReversionTestModel.objects.create(name="test1.0")
related1 = TestManyToManyModel.objects.create(name="related1.0")
related2 = TestManyToManyModel.objects.create(name="related2.0")
test.testmanytomanymodel_set.add(related1)
test.testmanytomanymodel_set.add(related2)
with reversion.revision:
test.save()
self.assertEqual(Version.objects.get_for_object(test).count(), 2)
self.assertEqual(Version.objects.get_for_object(related1).count(), 2)
self.assertEqual(Version.objects.get_for_object(related2).count(), 2)
self.assertEqual(Revision.objects.count(), 2)
self.assertEqual(Version.objects.get_for_object(test)[0].revision.version_set.all().count(), 3)
def testCanRevertRevision(self):
"""Tests that an entire revision can be reverted."""
with reversion.revision:
test1 = ReversionTestModel.objects.create(name="test1.0")
test2 = ReversionTestModel.objects.create(name="test2.0")
related = TestManyToManyModel.objects.create(name="related1.0")
related.relations.add(test1)
related.relations.add(test2)
with reversion.revision:
test1.name = "test1.1"
test1.save()
test2.name = "test2.1"
test2.save()
related.name = "related1.1"
related.save()
# Attempt revert.
Version.objects.get_for_object(related)[0].revision.revert()
self.assertEqual(ReversionTestModel.objects.get(pk=test1.pk).name, "test1.0")
self.assertEqual(ReversionTestModel.objects.get(pk=test2.pk).name, "test2.0")
self.assertEqual(TestManyToManyModel.objects.get().name, "related1.0")
def testCanRecoverRevision(self):
"""Tests that an entire revision can be recovered."""
with reversion.revision:
test1 = ReversionTestModel.objects.create(name="test1.0")
test2 = ReversionTestModel.objects.create(name="test2.0")
related = TestManyToManyModel.objects.create(name="related1.0")
related.relations.add(test1)
related.relations.add(test2)
with reversion.revision:
test1.name = "test1.1"
test1.save()
test2.name = "test2.1"
test2.save()
related.name = "related1.1"
related.save()
# Save the pks.
test1_pk = test1.pk
test2_pk = test2.pk
# Delete the models.
related.delete()
test1.delete()
test2.delete()
# Ensure deleted.
self.assertEqual(ReversionTestModel.objects.count(), 0)
self.assertEqual(TestManyToManyModel.objects.count(), 0)
        # Query the deleted models.
self.assertEqual(len(Version.objects.get_deleted(ReversionTestModel)), 2)
self.assertEqual(len(Version.objects.get_deleted(TestManyToManyModel)), 1)
# Revert the revision.
Version.objects.get_deleted(TestManyToManyModel)[0].revision.revert()
# Ensure reverted.
self.assertEqual(ReversionTestModel.objects.count(), 2)
self.assertEqual(TestManyToManyModel.objects.count(), 1)
# Ensure correct version.
self.assertEqual(ReversionTestModel.objects.get(pk=test1_pk).name, "test1.1")
self.assertEqual(ReversionTestModel.objects.get(pk=test2_pk).name, "test2.1")
self.assertEqual(TestManyToManyModel.objects.get().name, "related1.1")
def tearDown(self):
"""Tears down the tests."""
# Unregister the models.
reversion.unregister(ReversionTestModel)
reversion.unregister(TestManyToManyModel)
# Clear the database.
Revision.objects.all().delete()
ReversionTestModel.objects.all().delete()
TestManyToManyModel.objects.all().delete()
class ReversionCreateInitialRevisionsTest(TestCase):
"""Tests that the createinitialrevisions command works."""
model = ReversionTestModel
def setUp(self):
"""Sets up the ReversionTestModel."""
# Clear the database.
Revision.objects.all().delete()
self.model.objects.all().delete()
# Register the model.
reversion.register(self.model)
# Create some initial revisions.
self.test = self.model.objects.create(name="test1.0")
def testCreateInitialRevisions(self):
self.assertEqual(Version.objects.get_for_object(self.test).count(), 0)
call_command("createinitialrevisions", verbosity=0)
self.assertEqual(Version.objects.get_for_object(self.test).count(), 1)
def tearDown(self):
"""Tears down the tests."""
# Unregister the model.
reversion.unregister(self.model)
# Clear the database.
Revision.objects.all().delete()
self.model.objects.all().delete()
# Clear references.
del self.test
class ReversionCreateInitialRevisionsStrPrimaryTest(ReversionCreateInitialRevisionsTest):
model = ReversionTestModelStrPrimary
|
cbrepo/django-reversion
|
src/reversion/tests_deprecated.py
|
Python
|
bsd-3-clause
| 25,219
| 0.005512
|
# -*- encoding: utf-8 -*-
###########################################################################
# Module Writen to OpenERP, Open Source Management Solution
#
# Copyright (c) 2011 Vauxoo - http://www.vauxoo.com/
# All Rights Reserved.
# info Vauxoo (info@vauxoo.com)
############################################################################
# Coded by: Juan Carlos Hernandez Funes (info@vauxoo.com)
# Planned by: Moises Augusto Lopez Calderon (info@vauxoo.com)
############################################################################
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv, fields
class account_analytic_account(osv.Model):
_inherit = 'account.analytic.account'
_order = "parent_left"
_parent_order = "code"
_parent_store = True
_columns = {
'parent_right': fields.integer('Parent Right', select=1),
'parent_left': fields.integer('Parent Left', select=1),
}
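# Design note: with _parent_store enabled, OpenERP keeps parent_left and
# parent_right up to date as a nested-set index, so 'child_of' searches become
# simple range scans instead of recursive lookups. Hypothetical sketch (the
# variable names are assumptions):
#
#   account = self.browse(cr, uid, account_id)
#   child_ids = self.search(cr, uid, [
#       ('parent_left', '>', account.parent_left),
#       ('parent_left', '<', account.parent_right),
#   ])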
|
3dfxsoftware/cbss-addons
|
account_analytic_btree/account_analytic_btree.py
|
Python
|
gpl-2.0
| 1,689
| 0
|
"""
FEniCS tutorial demo program: Poisson equation with Dirichlet,
Neumann and Robin conditions.
The solution is checked to coincide with the exact solution at all nodes.
The file is a modification of dn2_p2D.py. Note that the boundary is now also
split into two distinct parts (separate objects and integrations)
and we have a Robin condition instead of a Neumann condition at y=0.
"""
from dolfin import *
import numpy
#-------------- Preprocessing step -----------------
# Create mesh and define function space
mesh = UnitSquareMesh(3, 2)
V = FunctionSpace(mesh, 'Lagrange', 1)
# Define boundary segments for Neumann, Robin and Dirichlet conditions
# Create mesh function over cell facets
boundary_parts = MeshFunction("size_t", mesh, mesh.topology().dim()-1)
# Mark lower boundary facets as subdomain 0
class LowerRobinBoundary(SubDomain):
def inside(self, x, on_boundary):
tol = 1E-14 # tolerance for coordinate comparisons
return on_boundary and abs(x[1]) < tol
Gamma_R = LowerRobinBoundary()
Gamma_R.mark(boundary_parts, 0)
q = Expression('1 + x[0]*x[0] + 2*x[1]*x[1]')
p = Constant(100) # arbitrary function can go here
# Mark upper boundary facets as subdomain 1
class UpperNeumannBoundary(SubDomain):
def inside(self, x, on_boundary):
tol = 1E-14 # tolerance for coordinate comparisons
return on_boundary and abs(x[1] - 1) < tol
Gamma_N = UpperNeumannBoundary()
Gamma_N.mark(boundary_parts, 1)
g = Expression('-4*x[1]')
# Mark left boundary as subdomain 2
class LeftBoundary(SubDomain):
def inside(self, x, on_boundary):
tol = 1E-14 # tolerance for coordinate comparisons
return on_boundary and abs(x[0]) < tol
Gamma_0 = LeftBoundary()
Gamma_0.mark(boundary_parts, 2)
# Mark right boundary as subdomain 3
class RightBoundary(SubDomain):
def inside(self, x, on_boundary):
tol = 1E-14 # tolerance for coordinate comparisons
return on_boundary and abs(x[0] - 1) < tol
Gamma_1 = RightBoundary()
Gamma_1.mark(boundary_parts, 3)
#-------------- Solution and problem definition step -----------------
# given mesh and boundary_parts
u_L = Expression('1 + 2*x[1]*x[1]')
u_R = Expression('2 + 2*x[1]*x[1]')
bcs = [DirichletBC(V, u_L, boundary_parts, 2),
DirichletBC(V, u_R, boundary_parts, 3)]
# Define variational problem
u = TrialFunction(V)
v = TestFunction(V)
f = Constant(-6.0)
a = inner(nabla_grad(u), nabla_grad(v))*dx + p*u*v*ds(0)
L = f*v*dx - g*v*ds(1) + p*q*v*ds(0)
# Compute solution
A = assemble(a, exterior_facet_domains=boundary_parts)
b = assemble(L, exterior_facet_domains=boundary_parts)
for condition in bcs: condition.apply(A, b)
# Alternative is not yet supported
#A, b = assemble_system(a, L, bc, exterior_facet_domains=boundary_parts)
u = Function(V)
solve(A, u.vector(), b, 'lu')
print mesh
# Verification
u_exact = Expression('1 + x[0]*x[0] + 2*x[1]*x[1]')
u_e = interpolate(u_exact, V)
print 'Max error:', abs(u_e.vector().array() - u.vector().array()).max()
#interactive()
|
akshmakov/Dolfin-Fijee-Fork
|
test/unit/book/python/chapter_1_files/stationary/poisson/dnr_p2D.py
|
Python
|
lgpl-3.0
| 3,020
| 0.004305
|
from .basic import ProcFile
from collections import namedtuple
class Consoles(ProcFile):
filename = '/proc/consoles'
Console = namedtuple('Console', ['operations', 'flags', 'major', 'minor'])
def names(self):
return [line.split()[0] for line in self._readfile()]
def get(self, name, default=None):
for line in self._readfile():
console_info = line.replace('(', '').replace(')', '').split()
if name == console_info[0]:
major, minor = console_info[-1].split(':')
return [console_info[1],
''.join(console_info[2:-1]), major, minor
]
        return default
def __getattr__(self, name):
if name in self.names():
return self.Console(*tuple(self.get(name)))
else:
raise AttributeError
if __name__ == '__main__':
CONSOLES = Consoles()
print(CONSOLES.names())
print(CONSOLES.get('tty0'))
print(CONSOLES.tty0.operations)
print(CONSOLES.tty0.flags)
print(CONSOLES.tty0.major)
print(CONSOLES.tty0.minor)
|
thuck/proc
|
proc/consoles.py
|
Python
|
lgpl-3.0
| 1,126
| 0.000888
|
'''@file cost_features_rec.py
contains the CostFeaturesRec trainer for reconstruction of the audio samples'''
import tensorflow as tf
import trainer
from nabu.neuralnetworks import ops
class CostFeaturesRec(trainer.Trainer):
    '''A trainer that minimises a mean squared error reconstruction loss; the
    output sequences must be of the same length as the input sequences'''
def compute_loss(self, targets, logits, logit_seq_length,
target_seq_length):
'''
Compute the loss
        Creates the operation to compute the loss for every input frame; this
        trainer uses a mean squared error over the reconstructed audio samples
        (overwrite this method for a different loss function)
Args:
            targets: a tuple of targets, the first one being a
[batch_size x max_target_length] tensor containing the real
targets, the second one being a [batch_size x max_audioseq_length x dim]
tensor containing the audio samples or other extra information.
logits: a tuple of [batch_size, max_logit_length, dim] tensors
containing the logits for the text and the audio samples
logit_seq_length: the length of all the logit sequences as a tuple of
[batch_size] vectors
target_seq_length: the length of all the target sequences as a
tupple of two [batch_size] vectors, both for one of the elements
in the targets tupple
Returns:
a scalar value containing the loss
'''
with tf.name_scope('cross_entropy_loss'):
total_loss = ops.mse(targets[1], logits[1], target_seq_length[1])
return total_loss
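# Hypothetical usage sketch (the tensor names are placeholders): assuming a
# CostFeaturesRec instance `trainer_obj` and TF1-style tensors for the text
# and audio streams, the reconstruction loss could be evaluated as
#
#   loss = trainer_obj.compute_loss(
#       targets=(text_targets, audio_targets),
#       logits=(text_logits, audio_logits),
#       logit_seq_length=(text_logit_lengths, audio_logit_lengths),
#       target_seq_length=(text_target_lengths, audio_target_lengths))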
|
JeroenBosmans/nabu
|
nabu/neuralnetworks/trainers/cost_features_rec.py
|
Python
|
mit
| 1,705
| 0.002346
|
#!/usr/bin/env python
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the
# Free Software Foundation; either version 2 of the License, or (at your
# option) any later version. See http://www.gnu.org/copyleft/gpl.html for
# the full text of the license.
# query.py: Perform a few varieties of queries
from __future__ import print_function
import time
import bugzilla
# public test instance of bugzilla.redhat.com. It's okay to make changes
URL = "partner-bugzilla.redhat.com"
bzapi = bugzilla.Bugzilla(URL)
# build_query is a helper function that handles some bugzilla version
# incompatibility issues. All it does is return a properly formatted
# dict(), and provide friendly parameter names. The param names map
# to those accepted by XMLRPC Bug.search:
# https://bugzilla.readthedocs.io/en/latest/api/core/v1/bug.html#search-bugs
query = bzapi.build_query(
product="Fedora",
component="python-bugzilla")
# Since 'query' is just a dict, you could set your own parameters too, like
# if your bugzilla had a custom field. This will set 'status' for example,
# but for common opts it's better to use build_query
query["status"] = "CLOSED"
# query() is what actually performs the query. it's a wrapper around Bug.search
t1 = time.time()
bugs = bzapi.query(query)
t2 = time.time()
print("Found %d bugs with our query" % len(bugs))
print("Query processing time: %s" % (t2 - t1))
# Depending on the size of your query, you can massively speed things up
# by telling bugzilla to only return the fields you care about, since a
# large chunk of the return time is transmitting the extra bug data. You
# tweak this with include_fields:
# https://wiki.mozilla.org/Bugzilla:BzAPI#Field_Control
# Bugzilla will only return those fields listed in include_fields.
query = bzapi.build_query(
product="Fedora",
component="python-bugzilla",
include_fields=["id", "summary"])
t1 = time.time()
bugs = bzapi.query(query)
t2 = time.time()
print("Quicker query processing time: %s" % (t2 - t1))
# bugzilla.redhat.com, and bugzilla >= 5.0 support queries using the same
# format as is used for 'advanced' search URLs via the Web UI. For example,
# I go to partner-bugzilla.redhat.com -> Search -> Advanced Search, select
# Classification=Fedora
# Product=Fedora
# Component=python-bugzilla
# Unselect all bug statuses (so, all status values)
# Under Custom Search
# Creation date -- is less than or equal to -- 2010-01-01
#
# Run that, copy the URL and bring it here, pass it to url_to_query to
# convert it to a dict(), and query as usual
query = bzapi.url_to_query("https://partner-bugzilla.redhat.com/"
"buglist.cgi?classification=Fedora&component=python-bugzilla&"
"f1=creation_ts&o1=lessthaneq&order=Importance&product=Fedora&"
"query_format=advanced&v1=2010-01-01")
query["include_fields"] = ["id", "summary"]
bugs = bzapi.query(query)
print("The URL query returned 22 bugs... "
"I know that without even checking because it shouldn't change!... "
"(count is %d)" % len(bugs))
# One note about querying... you can get subtly different results if
# you are not logged in. Depending on your bugzilla setup it may not matter,
# but if you are dealing with private bugs, check bzapi.logged_in setting
# to ensure your cached credentials are up to date. See update.py for
# an example usage
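# Hypothetical sketch of that check (interactive_login exists in recent
# python-bugzilla releases; adapt it to however you manage credentials):
#
#   if not bzapi.logged_in:
#       bzapi.interactive_login()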
|
abn/python-bugzilla
|
examples/query.py
|
Python
|
gpl-2.0
| 3,441
| 0.000872
|
"""Methods to set media related (resolution, length) scene properties"""
import bpy
def setup(animated, width, height, length):
"""
Sets up the type, resolution and length of the currently open scene
The render resolution of the scene is set, and additionally ...
... for stills, sets the length of the scene to exactly 1 frame.
... for animations, enables animated seed for cycles, sets the fps to
24 and the fps_base to 1.
"""
bpy.context.scene.render.resolution_percentage = 100
bpy.context.scene.render.resolution_x = int(width)
bpy.context.scene.render.resolution_y = int(height)
if not animated:
bpy.context.scene.frame_end = 1
else:
bpy.context.scene.cycles.use_animated_seed = True
bpy.context.scene.frame_end = length * 24
bpy.context.scene.render.fps = 24
        # fps_base only differs for fractional framerates (e.g. 23.976); 24 fps is exact at base 1
bpy.context.scene.render.fps_base = 1
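# Minimal usage sketch, assuming this runs inside Blender (where bpy is
# available); the values below configure a 10 second 1080p animation:
#
#   setup(animated=True, width=1920, height=1080, length=10)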
|
apertus-open-source-cinema/elmyra
|
src/python/lib/media.py
|
Python
|
gpl-3.0
| 974
| 0
|